From 70cfb8e0cd12bfbacf665d7ff77d59282ee8dc6c Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 11:51:35 +0100 Subject: [PATCH 01/32] refactor: harden dashboard refresh flow and consolidate parsers - reject stale dashboard snapshots by time window and request id - normalize runtime provider paths and route dashboard side effects through services - share codex and claude code local-source parsing across dashboard and telemetry paths - update audit notes and regression coverage for the cleanup pass Co-Authored-By: Claude Opus 4.6 --- cmd/demo/main.go | 8 +- cmd/openusage/dashboard.go | 21 +-- cmd/openusage/snapshot_dispatcher.go | 43 +++++ .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 63 +++++++ internal/config/config.go | 1 + internal/core/detail_widget.go | 20 ++ internal/core/detail_widget_test.go | 31 ++++ internal/core/provider.go | 31 +++- internal/core/provider_test.go | 43 +++++ internal/daemon/accounts.go | 16 +- internal/daemon/accounts_test.go | 22 +++ internal/daemon/runtime.go | 51 +++--- internal/daemon/server.go | 12 +- internal/daemon/types.go | 28 +-- internal/dashboardapp/service.go | 89 +++++++++ internal/detect/claude_code.go | 2 - internal/detect/cursor.go | 2 - internal/providers/claude_code/claude_code.go | 105 ++--------- .../claude_code/conversation_records.go | 125 +++++++++++++ .../providers/claude_code/telemetry_usage.go | 113 ++---------- internal/providers/codex/codex.go | 164 +++-------------- internal/providers/codex/session_decoder.go | 128 +++++++++++++ internal/providers/codex/telemetry_usage.go | 140 ++++---------- internal/providers/copilot/copilot.go | 13 +- internal/providers/cursor/cursor.go | 20 +- internal/providers/gemini_cli/gemini_cli.go | 13 +- internal/telemetry/read_model.go | 4 +- internal/telemetry/usage_view.go | 99 +++++----- internal/tui/model.go | 171 +++++++++++------- internal/tui/model_display_test.go | 97 +++++++++- internal/tui/model_refresh_test.go | 14 +- internal/tui/settings_modal.go 
| 9 +- 32 files changed, 1042 insertions(+), 656 deletions(-) create mode 100644 cmd/openusage/snapshot_dispatcher.go create mode 100644 docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md create mode 100644 internal/core/provider_test.go create mode 100644 internal/dashboardapp/service.go create mode 100644 internal/providers/claude_code/conversation_records.go create mode 100644 internal/providers/codex/session_decoder.go diff --git a/cmd/demo/main.go b/cmd/demo/main.go index 2417791..94d02ef 100644 --- a/cmd/demo/main.go +++ b/cmd/demo/main.go @@ -6,6 +6,7 @@ import ( "io" "log" "os" + "sync/atomic" "time" tea "github.com/charmbracelet/bubbletea" @@ -39,6 +40,7 @@ func main() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var snapshotRequestID atomic.Uint64 refreshAll := func() { snaps := make(map[string]core.UsageSnapshot, len(accounts)) @@ -61,7 +63,11 @@ func main() { } snaps[acct.ID] = snap } - p.Send(tui.SnapshotsMsg(snaps)) + p.Send(tui.SnapshotsMsg{ + Snapshots: snaps, + TimeWindow: core.TimeWindow30d, + RequestID: snapshotRequestID.Add(1), + }) } go func() { diff --git a/cmd/openusage/dashboard.go b/cmd/openusage/dashboard.go index 42192e5..c79d5e2 100644 --- a/cmd/openusage/dashboard.go +++ b/cmd/openusage/dashboard.go @@ -14,6 +14,7 @@ import ( "github.com/janekbaraniewski/openusage/internal/config" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/daemon" + "github.com/janekbaraniewski/openusage/internal/dashboardapp" "github.com/janekbaraniewski/openusage/internal/tui" "github.com/janekbaraniewski/openusage/internal/version" ) @@ -39,6 +40,7 @@ func runDashboard(cfg config.Config) { cachedAccounts, timeWindow, ) + model.SetServices(dashboardapp.NewService()) socketPath := daemon.ResolveSocketPath() @@ -47,12 +49,13 @@ func runDashboard(cfg config.Config) { socketPath, verbose, ) - viewRuntime.SetTimeWindow(string(timeWindow)) + viewRuntime.SetTimeWindow(timeWindow) ctx, cancel 
:= context.WithCancel(context.Background()) defer cancel() var program *tea.Program + dispatcher := &snapshotDispatcher{} model.SetOnAddAccount(func(acct core.AccountConfig) { if strings.TrimSpace(acct.ID) == "" || strings.TrimSpace(acct.Provider) == "" { @@ -96,16 +99,11 @@ func runDashboard(cfg config.Config) { } }) - model.SetOnRefresh(func() { - go func() { - snaps := viewRuntime.ReadWithFallback(ctx) - if len(snaps) > 0 && program != nil { - program.Send(tui.SnapshotsMsg(snaps)) - } - }() + model.SetOnRefresh(func(window core.TimeWindow) { + dispatcher.refresh(ctx, viewRuntime, window) }) - model.SetOnTimeWindowChange(func(tw string) { + model.SetOnTimeWindowChange(func(tw core.TimeWindow) { viewRuntime.SetTimeWindow(tw) }) @@ -118,6 +116,7 @@ func runDashboard(cfg config.Config) { }) program = tea.NewProgram(model, tea.WithAltScreen(), tea.WithMouseCellMotion()) + dispatcher.bind(program) go func() { runStartupUpdateCheck( @@ -139,8 +138,8 @@ func runDashboard(cfg config.Config) { ctx, viewRuntime, interval, - func(snaps map[string]core.UsageSnapshot) { - program.Send(tui.SnapshotsMsg(snaps)) + func(frame daemon.SnapshotFrame) { + dispatcher.dispatch(frame) }, func(state daemon.DaemonState) { program.Send(mapDaemonState(state)) diff --git a/cmd/openusage/snapshot_dispatcher.go b/cmd/openusage/snapshot_dispatcher.go new file mode 100644 index 0000000..282c8fb --- /dev/null +++ b/cmd/openusage/snapshot_dispatcher.go @@ -0,0 +1,43 @@ +package main + +import ( + "context" + "sync/atomic" + + tea "github.com/charmbracelet/bubbletea" + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/daemon" + "github.com/janekbaraniewski/openusage/internal/tui" +) + +type snapshotDispatcher struct { + program *tea.Program + nextID atomic.Uint64 +} + +func (d *snapshotDispatcher) bind(program *tea.Program) { + d.program = program +} + +func (d *snapshotDispatcher) dispatch(frame daemon.SnapshotFrame) { + d.send(frame, 
d.nextID.Add(1)) +} + +func (d *snapshotDispatcher) refresh(ctx context.Context, rt *daemon.ViewRuntime, window core.TimeWindow) { + requestID := d.nextID.Add(1) + go func() { + frame := rt.ReadWithFallbackForWindow(ctx, window) + d.send(frame, requestID) + }() +} + +func (d *snapshotDispatcher) send(frame daemon.SnapshotFrame, requestID uint64) { + if d == nil || d.program == nil || len(frame.Snapshots) == 0 { + return + } + d.program.Send(tui.SnapshotsMsg{ + Snapshots: frame.Snapshots, + TimeWindow: frame.TimeWindow, + RequestID: requestID, + }) +} diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md new file mode 100644 index 0000000..b1d41a6 --- /dev/null +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -0,0 +1,63 @@ +# Codebase Audit Action Table + +Date: 2026-03-09 +Repository: `/Users/janekbaraniewski/Workspace/priv/openusage` + +## Scope + +This pass combined: + +- full test run: `go test ./...` +- targeted race run: `go test -race ./internal/daemon ./internal/telemetry ./internal/tui ./cmd/openusage` +- repo-wide static scans for large files, goroutines, mutex usage, legacy markers, and duplicated metric-prefix parsing +- targeted reads of the highest-risk files and subsystems + +This table captures every issue found in this pass. It is broad and high-signal, but it is still a static audit, not a proof that no additional edge-case bugs exist. + +## Resolved In This Pass + +| ID | Priority | Area | Evidence | Change made | Follow-up | +| --- | --- | --- | --- | --- | --- | +| R1 | Fixed | Dashboard timeframe race | `cmd/openusage/dashboard.go`, `internal/tui/model.go`, `internal/daemon/runtime.go` | Snapshot messages now carry `TimeWindow` and `RequestID`, and stale responses are rejected. | None. Keep regression tests. 
| +| R2 | Fixed | Daemon cache refresh bug | `internal/daemon/accounts.go`, `internal/daemon/server.go`, `internal/daemon/types.go` | Read-model cache refresh dedupe now keys by normalized time window instead of collapsing all windows together. | None. | +| R3 | Fixed | Weakly typed time-window flow | `internal/daemon/types.go`, `internal/daemon/runtime.go`, `internal/telemetry/read_model.go`, `internal/telemetry/usage_view.go` | Internal daemon and telemetry paths now use `core.TimeWindow` instead of raw strings. | Continue shrinking stringly typed config boundaries over time. | +| R4 | Fixed | Dashboard refresh orchestration sprawl | `cmd/openusage/snapshot_dispatcher.go`, `cmd/openusage/dashboard.go` | Snapshot sequencing/version dispatch moved out of dashboard wiring into a dedicated helper. | Reuse the same pattern if other async UI data channels are added. | +| R5 | Fixed | Legacy runtime path overload cleanup | `internal/core/provider.go`, `internal/config/config.go`, `internal/detect/cursor.go`, `internal/detect/claude_code.go`, `internal/providers/cursor/cursor.go`, `internal/providers/claude_code/claude_code.go` | Legacy provider-specific path overloads are normalized into `Paths`, and runtime provider code now uses named paths instead of normal-path dependence on `Binary` / `BaseURL`. | The type still contains `Binary` and `BaseURL` for legitimate CLI/base-URL providers. | +| R6 | Fixed | Repeated coding-tool detail widgets | `internal/core/detail_widget.go`, `internal/providers/cursor/cursor.go`, `internal/providers/codex/codex.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/copilot/copilot.go`, `internal/providers/gemini_cli/gemini_cli.go` | Repeated detail section arrays were replaced with a shared `CodingToolDetailWidget(...)` constructor. | Extend the same pattern if more coding-tool providers are added. 
| +| R7 | Fixed | TUI side-effect boundary | `internal/tui/model.go`, `internal/dashboardapp/service.go`, `cmd/openusage/dashboard.go` | `tui.Model` no longer directly persists settings, saves credentials, installs integrations, or validates API keys. Those side effects now go through an injected dashboard application service. | More UI decomposition is still useful, but the highest-leak side effects are no longer hardcoded in the model. | +| R8 | Fixed | Codex parser duplication | `internal/providers/codex/session_decoder.go`, `internal/providers/codex/codex.go`, `internal/providers/codex/telemetry_usage.go` | Codex session JSONL parsing now runs through one shared decoder used by both the dashboard breakdown reader and telemetry ingestion path. | Apply the same consolidation to Claude Code and Cursor. | +| R9 | Fixed | Claude Code parser duplication | `internal/providers/claude_code/conversation_records.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/telemetry_usage.go` | Claude Code JSONL parsing, token total calculation, and usage/tool dedupe keys now run through one shared normalized conversation-record helper used by both the dashboard aggregator and telemetry collector. | Apply the same consolidation pattern to Cursor. | + +## Action Table + +| ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | +| --- | --- | --- | --- | --- | --- | --- | +| A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. 
| +| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go:393-584`, `internal/dashboardapp/service.go` | The side effects are now injected, but `Model` still owns a very large amount of event-handling and state-transition logic. | Continue splitting update/action logic into smaller TUI units and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | +| A3 | P1 | UI metric-prefix parsing | `internal/tui/tiles_composition.go:302-322`, `internal/tui/tiles_composition.go:913-1527`, `internal/tui/detail.go:371-432`, `internal/tui/analytics.go:663-729` | Rendering code is still parsing raw metric key conventions (`model_`, `usage_client_`, `usage_source_`, `mcp_`, `lang_`) directly. This duplicates interpretation logic across views. | Introduce typed composition DTOs in `internal/core` or `internal/telemetry`; renderers should consume structured sections rather than re-parse maps. | Removes a large class of UI drift bugs and reduces per-render work. | +| A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go:307-2188` | `openrouter.go` mixes auth probing, credits, keys, analytics parsing, generation pagination, provider resolution, metadata enrichment, and output projection in one 2800+ LOC file. | Split into subpackages/files: `api_client`, `analytics`, `generations`, `provider_resolution`, `projection`, `types`. | Easier maintenance, smaller diff surface, faster targeted testing. | +| A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. | Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. 
Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. | +| A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go:160-1757` | `usage_view.go` is simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | +| A7 | P1 | Daemon service monolith | `internal/daemon/server.go:1-1211` | `server.go` owns service startup, socket server, polling, collection, retention, cache refresh, hook handling, and HTTP endpoints. | Split into `service_runtime`, `http_handlers`, `polling`, `collection`, `cache`, and `hook_ingest` files/types. | Lower mental load and easier concurrency review. | +| A8 | P1 | Shared parser duplication | `internal/providers/cursor/cursor.go:1087-2086`, `internal/providers/cursor/telemetry.go:97-231` | Codex and Claude Code are now consolidated, but Cursor still parses overlapping local source formats in multiple flows. Snapshot and telemetry ingestion paths can still drift there. | Build one canonical decoder/projection layer for Cursor local sources and use it from both dashboard and telemetry paths. | Eliminates duplicated bugfix work and reduces format drift risk. | +| A9 | P2 | Detached background work ownership | `internal/daemon/server.go:1108`, `internal/daemon/server.go:1126`, `internal/daemon/server.go:306-318` | Read-model cache refreshes are launched from request handlers with `context.Background()`. They are bounded by timeout but detached from service lifecycle ownership. | Give `Service` a root context and use it for detached async refreshes. Optionally expose a bounded worker pool instead of unconstrained goroutine creation. | Safer shutdown semantics and fewer background task ownership ambiguities. 
| +| A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/cursor/cursor.go:478`, `internal/providers/cursor/cursor.go:1704-1711`, `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Providers and analytics logic read `time.Now()` directly in many places, often mixing local time and UTC. This is hard to test and easy to get subtly wrong. | Introduce a small clock abstraction in time-sensitive subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | +| A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | +| A13 | P3 | Logging/throttling utilities are ad hoc | `internal/daemon/server.go:194-237`, `internal/daemon/runtime.go:198-207`, `internal/core/trace.go:14-27` | Log throttling and trace behavior are implemented in several small local patterns instead of one reusable utility. | Consolidate throttled logging and trace controls into a shared helper package. | Small cleanup, but it reduces low-grade duplication. | +| A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. 
| Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | +| A15 | P3 | Performance optimization opportunity in render path | `internal/tui/model.go:441-450`, `internal/tui/tiles_composition.go:302-322`, `internal/tui/detail.go:752-1046`, `internal/tui/analytics.go:663-729` | The UI recomputes display/composition structures from raw metric maps repeatedly during rendering. It is correct, but the work is duplicated across views and frames. | Cache derived display/composition sections per snapshot update instead of rebuilding them in each view path. | Lower render cost and less duplicated parsing logic. | + +## Suggested Execution Order + +1. A1, A8 +2. A2, A3 +3. A6, A7 +4. A4, A5 +5. A9, A11, A15 +6. A12, A13, A14 + +## Notes + +- The highest-risk remaining issues are architectural rather than immediately broken behavior. +- The biggest drift risks are still the duplicated raw-source parsers and the metric-prefix parsing in the TUI. +- The race pass completed cleanly for the core dashboard/daemon/telemetry packages after the timeframe fix. 
diff --git a/internal/config/config.go b/internal/config/config.go index 2a5273d..e982359 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -226,6 +226,7 @@ func normalizeAccounts(in []core.AccountConfig) []core.AccountConfig { } normalized := lo.Map(in, func(acct core.AccountConfig, _ int) core.AccountConfig { acct.ID = normalizeAccountID(acct.ID) + acct.NormalizeRuntimePaths() return acct }) filtered := lo.Filter(normalized, func(acct core.AccountConfig, _ int) bool { return acct.ID != "" }) diff --git a/internal/core/detail_widget.go b/internal/core/detail_widget.go index 3c293b7..6910260 100644 --- a/internal/core/detail_widget.go +++ b/internal/core/detail_widget.go @@ -35,6 +35,26 @@ func DefaultDetailWidget() DetailWidget { } } +func CodingToolDetailWidget(includeMCP bool) DetailWidget { + sections := []DetailSection{ + {Name: "Usage", Order: 1, Style: DetailSectionStyleUsage}, + {Name: "Models", Order: 2, Style: DetailSectionStyleModels}, + {Name: "Languages", Order: 3, Style: DetailSectionStyleLanguages}, + } + nextOrder := 4 + if includeMCP { + sections = append(sections, DetailSection{Name: "MCP Usage", Order: nextOrder, Style: DetailSectionStyleMCP}) + nextOrder++ + } + sections = append(sections, + DetailSection{Name: "Spending", Order: nextOrder, Style: DetailSectionStyleSpending}, + DetailSection{Name: "Trends", Order: nextOrder + 1, Style: DetailSectionStyleTrends}, + DetailSection{Name: "Tokens", Order: nextOrder + 2, Style: DetailSectionStyleTokens}, + DetailSection{Name: "Activity", Order: nextOrder + 3, Style: DetailSectionStyleActivity}, + ) + return DetailWidget{Sections: sections} +} + func (w DetailWidget) section(name string) (DetailSection, bool) { for _, s := range w.Sections { if s.Name == name { diff --git a/internal/core/detail_widget_test.go b/internal/core/detail_widget_test.go index dc2a3cd..7f936c1 100644 --- a/internal/core/detail_widget_test.go +++ b/internal/core/detail_widget_test.go @@ -62,3 +62,34 @@ 
func TestDetailWidgetWithModelsAndTrends(t *testing.T) { t.Fatalf("Trends order = %d, want 3", got) } } + +func TestCodingToolDetailWidget(t *testing.T) { + tests := []struct { + name string + includeMCP bool + wantMCP bool + wantCount int + }{ + {name: "with mcp", includeMCP: true, wantMCP: true, wantCount: 8}, + {name: "without mcp", includeMCP: false, wantMCP: false, wantCount: 7}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + w := CodingToolDetailWidget(tt.includeMCP) + if len(w.Sections) != tt.wantCount { + t.Fatalf("sections len = %d, want %d", len(w.Sections), tt.wantCount) + } + _, hasMCP := w.section("MCP Usage") + if hasMCP != tt.wantMCP { + t.Fatalf("has MCP Usage = %v, want %v", hasMCP, tt.wantMCP) + } + if got := w.SectionStyle("Usage"); got != DetailSectionStyleUsage { + t.Fatalf("Usage style = %q, want %q", got, DetailSectionStyleUsage) + } + if got := w.SectionStyle("Activity"); got != DetailSectionStyleActivity { + t.Fatalf("Activity style = %q, want %q", got, DetailSectionStyleActivity) + } + }) + } +} diff --git a/internal/core/provider.go b/internal/core/provider.go index 9792ee7..92739e9 100644 --- a/internal/core/provider.go +++ b/internal/core/provider.go @@ -3,6 +3,7 @@ package core import ( "context" "os" + "strings" ) type AccountConfig struct { @@ -52,10 +53,38 @@ func (c AccountConfig) Path(key, fallback string) string { // SetPath stores a named provider-specific path. func (c *AccountConfig) SetPath(key, value string) { + if c == nil || strings.TrimSpace(key) == "" || strings.TrimSpace(value) == "" { + return + } if c.Paths == nil { c.Paths = make(map[string]string) } - c.Paths[key] = value + c.Paths[key] = strings.TrimSpace(value) +} + +// NormalizeRuntimePaths migrates legacy provider-specific path overloads out of +// Binary/BaseURL into Paths so runtime code can use a single access pattern. 
+func (c *AccountConfig) NormalizeRuntimePaths() { + if c == nil { + return + } + + switch strings.TrimSpace(c.Provider) { + case "cursor": + if strings.TrimSpace(c.Binary) != "" { + c.SetPath("tracking_db", c.Binary) + } + if strings.TrimSpace(c.BaseURL) != "" { + c.SetPath("state_db", c.BaseURL) + } + case "claude_code": + if strings.TrimSpace(c.Binary) != "" { + c.SetPath("stats_cache", c.Binary) + } + if strings.TrimSpace(c.BaseURL) != "" { + c.SetPath("account_config", c.BaseURL) + } + } } func (c AccountConfig) ResolveAPIKey() string { diff --git a/internal/core/provider_test.go b/internal/core/provider_test.go new file mode 100644 index 0000000..003a9ed --- /dev/null +++ b/internal/core/provider_test.go @@ -0,0 +1,43 @@ +package core + +import "testing" + +func TestAccountConfigNormalizeRuntimePaths(t *testing.T) { + tests := []struct { + name string + account AccountConfig + wantKey string + wantPath string + }{ + { + name: "cursor migrates legacy db fields", + account: AccountConfig{ + Provider: "cursor", + Binary: "/tmp/tracking.db", + BaseURL: "/tmp/state.vscdb", + }, + wantKey: "tracking_db", + wantPath: "/tmp/tracking.db", + }, + { + name: "claude migrates legacy config fields", + account: AccountConfig{ + Provider: "claude_code", + Binary: "/tmp/stats-cache.json", + BaseURL: "/tmp/.claude.json", + }, + wantKey: "stats_cache", + wantPath: "/tmp/stats-cache.json", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + acct := tt.account + acct.NormalizeRuntimePaths() + if got := acct.Path(tt.wantKey, ""); got != tt.wantPath { + t.Fatalf("Path(%q) = %q, want %q", tt.wantKey, got, tt.wantPath) + } + }) + } +} diff --git a/internal/daemon/accounts.go b/internal/daemon/accounts.go index 3ba87c6..eac6bf3 100644 --- a/internal/daemon/accounts.go +++ b/internal/daemon/accounts.go @@ -147,7 +147,7 @@ func LoadAccountsAndNorm() ([]core.AccountConfig, core.ModelNormalizationConfig, func BuildReadModelRequest( accounts 
[]core.AccountConfig, providerLinks map[string]string, - timeWindow string, + timeWindow core.TimeWindow, ) ReadModelRequest { seen := make(map[string]bool, len(accounts)) outAccounts := make([]ReadModelAccount, 0, len(accounts)) @@ -171,7 +171,11 @@ func BuildReadModelRequest( links[source] = target } } - return ReadModelRequest{Accounts: outAccounts, ProviderLinks: links, TimeWindow: timeWindow} + return ReadModelRequest{ + Accounts: outAccounts, + ProviderLinks: links, + TimeWindow: normalizeReadModelTimeWindow(timeWindow), + } } func BuildReadModelRequestFromConfig() (ReadModelRequest, error) { @@ -180,7 +184,7 @@ func BuildReadModelRequestFromConfig() (ReadModelRequest, error) { return ReadModelRequest{}, err } accounts := resolveConfigAccounts(&cfg, ResolveAccounts) - return BuildReadModelRequest(accounts, cfg.Telemetry.ProviderLinks, cfg.Data.TimeWindow), nil + return BuildReadModelRequest(accounts, cfg.Telemetry.ProviderLinks, core.ParseTimeWindow(cfg.Data.TimeWindow)), nil } func ReadModelRequestKey(req ReadModelRequest) string { @@ -234,9 +238,15 @@ func ReadModelRequestKey(req ReadModelRequest) string { b.WriteString(key) b.WriteByte(';') } + b.WriteString("|window:") + b.WriteString(string(normalizeReadModelTimeWindow(req.TimeWindow))) return b.String() } +func normalizeReadModelTimeWindow(timeWindow core.TimeWindow) core.TimeWindow { + return core.ParseTimeWindow(strings.TrimSpace(string(timeWindow))) +} + func ReadModelTemplatesFromRequest( req ReadModelRequest, disabledAccounts map[string]bool, diff --git a/internal/daemon/accounts_test.go b/internal/daemon/accounts_test.go index a00626f..4208a9a 100644 --- a/internal/daemon/accounts_test.go +++ b/internal/daemon/accounts_test.go @@ -273,4 +273,26 @@ func TestSnapshotsHaveUsableData(t *testing.T) { } } +func TestReadModelRequestKeyIncludesNormalizedTimeWindow(t *testing.T) { + base := ReadModelRequest{ + Accounts: []ReadModelAccount{ + {AccountID: "openrouter", ProviderID: "openrouter"}, + }, + } + + 
key1d := ReadModelRequestKey(base) + + base.TimeWindow = core.TimeWindow30d + keyExplicit30d := ReadModelRequestKey(base) + if key1d != keyExplicit30d { + t.Fatalf("empty and explicit 30d normalization mismatch: %q vs %q", key1d, keyExplicit30d) + } + + base.TimeWindow = core.TimeWindow7d + key7d := ReadModelRequestKey(base) + if key7d == keyExplicit30d { + t.Fatalf("expected different cache keys for different windows, both were %q", key7d) + } +} + func float64Ptr(v float64) *float64 { return &v } diff --git a/internal/daemon/runtime.go b/internal/daemon/runtime.go index 70dc55a..9041671 100644 --- a/internal/daemon/runtime.go +++ b/internal/daemon/runtime.go @@ -25,7 +25,7 @@ type ViewRuntime struct { stateMu sync.RWMutex state DaemonState - timeWindow string + timeWindow core.TimeWindow } func NewViewRuntime( @@ -113,21 +113,24 @@ func (r *ViewRuntime) State() DaemonState { return r.state } -func (r *ViewRuntime) SetTimeWindow(tw string) { +func (r *ViewRuntime) SetTimeWindow(tw core.TimeWindow) { if r == nil { return } r.stateMu.Lock() - r.timeWindow = tw + r.timeWindow = normalizeReadModelTimeWindow(tw) r.stateMu.Unlock() } -func (r *ViewRuntime) TimeWindow() string { +func (r *ViewRuntime) TimeWindow() core.TimeWindow { if r == nil { - return "" + return core.TimeWindow30d } r.stateMu.RLock() defer r.stateMu.RUnlock() + if r.timeWindow == "" { + return core.TimeWindow30d + } return r.timeWindow } @@ -141,9 +144,14 @@ func (r *ViewRuntime) ResetEnsureThrottle() { r.SetClient(nil) } -func (r *ViewRuntime) ReadWithFallback(ctx context.Context) map[string]core.UsageSnapshot { +func (r *ViewRuntime) ReadWithFallback(ctx context.Context) SnapshotFrame { + return r.ReadWithFallbackForWindow(ctx, r.TimeWindow()) +} + +func (r *ViewRuntime) ReadWithFallbackForWindow(ctx context.Context, timeWindow core.TimeWindow) SnapshotFrame { + frame := SnapshotFrame{TimeWindow: normalizeReadModelTimeWindow(timeWindow)} if r == nil { - return nil + return frame } client := 
r.CurrentClient() @@ -151,12 +159,13 @@ func (r *ViewRuntime) ReadWithFallback(ctx context.Context) map[string]core.Usag client = r.EnsureClient(ctx) } - snaps, err := r.fetchReadModel(ctx, client, ReadModelRequest{TimeWindow: r.TimeWindow()}) + snaps, err := r.fetchReadModel(ctx, client, ReadModelRequest{TimeWindow: frame.TimeWindow}) if err != nil { r.throttledLogError(err) - return nil + return frame } - return snaps + frame.Snapshots = snaps + return frame } func (r *ViewRuntime) fetchReadModel( @@ -233,10 +242,10 @@ func StartBroadcaster( case <-ctx.Done(): return case <-ticker.C: - snaps := rt.ReadWithFallback(ctx) + frame := rt.ReadWithFallback(ctx) emitState() - if len(snaps) > 0 { - handler(snaps) + if len(frame.Snapshots) > 0 { + handler(frame) } } } @@ -244,11 +253,11 @@ func StartBroadcaster( } func warmUp(ctx context.Context, rt *ViewRuntime, handler SnapshotHandler, emitState func()) (cancelled bool) { - snaps := rt.ReadWithFallback(ctx) + frame := rt.ReadWithFallback(ctx) emitState() - if len(snaps) > 0 { - handler(snaps) - if SnapshotsHaveUsableData(snaps) { + if len(frame.Snapshots) > 0 { + handler(frame) + if SnapshotsHaveUsableData(frame.Snapshots) { return false } } @@ -260,13 +269,13 @@ func warmUp(ctx context.Context, rt *ViewRuntime, handler SnapshotHandler, emitS case <-ctx.Done(): return true case <-ticker.C: - snaps := rt.ReadWithFallback(ctx) + frame := rt.ReadWithFallback(ctx) emitState() - if len(snaps) == 0 { + if len(frame.Snapshots) == 0 { continue } - handler(snaps) - if SnapshotsHaveUsableData(snaps) { + handler(frame) + if SnapshotsHaveUsableData(frame.Snapshots) { return false } } diff --git a/internal/daemon/server.go b/internal/daemon/server.go index 7f9579d..563eb1d 100644 --- a/internal/daemon/server.go +++ b/internal/daemon/server.go @@ -283,14 +283,14 @@ func (s *Service) computeReadModel( if len(templates) == 0 { return map[string]core.UsageSnapshot{}, nil } - tw := core.ParseTimeWindow(req.TimeWindow) + tw := 
normalizeReadModelTimeWindow(req.TimeWindow) result, err := telemetry.ApplyCanonicalTelemetryViewWithOptions(ctx, s.cfg.DBPath, templates, telemetry.ReadModelOptions{ ProviderLinks: req.ProviderLinks, TimeWindowHours: tw.Hours(), - TimeWindow: req.TimeWindow, + TimeWindow: tw, }) core.Tracef("[read_model_perf] computeReadModel TOTAL: %dms (window=%s, accounts=%d, results=%d)", - time.Since(start).Milliseconds(), req.TimeWindow, len(req.Accounts), len(result)) + time.Since(start).Milliseconds(), tw, len(req.Accounts), len(result)) return result, err } @@ -314,7 +314,7 @@ func (s *Service) refreshReadModelCacheAsync( } return } - s.rmCache.set(cacheKey, snapshots, req.TimeWindow) + s.rmCache.set(cacheKey, snapshots) }() } @@ -1098,7 +1098,7 @@ func (s *Service) handleReadModel(w http.ResponseWriter, r *http.Request) { } cacheKey := ReadModelRequestKey(req) - if cached, cachedAt, ok := s.rmCache.get(cacheKey, req.TimeWindow); ok { + if cached, cachedAt, ok := s.rmCache.get(cacheKey); ok { core.Tracef("[read_model] cache hit key=%s age=%s providers=%d", cacheKey, time.Since(cachedAt).Round(time.Millisecond), len(cached)) for id, snap := range cached { core.Tracef("[read_model] %s: %d metrics", id, len(snap.Metrics)) @@ -1114,7 +1114,7 @@ func (s *Service) handleReadModel(w http.ResponseWriter, r *http.Request) { snapshots, err := s.computeReadModel(computeCtx, req) cancel() if err == nil && len(snapshots) > 0 { - s.rmCache.set(cacheKey, snapshots, req.TimeWindow) + s.rmCache.set(cacheKey, snapshots) writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: snapshots}) return } diff --git a/internal/daemon/types.go b/internal/daemon/types.go index 2d22d90..02da972 100644 --- a/internal/daemon/types.go +++ b/internal/daemon/types.go @@ -29,7 +29,7 @@ type ReadModelAccount struct { type ReadModelRequest struct { Accounts []ReadModelAccount `json:"accounts"` ProviderLinks map[string]string `json:"provider_links"` - TimeWindow string `json:"time_window,omitempty"` + 
TimeWindow core.TimeWindow `json:"time_window,omitempty"` } type ReadModelResponse struct { @@ -55,13 +55,10 @@ type HealthResponse struct { } type cachedReadModelEntry struct { - snapshots map[string]core.UsageSnapshot - updatedAt time.Time - timeWindow string + snapshots map[string]core.UsageSnapshot + updatedAt time.Time } -// readModelCache encapsulates the read-model caching layer with -// thread-safe access and in-flight deduplication. type readModelCache struct { mu sync.RWMutex entries map[string]cachedReadModelEntry @@ -75,13 +72,13 @@ func newReadModelCache() *readModelCache { } } -func (c *readModelCache) get(cacheKey, timeWindow string) (map[string]core.UsageSnapshot, time.Time, bool) { +func (c *readModelCache) get(cacheKey string) (map[string]core.UsageSnapshot, time.Time, bool) { if cacheKey == "" { return nil, time.Time{}, false } c.mu.RLock() entry, ok := c.entries[cacheKey] - if !ok || len(entry.snapshots) == 0 || entry.timeWindow != timeWindow { + if !ok || len(entry.snapshots) == 0 { c.mu.RUnlock() return nil, time.Time{}, false } @@ -90,16 +87,15 @@ func (c *readModelCache) get(cacheKey, timeWindow string) (map[string]core.Usage return cloned, entry.updatedAt, true } -func (c *readModelCache) set(cacheKey string, snapshots map[string]core.UsageSnapshot, timeWindow string) { +func (c *readModelCache) set(cacheKey string, snapshots map[string]core.UsageSnapshot) { if cacheKey == "" || len(snapshots) == 0 { return } now := time.Now().UTC() c.mu.Lock() c.entries[cacheKey] = cachedReadModelEntry{ - snapshots: core.DeepCloneSnapshots(snapshots), - updatedAt: now, - timeWindow: timeWindow, + snapshots: core.DeepCloneSnapshots(snapshots), + updatedAt: now, } // Evict stale entries to prevent unbounded growth. const maxEntries = 50 @@ -130,7 +126,6 @@ func (c *readModelCache) set(cacheKey string, snapshots map[string]core.UsageSna c.mu.Unlock() } -// beginRefresh marks a cache key as in-flight. Returns false if already refreshing. 
func (c *readModelCache) beginRefresh(cacheKey string) bool { if cacheKey == "" { return false @@ -157,7 +152,12 @@ type ingestTally struct { failed int } -type SnapshotHandler func(map[string]core.UsageSnapshot) +type SnapshotFrame struct { + Snapshots map[string]core.UsageSnapshot + TimeWindow core.TimeWindow +} + +type SnapshotHandler func(SnapshotFrame) type DaemonStatus int diff --git a/internal/dashboardapp/service.go b/internal/dashboardapp/service.go new file mode 100644 index 0000000..c1524e2 --- /dev/null +++ b/internal/dashboardapp/service.go @@ -0,0 +1,89 @@ +package dashboardapp + +import ( + "context" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/config" + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/integrations" + "github.com/janekbaraniewski/openusage/internal/providers" +) + +type Service struct{} + +func NewService() *Service { + return &Service{} +} + +func (s *Service) SaveTheme(themeName string) error { + return config.SaveTheme(themeName) +} + +func (s *Service) SaveDashboardProviders(providersCfg []config.DashboardProviderConfig) error { + return config.SaveDashboardProviders(providersCfg) +} + +func (s *Service) SaveDashboardView(view string) error { + return config.SaveDashboardView(view) +} + +func (s *Service) SaveDashboardWidgetSections(sections []config.DashboardWidgetSection) error { + return config.SaveDashboardWidgetSections(sections) +} + +func (s *Service) SaveDashboardHideSectionsWithNoData(hide bool) error { + return config.SaveDashboardHideSectionsWithNoData(hide) +} + +func (s *Service) SaveTimeWindow(window string) error { + return config.SaveTimeWindow(window) +} + +func (s *Service) ValidateAPIKey(accountID, providerID, apiKey string) (bool, string) { + var provider core.UsageProvider + for _, p := range providers.AllProviders() { + if p.ID() == providerID { + provider = p + break + } + } + if provider == nil { + return false, "unknown 
provider" + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + snap, err := provider.Fetch(ctx, core.AccountConfig{ + ID: accountID, + Provider: providerID, + Token: apiKey, + }) + if err != nil { + return false, err.Error() + } + if snap.Status == core.StatusAuth || snap.Status == core.StatusError { + msg := strings.TrimSpace(snap.Message) + if msg == "" { + msg = string(snap.Status) + } + return false, msg + } + return true, "" +} + +func (s *Service) SaveCredential(accountID, apiKey string) error { + return config.SaveCredential(accountID, apiKey) +} + +func (s *Service) DeleteCredential(accountID string) error { + return config.DeleteCredential(accountID) +} + +func (s *Service) InstallIntegration(id integrations.ID) ([]integrations.Status, error) { + manager := integrations.NewDefaultManager() + err := manager.Install(id) + return manager.ListStatuses(), err +} diff --git a/internal/detect/claude_code.go b/internal/detect/claude_code.go index f37073d..1b5333c 100644 --- a/internal/detect/claude_code.go +++ b/internal/detect/claude_code.go @@ -38,8 +38,6 @@ func detectClaudeCode(result *Result) { ID: "claude-code", Provider: "claude_code", Auth: "local", - Binary: statsFile, // compat fallback - BaseURL: accountFile, // compat fallback Paths: map[string]string{ "stats_cache": statsFile, "account_config": accountFile, diff --git a/internal/detect/cursor.go b/internal/detect/cursor.go index 02513d1..9e84878 100644 --- a/internal/detect/cursor.go +++ b/internal/detect/cursor.go @@ -55,12 +55,10 @@ func detectCursor(result *Result) { if hasTracking { acct.Paths["tracking_db"] = trackingDB acct.ExtraData["tracking_db"] = trackingDB - acct.Binary = trackingDB // compat fallback } if hasState { acct.Paths["state_db"] = stateDB acct.ExtraData["state_db"] = stateDB - acct.BaseURL = stateDB // compat fallback } if hasState { diff --git a/internal/providers/claude_code/claude_code.go 
b/internal/providers/claude_code/claude_code.go index 36f3ee8..240704a 100644 --- a/internal/providers/claude_code/claude_code.go +++ b/internal/providers/claude_code/claude_code.go @@ -297,21 +297,13 @@ func buildStatsCandidates(explicitPath, claudeDir, home string) []string { } func (p *Provider) DetailWidget() core.DetailWidget { - return core.DetailWidget{ - Sections: []core.DetailSection{ - {Name: "Usage", Order: 1, Style: core.DetailSectionStyleUsage}, - {Name: "Models", Order: 2, Style: core.DetailSectionStyleModels}, - {Name: "Languages", Order: 3, Style: core.DetailSectionStyleLanguages}, - {Name: "MCP Usage", Order: 4, Style: core.DetailSectionStyleMCP}, - {Name: "Spending", Order: 5, Style: core.DetailSectionStyleSpending}, - {Name: "Trends", Order: 6, Style: core.DetailSectionStyleTrends}, - {Name: "Tokens", Order: 7, Style: core.DetailSectionStyleTokens}, - {Name: "Activity", Order: 8, Style: core.DetailSectionStyleActivity}, - }, - } + return core.CodingToolDetailWidget(true) } func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.UsageSnapshot, error) { + if strings.TrimSpace(acct.Provider) == "" { + acct.Provider = p.ID() + } snap := core.UsageSnapshot{ ProviderID: p.ID(), AccountID: acct.ID, @@ -330,8 +322,9 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa home = filepath.Dir(claudeDir) // derive "home" from the override } - statsPath := acct.Path("stats_cache", acct.Binary) - accountPath := acct.Path("account_config", acct.BaseURL) + acct.NormalizeRuntimePaths() + statsPath := acct.Path("stats_cache", "") + accountPath := acct.Path("account_config", "") if accountPath == "" { accountPath = filepath.Join(home, ".claude.json") @@ -868,19 +861,7 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna blockStartCandidates := []time.Time{} - type parsedUsage struct { - timestamp time.Time - model string - usage *jsonlUsage - requestID string - messageID string - 
sessionID string - cwd string - sourcePath string - content []jsonlContent - } - - var allUsages []parsedUsage + var allUsages []conversationRecord modelTotals := make(map[string]*modelUsageTotals) clientTotals := make(map[string]*modelUsageTotals) projectTotals := make(map[string]*modelUsageTotals) @@ -965,70 +946,8 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna } return sanitizeModelName(dir) } - usageDedupKey := func(u parsedUsage) string { - if u.requestID != "" { - return "req:" + u.requestID - } - if u.messageID != "" { - return "msg:" + u.messageID - } - if u.usage == nil { - return "" - } - return fmt.Sprintf("%s|%s|%d|%d|%d|%d|%d", - u.sessionID, - u.timestamp.UTC().Format(time.RFC3339Nano), - u.usage.InputTokens, - u.usage.OutputTokens, - u.usage.CacheReadInputTokens, - u.usage.CacheCreationInputTokens, - u.usage.ReasoningTokens, - ) - } - toolDedupKey := func(u parsedUsage, idx int, item jsonlContent) string { - base := u.requestID - if base == "" { - base = u.messageID - } - if base == "" { - base = u.sessionID + "|" + u.timestamp.UTC().Format(time.RFC3339Nano) - } - if item.ID != "" { - return base + "|tool|" + item.ID - } - name := strings.ToLower(strings.TrimSpace(item.Name)) - if name == "" { - name = "unknown" - } - return fmt.Sprintf("%s|tool|%s|%d", base, name, idx) - } - for _, fpath := range jsonlFiles { - entries := parseJSONLFile(fpath) - for _, entry := range entries { - if entry.Type != "assistant" || entry.Message == nil { - continue - } - ts, ok := parseJSONLTimestamp(entry.Timestamp) - if !ok { - continue - } - model := entry.Message.Model - if model == "" { - model = "unknown" - } - allUsages = append(allUsages, parsedUsage{ - timestamp: ts, - model: model, - usage: entry.Message.Usage, - requestID: entry.RequestID, - messageID: entry.Message.ID, - sessionID: entry.SessionID, - cwd: entry.CWD, - sourcePath: fpath, - content: entry.Message.Content, - }) - } + allUsages = append(allUsages, 
parseConversationRecords(fpath)...) } sort.Slice(allUsages, func(i, j int) bool { @@ -1040,7 +959,7 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna if u.usage == nil { continue } - key := usageDedupKey(u) + key := conversationUsageDedupKey(u) if key != "" { if seenForBlock[key] { continue @@ -1064,7 +983,7 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna if item.Type != "tool_use" { continue } - toolKey := toolDedupKey(u, idx, item) + toolKey := conversationToolDedupKey(u, idx, item) if seenToolKeys[toolKey] { continue } @@ -1108,7 +1027,7 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna if u.usage == nil { continue } - usageKey := usageDedupKey(u) + usageKey := conversationUsageDedupKey(u) if usageKey != "" && seenUsageKeys[usageKey] { continue } diff --git a/internal/providers/claude_code/conversation_records.go b/internal/providers/claude_code/conversation_records.go new file mode 100644 index 0000000..98ede9b --- /dev/null +++ b/internal/providers/claude_code/conversation_records.go @@ -0,0 +1,125 @@ +package claude_code + +import ( + "bufio" + "encoding/json" + "fmt" + "os" + "strings" + "time" +) + +type conversationRecord struct { + lineNumber int + timestamp time.Time + model string + usage *jsonlUsage + requestID string + messageID string + sessionID string + cwd string + sourcePath string + content []jsonlContent +} + +func parseConversationRecords(path string) []conversationRecord { + f, err := os.Open(path) + if err != nil { + return nil + } + defer f.Close() + + var records []conversationRecord + scanner := bufio.NewScanner(f) + scanner.Buffer(make([]byte, 0, 256*1024), 10*1024*1024) + lineNumber := 0 + + for scanner.Scan() { + lineNumber++ + line := scanner.Bytes() + if len(line) == 0 { + continue + } + + var entry jsonlEntry + if err := json.Unmarshal(line, &entry); err != nil { + continue + } + if entry.Type != "assistant" || entry.Message == 
nil { + continue + } + ts, ok := parseJSONLTimestamp(entry.Timestamp) + if !ok { + continue + } + model := entry.Message.Model + if model == "" { + model = "unknown" + } + records = append(records, conversationRecord{ + lineNumber: lineNumber, + timestamp: ts, + model: model, + usage: entry.Message.Usage, + requestID: entry.RequestID, + messageID: entry.Message.ID, + sessionID: entry.SessionID, + cwd: entry.CWD, + sourcePath: path, + content: entry.Message.Content, + }) + } + return records +} + +func conversationUsageDedupKey(record conversationRecord) string { + if record.requestID != "" { + return "req:" + record.requestID + } + if record.messageID != "" { + return "msg:" + record.messageID + } + if record.usage == nil { + return "" + } + return fmt.Sprintf("%s|%s|%d|%d|%d|%d|%d", + record.sessionID, + record.timestamp.UTC().Format(time.RFC3339Nano), + record.usage.InputTokens, + record.usage.OutputTokens, + record.usage.CacheReadInputTokens, + record.usage.CacheCreationInputTokens, + record.usage.ReasoningTokens, + ) +} + +func conversationToolDedupKey(record conversationRecord, idx int, item jsonlContent) string { + base := record.requestID + if base == "" { + base = record.messageID + } + if base == "" { + base = record.sessionID + "|" + record.timestamp.UTC().Format(time.RFC3339Nano) + } + if item.ID != "" { + return base + "|tool|" + item.ID + } + name := strings.ToLower(strings.TrimSpace(item.Name)) + if name == "" { + name = "unknown" + } + return fmt.Sprintf("%s|tool|%s|%d", base, name, idx) +} + +func conversationTotalTokens(usage *jsonlUsage) int64 { + if usage == nil { + return 0 + } + return int64( + usage.InputTokens + + usage.OutputTokens + + usage.CacheReadInputTokens + + usage.CacheCreationInputTokens + + usage.ReasoningTokens, + ) +} diff --git a/internal/providers/claude_code/telemetry_usage.go b/internal/providers/claude_code/telemetry_usage.go index 3c3c6fb..66be76f 100644 --- a/internal/providers/claude_code/telemetry_usage.go +++ 
b/internal/providers/claude_code/telemetry_usage.go @@ -1,7 +1,6 @@ package claude_code import ( - "bufio" "context" "encoding/json" "fmt" @@ -14,8 +13,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/providers/shared" ) -const telemetryScannerBufferSize = 8 * 1024 * 1024 - func (p *Provider) System() string { return p.ID() } func (p *Provider) DefaultCollectOptions() shared.TelemetryCollectOptions { @@ -68,31 +65,16 @@ func DefaultTelemetryProjectsDirs() (string, string) { // ParseTelemetryConversationFile parses a Claude Code conversation JSONL file // and emits message/tool telemetry events. func ParseTelemetryConversationFile(path string) ([]shared.TelemetryEvent, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - seenUsage := make(map[string]bool) seenTools := make(map[string]bool) var out []shared.TelemetryEvent - - scanner := bufio.NewScanner(f) - scanner.Buffer(make([]byte, 0, 512*1024), telemetryScannerBufferSize) - lineNumber := 0 - - for scanner.Scan() { - lineNumber++ - var entry jsonlEntry - if err := json.Unmarshal(scanner.Bytes(), &entry); err != nil { - continue - } - if entry.Type != "assistant" || entry.Message == nil || entry.Message.Usage == nil { + records := parseConversationRecords(path) + for _, record := range records { + if record.usage == nil { continue } - usageKey := claudeTelemetryUsageDedupKey(entry) + usageKey := conversationUsageDedupKey(record) if usageKey != "" && seenUsage[usageKey] { continue } @@ -100,31 +82,21 @@ func ParseTelemetryConversationFile(path string) ([]shared.TelemetryEvent, error seenUsage[usageKey] = true } - ts := time.Now().UTC() - if parsed, err := shared.ParseTimestampString(entry.Timestamp); err == nil { - ts = parsed - } - - model := strings.TrimSpace(entry.Message.Model) + ts := record.timestamp + model := strings.TrimSpace(record.model) if model == "" { model = "unknown" } - usage := entry.Message.Usage - totalTokens := int64( - usage.InputTokens + 
- usage.OutputTokens + - usage.CacheReadInputTokens + - usage.CacheCreationInputTokens + - usage.ReasoningTokens, - ) + usage := record.usage + totalTokens := conversationTotalTokens(usage) cost := estimateCost(model, usage) - turnID := core.FirstNonEmpty(entry.RequestID, entry.Message.ID) + turnID := core.FirstNonEmpty(record.requestID, record.messageID) if turnID == "" { - turnID = fmt.Sprintf("%s:%d", strings.TrimSpace(entry.SessionID), lineNumber) + turnID = fmt.Sprintf("%s:%d", strings.TrimSpace(record.sessionID), record.lineNumber) } - messageID := strings.TrimSpace(entry.Message.ID) + messageID := strings.TrimSpace(record.messageID) if messageID == "" { messageID = turnID } @@ -134,8 +106,8 @@ func ParseTelemetryConversationFile(path string) ([]shared.TelemetryEvent, error Channel: shared.TelemetryChannelJSONL, OccurredAt: ts, AccountID: "claude-code", - WorkspaceID: shared.SanitizeWorkspace(entry.CWD), - SessionID: strings.TrimSpace(entry.SessionID), + WorkspaceID: shared.SanitizeWorkspace(record.cwd), + SessionID: strings.TrimSpace(record.sessionID), TurnID: turnID, MessageID: messageID, ProviderID: "anthropic", @@ -154,15 +126,15 @@ func ParseTelemetryConversationFile(path string) ([]shared.TelemetryEvent, error Status: shared.TelemetryStatusOK, Payload: map[string]any{ "file": path, - "line": lineNumber, + "line": record.lineNumber, }, }) - for idx, part := range entry.Message.Content { + for idx, part := range record.content { if part.Type != "tool_use" { continue } - toolKey := claudeTelemetryToolDedupKey(entry, idx, part) + toolKey := conversationToolDedupKey(record, idx, part) if toolKey != "" && seenTools[toolKey] { continue } @@ -186,8 +158,8 @@ func ParseTelemetryConversationFile(path string) ([]shared.TelemetryEvent, error Channel: shared.TelemetryChannelJSONL, OccurredAt: ts, AccountID: "claude-code", - WorkspaceID: shared.SanitizeWorkspace(entry.CWD), - SessionID: strings.TrimSpace(entry.SessionID), + WorkspaceID: 
shared.SanitizeWorkspace(record.cwd), + SessionID: strings.TrimSpace(record.sessionID), TurnID: turnID, MessageID: messageID, ToolCallID: strings.TrimSpace(part.ID), @@ -202,62 +174,15 @@ func ParseTelemetryConversationFile(path string) ([]shared.TelemetryEvent, error Status: shared.TelemetryStatusOK, Payload: map[string]any{ "source_file": path, - "line": lineNumber, + "line": record.lineNumber, "file": toolFilePath, }, }) } } - - if err := scanner.Err(); err != nil { - return out, err - } return out, nil } -func claudeTelemetryUsageDedupKey(entry jsonlEntry) string { - if id := strings.TrimSpace(entry.RequestID); id != "" { - return "req:" + id - } - if entry.Message != nil { - if id := strings.TrimSpace(entry.Message.ID); id != "" { - return "msg:" + id - } - if entry.Message.Usage != nil { - u := entry.Message.Usage - return fmt.Sprintf("fp:%s|%s|%s|%d|%d|%d|%d|%d", - strings.TrimSpace(entry.SessionID), - strings.TrimSpace(entry.Timestamp), - strings.TrimSpace(entry.Message.Model), - u.InputTokens, - u.OutputTokens, - u.CacheReadInputTokens, - u.CacheCreationInputTokens, - u.ReasoningTokens, - ) - } - } - return "" -} - -func claudeTelemetryToolDedupKey(entry jsonlEntry, idx int, part jsonlContent) string { - base := strings.TrimSpace(entry.RequestID) - if base == "" && entry.Message != nil { - base = strings.TrimSpace(entry.Message.ID) - } - if base == "" { - base = strings.TrimSpace(entry.SessionID) + "|" + strings.TrimSpace(entry.Timestamp) - } - if id := strings.TrimSpace(part.ID); id != "" { - return base + "|tool:" + id - } - name := strings.ToLower(strings.TrimSpace(part.Name)) - if name == "" { - name = "unknown" - } - return fmt.Sprintf("%s|tool:%s|%d", base, name, idx) -} - // ParseTelemetryHookPayload parses Claude Code hook stdin payloads. 
func ParseTelemetryHookPayload(raw []byte, opts shared.TelemetryCollectOptions) ([]shared.TelemetryEvent, error) { trimmed := strings.TrimSpace(string(raw)) diff --git a/internal/providers/codex/codex.go b/internal/providers/codex/codex.go index 9200366..6d7033c 100644 --- a/internal/providers/codex/codex.go +++ b/internal/providers/codex/codex.go @@ -63,32 +63,6 @@ func New() *Provider { } } -type sessionEvent struct { - Timestamp string `json:"timestamp"` - Type string `json:"type"` - Payload json.RawMessage `json:"payload"` -} - -type eventPayload struct { - Type string `json:"type"` - Info *tokenInfo `json:"info,omitempty"` - RateLimits *rateLimits `json:"rate_limits,omitempty"` -} - -type tokenInfo struct { - TotalTokenUsage tokenUsage `json:"total_token_usage"` - LastTokenUsage tokenUsage `json:"last_token_usage"` - ModelContextWindow int `json:"model_context_window"` -} - -type tokenUsage struct { - InputTokens int `json:"input_tokens"` - CachedInputTokens int `json:"cached_input_tokens"` - OutputTokens int `json:"output_tokens"` - ReasoningOutputTokens int `json:"reasoning_output_tokens"` - TotalTokens int `json:"total_tokens"` -} - type rateLimits struct { Primary *rateLimitBucket `json:"primary,omitempty"` Secondary *rateLimitBucket `json:"secondary,omitempty"` @@ -174,16 +148,6 @@ type usageCredits struct { Balance any `json:"balance"` } -type sessionMetaPayload struct { - Source string `json:"source,omitempty"` - Originator string `json:"originator,omitempty"` - Model string `json:"model,omitempty"` -} - -type turnContextPayload struct { - Model string `json:"model,omitempty"` -} - type usageEntry struct { Name string Data tokenUsage @@ -227,18 +191,7 @@ type countEntry struct { } func (p *Provider) DetailWidget() core.DetailWidget { - return core.DetailWidget{ - Sections: []core.DetailSection{ - {Name: "Usage", Order: 1, Style: core.DetailSectionStyleUsage}, - {Name: "Models", Order: 2, Style: core.DetailSectionStyleModels}, - {Name: "Languages", 
Order: 3, Style: core.DetailSectionStyleLanguages}, - {Name: "MCP Usage", Order: 4, Style: core.DetailSectionStyleMCP}, - {Name: "Spending", Order: 5, Style: core.DetailSectionStyleSpending}, - {Name: "Trends", Order: 6, Style: core.DetailSectionStyleTrends}, - {Name: "Tokens", Order: 7, Style: core.DetailSectionStyleTokens}, - {Name: "Activity", Order: 8, Style: core.DetailSectionStyleActivity}, - }, - } + return core.CodingToolDetailWidget(true) } func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.UsageSnapshot, error) { @@ -877,56 +830,25 @@ func (p *Provider) readSessionUsageBreakdowns(sessionsDir string, snap *core.Usa var previous tokenUsage var hasPrevious bool var countedSession bool - - file, err := os.Open(path) - if err != nil { - return nil - } - defer file.Close() - - scanner := bufio.NewScanner(file) - buf := make([]byte, 0, 512*1024) - scanner.Buffer(buf, maxScannerBufferSize) - - for scanner.Scan() { - line := scanner.Bytes() - if !bytes.Contains(line, []byte(`"type":"event_msg"`)) && - !bytes.Contains(line, []byte(`"type":"turn_context"`)) && - !bytes.Contains(line, []byte(`"type":"session_meta"`)) && - !bytes.Contains(line, []byte(`"type":"response_item"`)) { - continue - } - - var event sessionEvent - if err := json.Unmarshal(line, &event); err != nil { - continue - } - - switch event.Type { - case "session_meta": - var meta sessionMetaPayload - if json.Unmarshal(event.Payload, &meta) == nil { - sessionClient = classifyClient(meta.Source, meta.Originator) - if meta.Model != "" { - currentModel = meta.Model - } - } - case "turn_context": - var tc turnContextPayload - if json.Unmarshal(event.Payload, &tc) == nil && strings.TrimSpace(tc.Model) != "" { - currentModel = tc.Model + return walkSessionFile(path, func(record sessionLine) error { + switch { + case record.SessionMeta != nil: + sessionClient = classifyClient(record.SessionMeta.Source, record.SessionMeta.Originator) + if record.SessionMeta.Model != "" { + 
currentModel = record.SessionMeta.Model } - case "event_msg": - var payload eventPayload - if json.Unmarshal(event.Payload, &payload) != nil { - continue + case record.TurnContext != nil: + if strings.TrimSpace(record.TurnContext.Model) != "" { + currentModel = record.TurnContext.Model } + case record.EventPayload != nil: + payload := record.EventPayload if payload.Type == "user_message" { promptCount++ - continue + return nil } if payload.Type != "token_count" || payload.Info == nil { - continue + return nil } total := payload.Info.TotalTokenUsage @@ -941,12 +863,12 @@ func (p *Provider) readSessionUsageBreakdowns(sessionsDir string, snap *core.Usa hasPrevious = true if delta.TotalTokens <= 0 { - continue + return nil } modelName := normalizeModelName(currentModel) clientName := normalizeClientName(sessionClient) - day := dayFromTimestamp(event.Timestamp) + day := dayFromTimestamp(record.Timestamp) if day == "" { day = defaultDay } @@ -968,11 +890,8 @@ func (p *Provider) readSessionUsageBreakdowns(sessionsDir string, snap *core.Usa clientSessions[clientName]++ countedSession = true } - case "response_item": - var item responseItemPayload - if json.Unmarshal(event.Payload, &item) != nil { - continue - } + case record.ResponseItem != nil: + item := record.ResponseItem switch item.Type { case "function_call": tool := normalizeToolName(item.Name) @@ -1000,9 +919,9 @@ func (p *Provider) readSessionUsageBreakdowns(sessionsDir string, snap *core.Usa setToolCallOutcome(item.CallID, item.Output, callOutcome) } } - } - return nil + return nil + }) }) if walkErr != nil { return fmt.Errorf("walking session files: %w", walkErr) @@ -1931,43 +1850,18 @@ func findLatestSessionFile(sessionsDir string) (string, error) { } func findLastTokenCount(path string) (*eventPayload, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - var lastPayload *eventPayload - - scanner := bufio.NewScanner(f) - buf := make([]byte, 0, 256*1024) - 
scanner.Buffer(buf, maxScannerBufferSize) - - for scanner.Scan() { - line := scanner.Bytes() - if !bytes.Contains(line, []byte(`"type":"event_msg"`)) { - continue - } - - var event sessionEvent - if err := json.Unmarshal(line, &event); err != nil { - continue - } - if event.Type != "event_msg" { - continue - } - - var payload eventPayload - if err := json.Unmarshal(event.Payload, &payload); err != nil { - continue - } - - if payload.Type == "token_count" { - lastPayload = &payload + if err := walkSessionFile(path, func(record sessionLine) error { + if record.EventPayload == nil || record.EventPayload.Type != "token_count" { + return nil } + payload := *record.EventPayload + lastPayload = &payload + return nil + }); err != nil { + return nil, err } - - return lastPayload, scanner.Err() + return lastPayload, nil } func (p *Provider) readDailySessionCounts(sessionsDir string, snap *core.UsageSnapshot) { diff --git a/internal/providers/codex/session_decoder.go b/internal/providers/codex/session_decoder.go new file mode 100644 index 0000000..977014e --- /dev/null +++ b/internal/providers/codex/session_decoder.go @@ -0,0 +1,128 @@ +package codex + +import ( + "bufio" + "bytes" + "encoding/json" + "os" +) + +type sessionEvent struct { + Timestamp string `json:"timestamp"` + Type string `json:"type"` + Payload json.RawMessage `json:"payload"` +} + +type eventPayload struct { + Type string `json:"type"` + Info *tokenInfo `json:"info,omitempty"` + RateLimits *rateLimits `json:"rate_limits,omitempty"` + RequestID string `json:"request_id,omitempty"` + MessageID string `json:"message_id,omitempty"` +} + +type tokenInfo struct { + TotalTokenUsage tokenUsage `json:"total_token_usage"` + LastTokenUsage tokenUsage `json:"last_token_usage"` + ModelContextWindow int `json:"model_context_window"` +} + +type tokenUsage struct { + InputTokens int `json:"input_tokens"` + CachedInputTokens int `json:"cached_input_tokens"` + OutputTokens int `json:"output_tokens"` + ReasoningOutputTokens 
int `json:"reasoning_output_tokens"` + TotalTokens int `json:"total_tokens"` +} + +type sessionMetaPayload struct { + ID string `json:"id,omitempty"` + SessionID string `json:"session_id,omitempty"` + Source string `json:"source,omitempty"` + Originator string `json:"originator,omitempty"` + Model string `json:"model,omitempty"` + CWD string `json:"cwd,omitempty"` + ModelProvider string `json:"model_provider,omitempty"` +} + +type turnContextPayload struct { + Model string `json:"model,omitempty"` + TurnID string `json:"turn_id,omitempty"` +} + +type sessionLine struct { + Timestamp string + LineNumber int + SessionMeta *sessionMetaPayload + TurnContext *turnContextPayload + EventPayload *eventPayload + ResponseItem *responseItemPayload +} + +func walkSessionFile(path string, fn func(sessionLine) error) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + scanner.Buffer(make([]byte, 0, 512*1024), maxScannerBufferSize) + lineNumber := 0 + + for scanner.Scan() { + lineNumber++ + line := scanner.Bytes() + if !bytes.Contains(line, []byte(`"type":"event_msg"`)) && + !bytes.Contains(line, []byte(`"type":"turn_context"`)) && + !bytes.Contains(line, []byte(`"type":"session_meta"`)) && + !bytes.Contains(line, []byte(`"type":"response_item"`)) { + continue + } + + var event sessionEvent + if err := json.Unmarshal(line, &event); err != nil { + continue + } + + record := sessionLine{ + Timestamp: event.Timestamp, + LineNumber: lineNumber, + } + + switch event.Type { + case "session_meta": + var meta sessionMetaPayload + if json.Unmarshal(event.Payload, &meta) != nil { + continue + } + record.SessionMeta = &meta + case "turn_context": + var tc turnContextPayload + if json.Unmarshal(event.Payload, &tc) != nil { + continue + } + record.TurnContext = &tc + case "event_msg": + var payload eventPayload + if json.Unmarshal(event.Payload, &payload) != nil { + continue + } + record.EventPayload = &payload + case 
"response_item": + var item responseItemPayload + if json.Unmarshal(event.Payload, &item) != nil { + continue + } + record.ResponseItem = &item + default: + continue + } + + if err := fn(record); err != nil { + return err + } + } + + return scanner.Err() +} diff --git a/internal/providers/codex/telemetry_usage.go b/internal/providers/codex/telemetry_usage.go index df307cd..a106736 100644 --- a/internal/providers/codex/telemetry_usage.go +++ b/internal/providers/codex/telemetry_usage.go @@ -1,7 +1,6 @@ package codex import ( - "bufio" "context" "encoding/json" "fmt" @@ -15,38 +14,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/providers/shared" ) -type telemetrySessionEvent struct { - Timestamp string `json:"timestamp"` - Type string `json:"type"` - Payload json.RawMessage `json:"payload"` -} - -type telemetrySessionMeta struct { - ID string `json:"id"` - SessionID string `json:"session_id"` - Model string `json:"model"` - CWD string `json:"cwd"` - Source string `json:"source"` - Originator string `json:"originator"` - ModelProvider string `json:"model_provider"` -} - -type telemetryTurnContext struct { - Model string `json:"model"` - TurnID string `json:"turn_id"` -} - -type telemetryTokenInfo struct { - TotalTokenUsage tokenUsage `json:"total_token_usage"` -} - -type telemetryEventPayload struct { - Type string `json:"type"` - Info *telemetryTokenInfo `json:"info"` - RequestID string `json:"request_id,omitempty"` - MessageID string `json:"message_id,omitempty"` -} - const ( codexTelemetryProviderID = "codex" codexTelemetryUpstreamModel = "openai" @@ -104,12 +71,6 @@ func DefaultTelemetrySessionsDir() string { // ParseTelemetrySessionFile parses a Codex session JSONL file into normalized telemetry events. 
func ParseTelemetrySessionFile(path string) ([]shared.TelemetryEvent, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - sessionID := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) model := "" upstreamProviderID := codexTelemetryUpstreamModel @@ -124,52 +85,36 @@ func ParseTelemetrySessionFile(path string) ([]shared.TelemetryEvent, error) { toolByCallID := make(map[string]int) var out []shared.TelemetryEvent - scanner := bufio.NewScanner(f) - scanner.Buffer(make([]byte, 0, 512*1024), maxScannerBufferSize) - lineNumber := 0 - - for scanner.Scan() { - lineNumber++ - var ev telemetrySessionEvent - if err := json.Unmarshal(scanner.Bytes(), &ev); err != nil { - continue - } - - switch ev.Type { - case "session_meta": - var meta telemetrySessionMeta - if json.Unmarshal(ev.Payload, &meta) == nil { - sid := core.FirstNonEmpty(meta.SessionID, meta.ID) - if sid != "" { - sessionID = sid - } - if strings.TrimSpace(meta.Model) != "" { - model = strings.TrimSpace(meta.Model) - } - if strings.TrimSpace(meta.ModelProvider) != "" { - upstreamProviderID = strings.TrimSpace(meta.ModelProvider) - } - if ws := shared.SanitizeWorkspace(meta.CWD); ws != "" { - workspaceID = ws - } - clientSource = strings.TrimSpace(meta.Source) - clientOriginator = strings.TrimSpace(meta.Originator) - clientName = classifyClient(clientSource, clientOriginator) + if err := walkSessionFile(path, func(record sessionLine) error { + switch { + case record.SessionMeta != nil: + sid := core.FirstNonEmpty(record.SessionMeta.SessionID, record.SessionMeta.ID) + if sid != "" { + sessionID = sid } - case "turn_context": - var tc telemetryTurnContext - if json.Unmarshal(ev.Payload, &tc) == nil { - if strings.TrimSpace(tc.Model) != "" { - model = strings.TrimSpace(tc.Model) - } - if strings.TrimSpace(tc.TurnID) != "" { - currentTurnID = strings.TrimSpace(tc.TurnID) - } + if strings.TrimSpace(record.SessionMeta.Model) != "" { + model = 
strings.TrimSpace(record.SessionMeta.Model) + } + if strings.TrimSpace(record.SessionMeta.ModelProvider) != "" { + upstreamProviderID = strings.TrimSpace(record.SessionMeta.ModelProvider) } - case "event_msg": - var payload telemetryEventPayload - if json.Unmarshal(ev.Payload, &payload) != nil || payload.Type != "token_count" || payload.Info == nil { - continue + if ws := shared.SanitizeWorkspace(record.SessionMeta.CWD); ws != "" { + workspaceID = ws + } + clientSource = strings.TrimSpace(record.SessionMeta.Source) + clientOriginator = strings.TrimSpace(record.SessionMeta.Originator) + clientName = classifyClient(clientSource, clientOriginator) + case record.TurnContext != nil: + if strings.TrimSpace(record.TurnContext.Model) != "" { + model = strings.TrimSpace(record.TurnContext.Model) + } + if strings.TrimSpace(record.TurnContext.TurnID) != "" { + currentTurnID = strings.TrimSpace(record.TurnContext.TurnID) + } + case record.EventPayload != nil: + payload := record.EventPayload + if payload.Type != "token_count" || payload.Info == nil { + return nil } total := payload.Info.TotalTokenUsage @@ -184,12 +129,12 @@ func ParseTelemetrySessionFile(path string) ([]shared.TelemetryEvent, error) { hasPrevious = true if delta.TotalTokens <= 0 { - continue + return nil } turnIndex++ occurredAt := time.Now().UTC() - if ts, err := shared.ParseTimestampString(ev.Timestamp); err == nil { + if ts, err := shared.ParseTimestampString(record.Timestamp); err == nil { occurredAt = ts } @@ -228,21 +173,17 @@ func ParseTelemetrySessionFile(path string) ([]shared.TelemetryEvent, error) { Status: shared.TelemetryStatusOK, Payload: map[string]any{ "source_file": path, - "line": lineNumber, + "line": record.LineNumber, "upstream_provider": upstreamProviderID, "client": clientName, "client_source": clientSource, "client_originator": clientOriginator, }, }) - case "response_item": - var item responseItemPayload - if json.Unmarshal(ev.Payload, &item) != nil { - continue - } - + case 
record.ResponseItem != nil: + item := record.ResponseItem occurredAt := time.Now().UTC() - if ts, err := shared.ParseTimestampString(ev.Timestamp); err == nil { + if ts, err := shared.ParseTimestampString(record.Timestamp); err == nil { occurredAt = ts } @@ -256,13 +197,13 @@ func ParseTelemetrySessionFile(path string) ([]shared.TelemetryEvent, error) { toolName = "unknown" } - turnID := fmt.Sprintf("%s:tool:%d", sessionID, lineNumber) + turnID := fmt.Sprintf("%s:tool:%d", sessionID, record.LineNumber) if strings.TrimSpace(currentTurnID) != "" { turnID = strings.TrimSpace(currentTurnID) } callID := strings.TrimSpace(item.CallID) - messageID := core.FirstNonEmpty(callID, turnID, fmt.Sprintf("%s:%d", sessionID, lineNumber)) - eventPayload := codexBuildToolPayload(path, lineNumber, item) + messageID := core.FirstNonEmpty(callID, turnID, fmt.Sprintf("%s:%d", sessionID, record.LineNumber)) + eventPayload := codexBuildToolPayload(path, record.LineNumber, *item) if strings.TrimSpace(upstreamProviderID) != "" { eventPayload["upstream_provider"] = strings.TrimSpace(upstreamProviderID) } @@ -302,7 +243,7 @@ func ParseTelemetrySessionFile(path string) ([]shared.TelemetryEvent, error) { callID := strings.TrimSpace(item.CallID) idx, ok := toolByCallID[callID] if !ok || idx < 0 || idx >= len(out) { - continue + return nil } switch inferToolCallOutcome(item.Output) { case 2: @@ -314,9 +255,8 @@ func ParseTelemetrySessionFile(path string) ([]shared.TelemetryEvent, error) { } } } - } - - if err := scanner.Err(); err != nil { + return nil + }); err != nil { return out, err } return out, nil diff --git a/internal/providers/copilot/copilot.go b/internal/providers/copilot/copilot.go index 2b29439..e50a025 100644 --- a/internal/providers/copilot/copilot.go +++ b/internal/providers/copilot/copilot.go @@ -57,18 +57,7 @@ func New() *Provider { } func (p *Provider) DetailWidget() core.DetailWidget { - return core.DetailWidget{ - Sections: []core.DetailSection{ - {Name: "Usage", Order: 1, 
Style: core.DetailSectionStyleUsage}, - {Name: "Models", Order: 2, Style: core.DetailSectionStyleModels}, - {Name: "Languages", Order: 3, Style: core.DetailSectionStyleLanguages}, - {Name: "MCP Usage", Order: 4, Style: core.DetailSectionStyleMCP}, - {Name: "Spending", Order: 5, Style: core.DetailSectionStyleSpending}, - {Name: "Trends", Order: 6, Style: core.DetailSectionStyleTrends}, - {Name: "Tokens", Order: 7, Style: core.DetailSectionStyleTokens}, - {Name: "Activity", Order: 8, Style: core.DetailSectionStyleActivity}, - }, - } + return core.CodingToolDetailWidget(true) } type ghUser struct { diff --git a/internal/providers/cursor/cursor.go b/internal/providers/cursor/cursor.go index 2904eab..3811859 100644 --- a/internal/providers/cursor/cursor.go +++ b/internal/providers/cursor/cursor.go @@ -165,20 +165,13 @@ type composerModelUsage struct { } func (p *Provider) DetailWidget() core.DetailWidget { - return core.DetailWidget{ - Sections: []core.DetailSection{ - {Name: "Usage", Order: 1, Style: core.DetailSectionStyleUsage}, - {Name: "Models", Order: 2, Style: core.DetailSectionStyleModels}, - {Name: "Languages", Order: 3, Style: core.DetailSectionStyleLanguages}, - {Name: "Spending", Order: 4, Style: core.DetailSectionStyleSpending}, - {Name: "Trends", Order: 5, Style: core.DetailSectionStyleTrends}, - {Name: "Tokens", Order: 6, Style: core.DetailSectionStyleTokens}, - {Name: "Activity", Order: 7, Style: core.DetailSectionStyleActivity}, - }, - } + return core.CodingToolDetailWidget(false) } func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.UsageSnapshot, error) { + if strings.TrimSpace(acct.Provider) == "" { + acct.Provider = p.ID() + } snap := core.UsageSnapshot{ ProviderID: p.ID(), AccountID: acct.ID, @@ -198,8 +191,9 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa } } - trackingDBPath := acct.Path("tracking_db", acct.Binary) - stateDBPath := acct.Path("state_db", acct.BaseURL) + 
acct.NormalizeRuntimePaths() + trackingDBPath := acct.Path("tracking_db", "") + stateDBPath := acct.Path("state_db", "") // If the token was not persisted (json:"-"), try to extract it fresh // from the Cursor state DB so daemon polls can access the API. diff --git a/internal/providers/gemini_cli/gemini_cli.go b/internal/providers/gemini_cli/gemini_cli.go index 3f9e64f..813b56f 100644 --- a/internal/providers/gemini_cli/gemini_cli.go +++ b/internal/providers/gemini_cli/gemini_cli.go @@ -67,18 +67,7 @@ func New() *Provider { } func (p *Provider) DetailWidget() core.DetailWidget { - return core.DetailWidget{ - Sections: []core.DetailSection{ - {Name: "Usage", Order: 1, Style: core.DetailSectionStyleUsage}, - {Name: "Models", Order: 2, Style: core.DetailSectionStyleModels}, - {Name: "Languages", Order: 3, Style: core.DetailSectionStyleLanguages}, - {Name: "MCP Usage", Order: 4, Style: core.DetailSectionStyleMCP}, - {Name: "Spending", Order: 5, Style: core.DetailSectionStyleSpending}, - {Name: "Trends", Order: 6, Style: core.DetailSectionStyleTrends}, - {Name: "Tokens", Order: 7, Style: core.DetailSectionStyleTokens}, - {Name: "Activity", Order: 8, Style: core.DetailSectionStyleActivity}, - }, - } + return core.CodingToolDetailWidget(true) } type oauthCreds struct { diff --git a/internal/telemetry/read_model.go b/internal/telemetry/read_model.go index 04569c3..edb992c 100644 --- a/internal/telemetry/read_model.go +++ b/internal/telemetry/read_model.go @@ -41,8 +41,8 @@ type storedLimitEnvelope struct { type ReadModelOptions struct { ProviderLinks map[string]string - TimeWindowHours int // 0 = no filter (all data) - TimeWindow string // raw value like "7d", "1h" for metric labels + TimeWindowHours int + TimeWindow core.TimeWindow } // ApplyCanonicalTelemetryView hydrates snapshots from canonical telemetry streams. 
diff --git a/internal/telemetry/usage_view.go b/internal/telemetry/usage_view.go index dabedb7..820f9e6 100644 --- a/internal/telemetry/usage_view.go +++ b/internal/telemetry/usage_view.go @@ -163,7 +163,7 @@ func applyCanonicalUsageViewWithDB( snaps map[string]core.UsageSnapshot, providerLinks map[string]string, timeWindowHours int, - timeWindow string, + timeWindow core.TimeWindow, ) (map[string]core.UsageSnapshot, error) { if db == nil { return snaps, nil @@ -215,7 +215,7 @@ func applyCanonicalUsageViewWithDB( if hasTelemetry && agg != nil { // Telemetry is active but no events in this time window. // Strip stale all-time metrics so TUI shows "no data" placeholders. - windowLabel := "all" + windowLabel := core.TimeWindowAll if timeWindowHours > 0 && timeWindow != "" { windowLabel = timeWindow } @@ -227,7 +227,7 @@ func applyCanonicalUsageViewWithDB( continue } - windowLabel := "all" + windowLabel := core.TimeWindowAll if timeWindowHours > 0 && timeWindow != "" { windowLabel = timeWindow } @@ -238,11 +238,12 @@ func applyCanonicalUsageViewWithDB( return out, nil } -func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, timeWindow string) { +func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, timeWindow core.TimeWindow) { if snap == nil || agg == nil { return } authoritativeCost := usageAuthoritativeCost(*snap) + windowLabel := string(timeWindow) snap.EnsureMaps() if snap.DailySeries == nil { snap.DailySeries = make(map[string][]core.TimePoint) @@ -309,18 +310,18 @@ func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, modelCostTotal := 0.0 for _, model := range agg.Models { mk := sanitizeMetricID(model.Model) - snap.Metrics["model_"+mk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(model.InputTokens), Unit: "tokens", Window: timeWindow} - snap.Metrics["model_"+mk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(model.OutputTokens), Unit: "tokens", Window: timeWindow} - 
snap.Metrics["model_"+mk+"_cached_tokens"] = core.Metric{Used: core.Float64Ptr(model.CachedTokens), Unit: "tokens", Window: timeWindow} - snap.Metrics["model_"+mk+"_reasoning_tokens"] = core.Metric{Used: core.Float64Ptr(model.Reasoning), Unit: "tokens", Window: timeWindow} - snap.Metrics["model_"+mk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(model.CostUSD), Unit: "USD", Window: timeWindow} - snap.Metrics["model_"+mk+"_requests"] = core.Metric{Used: core.Float64Ptr(model.Requests), Unit: "requests", Window: timeWindow} + snap.Metrics["model_"+mk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(model.InputTokens), Unit: "tokens", Window: windowLabel} + snap.Metrics["model_"+mk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(model.OutputTokens), Unit: "tokens", Window: windowLabel} + snap.Metrics["model_"+mk+"_cached_tokens"] = core.Metric{Used: core.Float64Ptr(model.CachedTokens), Unit: "tokens", Window: windowLabel} + snap.Metrics["model_"+mk+"_reasoning_tokens"] = core.Metric{Used: core.Float64Ptr(model.Reasoning), Unit: "tokens", Window: windowLabel} + snap.Metrics["model_"+mk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(model.CostUSD), Unit: "USD", Window: windowLabel} + snap.Metrics["model_"+mk+"_requests"] = core.Metric{Used: core.Float64Ptr(model.Requests), Unit: "requests", Window: windowLabel} snap.Metrics["model_"+mk+"_requests_today"] = core.Metric{Used: core.Float64Ptr(model.Requests1d), Unit: "requests", Window: "1d"} modelCostTotal += model.CostUSD snap.ModelUsage = append(snap.ModelUsage, core.ModelUsageRecord{ RawModelID: model.Model, RawSource: "telemetry", - Window: timeWindow, + Window: windowLabel, InputTokens: core.Float64Ptr(model.InputTokens), OutputTokens: core.Float64Ptr(model.OutputTokens), CachedTokens: core.Float64Ptr(model.CachedTokens), @@ -348,7 +349,7 @@ func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, // it as "unattributed" would be misleading for the selected time range. 
if delta := authoritativeCost - modelCostTotal; authoritativeCost > 0 && delta > 0.000001 { uk := "model_unattributed" - snap.Metrics[uk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(delta), Unit: "USD", Window: timeWindow} + snap.Metrics[uk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(delta), Unit: "USD", Window: windowLabel} snap.SetDiagnostic("telemetry_unattributed_model_cost_usd", fmt.Sprintf("%.6f", delta)) } } @@ -357,15 +358,15 @@ func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, providerCostTotal := 0.0 for _, provider := range agg.Providers { pk := sanitizeMetricID(provider.Provider) - snap.Metrics["provider_"+pk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(provider.CostUSD), Unit: "USD", Window: timeWindow} - snap.Metrics["provider_"+pk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(provider.Input), Unit: "tokens", Window: timeWindow} - snap.Metrics["provider_"+pk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(provider.Output), Unit: "tokens", Window: timeWindow} - snap.Metrics["provider_"+pk+"_requests"] = core.Metric{Used: core.Float64Ptr(provider.Requests), Unit: "requests", Window: timeWindow} + snap.Metrics["provider_"+pk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(provider.CostUSD), Unit: "USD", Window: windowLabel} + snap.Metrics["provider_"+pk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(provider.Input), Unit: "tokens", Window: windowLabel} + snap.Metrics["provider_"+pk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(provider.Output), Unit: "tokens", Window: windowLabel} + snap.Metrics["provider_"+pk+"_requests"] = core.Metric{Used: core.Float64Ptr(provider.Requests), Unit: "requests", Window: windowLabel} providerCostTotal += provider.CostUSD } if delta := authoritativeCost - providerCostTotal; authoritativeCost > 0 && delta > 0.000001 { uk := "provider_unattributed" - snap.Metrics[uk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(delta), Unit: "USD", Window: timeWindow} 
+ snap.Metrics[uk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(delta), Unit: "USD", Window: windowLabel} snap.SetDiagnostic("telemetry_unattributed_provider_cost_usd", fmt.Sprintf("%.6f", delta)) } } @@ -378,20 +379,20 @@ func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, // to Go's random map iteration order. snap.Metrics["source_"+sk+"_requests_today"] = core.Metric{Used: core.Float64Ptr(source.Requests1d), Unit: "requests", Window: "1d"} - snap.Metrics["client_"+sk+"_total_tokens"] = core.Metric{Used: core.Float64Ptr(source.Tokens), Unit: "tokens", Window: timeWindow} - snap.Metrics["client_"+sk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(source.Input), Unit: "tokens", Window: timeWindow} - snap.Metrics["client_"+sk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(source.Output), Unit: "tokens", Window: timeWindow} - snap.Metrics["client_"+sk+"_cached_tokens"] = core.Metric{Used: core.Float64Ptr(source.Cached), Unit: "tokens", Window: timeWindow} - snap.Metrics["client_"+sk+"_reasoning_tokens"] = core.Metric{Used: core.Float64Ptr(source.Reasoning), Unit: "tokens", Window: timeWindow} - snap.Metrics["client_"+sk+"_requests"] = core.Metric{Used: core.Float64Ptr(source.Requests), Unit: "requests", Window: timeWindow} - snap.Metrics["client_"+sk+"_sessions"] = core.Metric{Used: core.Float64Ptr(source.Sessions), Unit: "sessions", Window: timeWindow} + snap.Metrics["client_"+sk+"_total_tokens"] = core.Metric{Used: core.Float64Ptr(source.Tokens), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(source.Input), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(source.Output), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_cached_tokens"] = core.Metric{Used: core.Float64Ptr(source.Cached), Unit: "tokens", Window: windowLabel} + 
snap.Metrics["client_"+sk+"_reasoning_tokens"] = core.Metric{Used: core.Float64Ptr(source.Reasoning), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_requests"] = core.Metric{Used: core.Float64Ptr(source.Requests), Unit: "requests", Window: windowLabel} + snap.Metrics["client_"+sk+"_sessions"] = core.Metric{Used: core.Float64Ptr(source.Sessions), Unit: "sessions", Window: windowLabel} } for _, project := range agg.Projects { pk := sanitizeMetricID(project.Project) if pk == "" { continue } - snap.Metrics["project_"+pk+"_requests"] = core.Metric{Used: core.Float64Ptr(project.Requests), Unit: "requests", Window: timeWindow} + snap.Metrics["project_"+pk+"_requests"] = core.Metric{Used: core.Float64Ptr(project.Requests), Unit: "requests", Window: windowLabel} snap.Metrics["project_"+pk+"_requests_today"] = core.Metric{Used: core.Float64Ptr(project.Requests1d), Unit: "requests", Window: "1d"} } @@ -401,7 +402,7 @@ func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, var totalToolCallsAborted float64 for _, tool := range agg.Tools { tk := sanitizeMetricID(tool.Tool) - snap.Metrics["tool_"+tk] = core.Metric{Used: core.Float64Ptr(tool.Calls), Unit: "calls", Window: timeWindow} + snap.Metrics["tool_"+tk] = core.Metric{Used: core.Float64Ptr(tool.Calls), Unit: "calls", Window: windowLabel} snap.Metrics["tool_"+tk+"_today"] = core.Metric{Used: core.Float64Ptr(tool.Calls1d), Unit: "calls", Window: "1d"} totalToolCalls += tool.Calls totalToolCallsOK += tool.CallsOK @@ -409,73 +410,73 @@ func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, totalToolCallsAborted += tool.CallsAborted } if totalToolCalls > 0 { - snap.Metrics["tool_calls_total"] = core.Metric{Used: core.Float64Ptr(totalToolCalls), Unit: "calls", Window: timeWindow} - snap.Metrics["tool_completed"] = core.Metric{Used: core.Float64Ptr(totalToolCallsOK), Unit: "calls", Window: timeWindow} - snap.Metrics["tool_errored"] = core.Metric{Used: 
core.Float64Ptr(totalToolCallsError), Unit: "calls", Window: timeWindow} - snap.Metrics["tool_cancelled"] = core.Metric{Used: core.Float64Ptr(totalToolCallsAborted), Unit: "calls", Window: timeWindow} + snap.Metrics["tool_calls_total"] = core.Metric{Used: core.Float64Ptr(totalToolCalls), Unit: "calls", Window: windowLabel} + snap.Metrics["tool_completed"] = core.Metric{Used: core.Float64Ptr(totalToolCallsOK), Unit: "calls", Window: windowLabel} + snap.Metrics["tool_errored"] = core.Metric{Used: core.Float64Ptr(totalToolCallsError), Unit: "calls", Window: windowLabel} + snap.Metrics["tool_cancelled"] = core.Metric{Used: core.Float64Ptr(totalToolCallsAborted), Unit: "calls", Window: windowLabel} successRate := 0.0 if totalToolCalls > 0 { successRate = (totalToolCallsOK / totalToolCalls) * 100 } - snap.Metrics["tool_success_rate"] = core.Metric{Used: core.Float64Ptr(successRate), Unit: "%", Window: timeWindow} + snap.Metrics["tool_success_rate"] = core.Metric{Used: core.Float64Ptr(successRate), Unit: "%", Window: windowLabel} } // MCP server metrics. 
var mcpTotalCalls, mcpTotalCalls1d float64 for _, srv := range agg.MCPServers { sk := sanitizeMetricID(srv.Server) - snap.Metrics["mcp_"+sk+"_total"] = core.Metric{Used: core.Float64Ptr(srv.Calls), Unit: "calls", Window: timeWindow} + snap.Metrics["mcp_"+sk+"_total"] = core.Metric{Used: core.Float64Ptr(srv.Calls), Unit: "calls", Window: windowLabel} snap.Metrics["mcp_"+sk+"_total_today"] = core.Metric{Used: core.Float64Ptr(srv.Calls1d), Unit: "calls", Window: "1d"} mcpTotalCalls += srv.Calls mcpTotalCalls1d += srv.Calls1d for _, fn := range srv.Functions { fk := sanitizeMetricID(fn.Function) - snap.Metrics["mcp_"+sk+"_"+fk] = core.Metric{Used: core.Float64Ptr(fn.Calls), Unit: "calls", Window: timeWindow} + snap.Metrics["mcp_"+sk+"_"+fk] = core.Metric{Used: core.Float64Ptr(fn.Calls), Unit: "calls", Window: windowLabel} } } if mcpTotalCalls > 0 { - snap.Metrics["mcp_calls_total"] = core.Metric{Used: core.Float64Ptr(mcpTotalCalls), Unit: "calls", Window: timeWindow} + snap.Metrics["mcp_calls_total"] = core.Metric{Used: core.Float64Ptr(mcpTotalCalls), Unit: "calls", Window: windowLabel} snap.Metrics["mcp_calls_total_today"] = core.Metric{Used: core.Float64Ptr(mcpTotalCalls1d), Unit: "calls", Window: "1d"} - snap.Metrics["mcp_servers_active"] = core.Metric{Used: core.Float64Ptr(float64(len(agg.MCPServers))), Unit: "servers", Window: timeWindow} + snap.Metrics["mcp_servers_active"] = core.Metric{Used: core.Float64Ptr(float64(len(agg.MCPServers))), Unit: "servers", Window: windowLabel} } for _, lang := range agg.Languages { lk := sanitizeMetricID(lang.Language) - snap.Metrics["lang_"+lk] = core.Metric{Used: core.Float64Ptr(lang.Requests), Unit: "requests", Window: timeWindow} + snap.Metrics["lang_"+lk] = core.Metric{Used: core.Float64Ptr(lang.Requests), Unit: "requests", Window: windowLabel} } // Emit windowed activity metrics. 
act := agg.Activity if act.Messages > 0 { - snap.Metrics["messages_today"] = core.Metric{Used: core.Float64Ptr(act.Messages), Unit: "messages", Window: timeWindow} + snap.Metrics["messages_today"] = core.Metric{Used: core.Float64Ptr(act.Messages), Unit: "messages", Window: windowLabel} } if act.Sessions > 0 { - snap.Metrics["sessions_today"] = core.Metric{Used: core.Float64Ptr(act.Sessions), Unit: "sessions", Window: timeWindow} + snap.Metrics["sessions_today"] = core.Metric{Used: core.Float64Ptr(act.Sessions), Unit: "sessions", Window: windowLabel} } if act.ToolCalls > 0 { - snap.Metrics["tool_calls_today"] = core.Metric{Used: core.Float64Ptr(act.ToolCalls), Unit: "calls", Window: timeWindow} - snap.Metrics["7d_tool_calls"] = core.Metric{Used: core.Float64Ptr(act.ToolCalls), Unit: "calls", Window: timeWindow} + snap.Metrics["tool_calls_today"] = core.Metric{Used: core.Float64Ptr(act.ToolCalls), Unit: "calls", Window: windowLabel} + snap.Metrics["7d_tool_calls"] = core.Metric{Used: core.Float64Ptr(act.ToolCalls), Unit: "calls", Window: windowLabel} } if act.InputTokens > 0 { - snap.Metrics["today_input_tokens"] = core.Metric{Used: core.Float64Ptr(act.InputTokens), Unit: "tokens", Window: timeWindow} + snap.Metrics["today_input_tokens"] = core.Metric{Used: core.Float64Ptr(act.InputTokens), Unit: "tokens", Window: windowLabel} } if act.OutputTokens > 0 { - snap.Metrics["today_output_tokens"] = core.Metric{Used: core.Float64Ptr(act.OutputTokens), Unit: "tokens", Window: timeWindow} + snap.Metrics["today_output_tokens"] = core.Metric{Used: core.Float64Ptr(act.OutputTokens), Unit: "tokens", Window: windowLabel} } if act.TotalCost > 0 { - snap.Metrics["today_api_cost"] = core.Metric{Used: core.Float64Ptr(act.TotalCost), Unit: "USD", Window: timeWindow} + snap.Metrics["today_api_cost"] = core.Metric{Used: core.Float64Ptr(act.TotalCost), Unit: "USD", Window: windowLabel} } // Emit windowed code stats. 
cs := agg.CodeStats if cs.FilesChanged > 0 { - snap.Metrics["composer_files_changed"] = core.Metric{Used: core.Float64Ptr(cs.FilesChanged), Unit: "files", Window: timeWindow} + snap.Metrics["composer_files_changed"] = core.Metric{Used: core.Float64Ptr(cs.FilesChanged), Unit: "files", Window: windowLabel} } if cs.LinesAdded > 0 { - snap.Metrics["composer_lines_added"] = core.Metric{Used: core.Float64Ptr(cs.LinesAdded), Unit: "lines", Window: timeWindow} + snap.Metrics["composer_lines_added"] = core.Metric{Used: core.Float64Ptr(cs.LinesAdded), Unit: "lines", Window: windowLabel} } if cs.LinesRemoved > 0 { - snap.Metrics["composer_lines_removed"] = core.Metric{Used: core.Float64Ptr(cs.LinesRemoved), Unit: "lines", Window: timeWindow} + snap.Metrics["composer_lines_removed"] = core.Metric{Used: core.Float64Ptr(cs.LinesRemoved), Unit: "lines", Window: windowLabel} } // Emit window-level aggregate metrics for the TUI header/tile display. @@ -486,13 +487,13 @@ func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, windowTokens += model.TotalTokens } if windowRequests > 0 { - snap.Metrics["window_requests"] = core.Metric{Used: core.Float64Ptr(windowRequests), Unit: "requests", Window: timeWindow} + snap.Metrics["window_requests"] = core.Metric{Used: core.Float64Ptr(windowRequests), Unit: "requests", Window: windowLabel} } if windowCost > 0 { - snap.Metrics["window_cost"] = core.Metric{Used: core.Float64Ptr(windowCost), Unit: "USD", Window: timeWindow} + snap.Metrics["window_cost"] = core.Metric{Used: core.Float64Ptr(windowCost), Unit: "USD", Window: windowLabel} } if windowTokens > 0 { - snap.Metrics["window_tokens"] = core.Metric{Used: core.Float64Ptr(windowTokens), Unit: "tokens", Window: timeWindow} + snap.Metrics["window_tokens"] = core.Metric{Used: core.Float64Ptr(windowTokens), Unit: "tokens", Window: windowLabel} } snap.DailySeries["analytics_cost"] = pointsFromDaily(agg.Daily, func(v telemetryDayPoint) float64 { return v.CostUSD }) diff 
--git a/internal/tui/model.go b/internal/tui/model.go index f375467..2aec87e 100644 --- a/internal/tui/model.go +++ b/internal/tui/model.go @@ -1,7 +1,6 @@ package tui import ( - "context" "fmt" "log" "sort" @@ -13,7 +12,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/config" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/integrations" - "github.com/janekbaraniewski/openusage/internal/providers" "github.com/samber/lo" ) @@ -49,7 +47,11 @@ const ( maxLeftWidth = 38 ) -type SnapshotsMsg map[string]core.UsageSnapshot +type SnapshotsMsg struct { + Snapshots map[string]core.UsageSnapshot + TimeWindow core.TimeWindow + RequestID uint64 +} type DaemonStatus string @@ -115,6 +117,19 @@ type settingsState struct { apiKeyStatus string // "validating...", "valid ✓", "invalid ✗", etc. } +type Services interface { + SaveTheme(themeName string) error + SaveDashboardProviders(providers []config.DashboardProviderConfig) error + SaveDashboardView(view string) error + SaveDashboardWidgetSections(sections []config.DashboardWidgetSection) error + SaveDashboardHideSectionsWithNoData(hide bool) error + SaveTimeWindow(window string) error + ValidateAPIKey(accountID, providerID, apiKey string) (bool, string) + SaveCredential(accountID, apiKey string) error + DeleteCredential(accountID string) error + InstallIntegration(id integrations.ID) ([]integrations.Status, error) +} + type Model struct { snapshots map[string]core.UsageSnapshot sortedIDs []string @@ -140,9 +155,9 @@ type Model struct { analyticsFilter filterState analyticsSortBy int // 0=cost↓, 1=name↑, 2=tokens↓ - animFrame int // monotonically increasing frame counter - refreshing bool // true when a manual refresh is in progress - hasData bool // true after the first SnapshotsMsg arrives + animFrame int // monotonically increasing frame counter + refreshing bool + hasData bool experimentalAnalytics bool // when false, only the Dashboard screen is available @@ 
-156,12 +171,14 @@ type Model struct { widgetSections []config.DashboardWidgetSection hideSectionsWithNoData bool - timeWindow core.TimeWindow + timeWindow core.TimeWindow + lastSnapshotRequestID uint64 + services Services onAddAccount func(core.AccountConfig) - onRefresh func() + onRefresh func(core.TimeWindow) onInstallDaemon func() error - onTimeWindowChange func(string) + onTimeWindowChange func(core.TimeWindow) } func NewModel( @@ -191,18 +208,20 @@ func (m *Model) SetOnInstallDaemon(fn func() error) { m.onInstallDaemon = fn } +func (m *Model) SetServices(services Services) { + m.services = services +} + // SetOnAddAccount sets a callback invoked when a new provider account is added via the API Keys tab. func (m *Model) SetOnAddAccount(fn func(core.AccountConfig)) { m.onAddAccount = fn } -// SetOnRefresh sets a callback invoked when the user requests a manual refresh. -func (m *Model) SetOnRefresh(fn func()) { +func (m *Model) SetOnRefresh(fn func(core.TimeWindow)) { m.onRefresh = fn } -// SetOnTimeWindowChange sets a callback invoked when the user changes the time window. 
-func (m *Model) SetOnTimeWindowChange(fn func(string)) { +func (m *Model) SetOnTimeWindowChange(fn func(core.TimeWindow)) { m.onTimeWindowChange = fn } @@ -249,7 +268,10 @@ type integrationInstallResultMsg struct { func (m Model) persistThemeCmd(themeName string) tea.Cmd { return func() tea.Msg { - err := config.SaveTheme(themeName) + if m.services == nil { + return themePersistedMsg{err: fmt.Errorf("theme service unavailable")} + } + err := m.services.SaveTheme(themeName) if err != nil { log.Printf("theme persist: %v", err) } @@ -260,7 +282,10 @@ func (m Model) persistThemeCmd(themeName string) tea.Cmd { func (m Model) persistDashboardPrefsCmd() tea.Cmd { providers := m.dashboardConfigProviders() return func() tea.Msg { - err := config.SaveDashboardProviders(providers) + if m.services == nil { + return dashboardPrefsPersistedMsg{err: fmt.Errorf("dashboard settings service unavailable")} + } + err := m.services.SaveDashboardProviders(providers) if err != nil { log.Printf("dashboard settings persist: %v", err) } @@ -271,7 +296,10 @@ func (m Model) persistDashboardPrefsCmd() tea.Cmd { func (m Model) persistDashboardViewCmd() tea.Cmd { view := string(m.configuredDashboardView()) return func() tea.Msg { - err := config.SaveDashboardView(view) + if m.services == nil { + return dashboardViewPersistedMsg{err: fmt.Errorf("dashboard view service unavailable")} + } + err := m.services.SaveDashboardView(view) if err != nil { log.Printf("dashboard view persist: %v", err) } @@ -282,7 +310,10 @@ func (m Model) persistDashboardViewCmd() tea.Cmd { func (m Model) persistDashboardWidgetSectionsCmd() tea.Cmd { sections := m.dashboardWidgetSectionConfigEntries() return func() tea.Msg { - err := config.SaveDashboardWidgetSections(sections) + if m.services == nil { + return dashboardWidgetSectionsPersistedMsg{err: fmt.Errorf("dashboard sections service unavailable")} + } + err := m.services.SaveDashboardWidgetSections(sections) if err != nil { log.Printf("dashboard widget sections 
persist: %v", err) } @@ -293,7 +324,10 @@ func (m Model) persistDashboardWidgetSectionsCmd() tea.Cmd { func (m Model) persistDashboardHideSectionsWithNoDataCmd() tea.Cmd { hide := m.hideSectionsWithNoData return func() tea.Msg { - err := config.SaveDashboardHideSectionsWithNoData(hide) + if m.services == nil { + return dashboardHideSectionsWithNoDataPersistedMsg{err: fmt.Errorf("dashboard empty-state service unavailable")} + } + err := m.services.SaveDashboardHideSectionsWithNoData(hide) if err != nil { log.Printf("dashboard hide sections with no data persist: %v", err) } @@ -303,7 +337,10 @@ func (m Model) persistDashboardHideSectionsWithNoDataCmd() tea.Cmd { func (m Model) persistTimeWindowCmd(window string) tea.Cmd { return func() tea.Msg { - err := config.SaveTimeWindow(window) + if m.services == nil { + return timeWindowPersistedMsg{err: fmt.Errorf("time window service unavailable")} + } + err := m.services.SaveTimeWindow(window) if err != nil { log.Printf("time window persist: %v", err) } @@ -313,61 +350,43 @@ func (m Model) persistTimeWindowCmd(window string) tea.Cmd { func (m Model) validateKeyCmd(accountID, providerID, apiKey string) tea.Cmd { return func() tea.Msg { - var provider core.UsageProvider - for _, p := range providers.AllProviders() { - if p.ID() == providerID { - provider = p - break - } - } - if provider == nil { - return validateKeyResultMsg{AccountID: accountID, Valid: false, Error: "unknown provider"} - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - acct := core.AccountConfig{ - ID: accountID, - Provider: providerID, - Token: apiKey, - } - snap, err := provider.Fetch(ctx, acct) - if err != nil { - return validateKeyResultMsg{AccountID: accountID, Valid: false, Error: err.Error()} - } - if snap.Status == core.StatusAuth || snap.Status == core.StatusError { - msg := snap.Message - if msg == "" { - msg = string(snap.Status) - } - return validateKeyResultMsg{AccountID: accountID, Valid: 
false, Error: msg} + if m.services == nil { + return validateKeyResultMsg{AccountID: accountID, Valid: false, Error: "validation service unavailable"} } - return validateKeyResultMsg{AccountID: accountID, Valid: true} + valid, errMsg := m.services.ValidateAPIKey(accountID, providerID, apiKey) + return validateKeyResultMsg{AccountID: accountID, Valid: valid, Error: errMsg} } } func (m Model) saveCredentialCmd(accountID, apiKey string) tea.Cmd { return func() tea.Msg { - err := config.SaveCredential(accountID, apiKey) + if m.services == nil { + return credentialSavedMsg{AccountID: accountID, Err: fmt.Errorf("credential service unavailable")} + } + err := m.services.SaveCredential(accountID, apiKey) return credentialSavedMsg{AccountID: accountID, Err: err} } } func (m Model) deleteCredentialCmd(accountID string) tea.Cmd { return func() tea.Msg { - err := config.DeleteCredential(accountID) + if m.services == nil { + return credentialDeletedMsg{AccountID: accountID, Err: fmt.Errorf("credential service unavailable")} + } + err := m.services.DeleteCredential(accountID) return credentialDeletedMsg{AccountID: accountID, Err: err} } } func (m Model) installIntegrationCmd(id integrations.ID) tea.Cmd { return func() tea.Msg { - manager := integrations.NewDefaultManager() - err := manager.Install(id) + if m.services == nil { + return integrationInstallResultMsg{IntegrationID: id, Err: fmt.Errorf("integration service unavailable")} + } + statuses, err := m.services.InstallIntegration(id) return integrationInstallResultMsg{ IntegrationID: id, - Statuses: manager.ListStatuses(), + Statuses: statuses, Err: err, } } @@ -412,19 +431,28 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { return m, nil case SnapshotsMsg: - if m.refreshing && m.hasData && !snapshotsReady(msg) { - // During a time-window change the daemon may return empty - // template snapshots while it recomputes. Keep the old data - // visible so tiles don't flash to the loading screen. 
+ msgWindow := msg.TimeWindow + if msgWindow == "" { + msgWindow = core.TimeWindow30d + } + if msgWindow != m.timeWindow { + return m, nil + } + if msg.RequestID > 0 && msg.RequestID < m.lastSnapshotRequestID { + return m, nil + } + if m.refreshing && m.hasData && !snapshotsReady(msg.Snapshots) { return m, nil } - m.snapshots = msg + m.snapshots = msg.Snapshots m.refreshing = false - if len(msg) > 0 || snapshotsReady(msg) { + if msg.RequestID > m.lastSnapshotRequestID { + m.lastSnapshotRequestID = msg.RequestID + } + if len(msg.Snapshots) > 0 || snapshotsReady(msg.Snapshots) { m.hasData = true m.daemon.status = DaemonRunning } - // Stamp display decision into snapshot diagnostics for the Info tab. for id, snap := range m.snapshots { info := computeDisplayInfo(snap, dashboardWidget(snap.ProviderID)) if info.reason != "" { @@ -1110,21 +1138,26 @@ func (m Model) handleTilesKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { func (m Model) cycleTimeWindow() (tea.Model, tea.Cmd) { next := core.NextTimeWindow(m.timeWindow) - m.timeWindow = next - if m.onTimeWindowChange != nil { - m.onTimeWindowChange(string(next)) - } + m = m.beginTimeWindowRefresh(next) + return m, m.persistTimeWindowCmd(string(next)) +} + +func (m Model) requestRefresh() Model { m.refreshing = true if m.onRefresh != nil { - m.onRefresh() + m.onRefresh(m.timeWindow) } - return m, m.persistTimeWindowCmd(string(next)) + return m } -func (m Model) requestRefresh() Model { +func (m Model) beginTimeWindowRefresh(window core.TimeWindow) Model { + m.timeWindow = window + if m.onTimeWindowChange != nil { + m.onTimeWindowChange(window) + } m.refreshing = true if m.onRefresh != nil { - m.onRefresh() + m.onRefresh(window) } return m } diff --git a/internal/tui/model_display_test.go b/internal/tui/model_display_test.go index 853f31b..4e5f487 100644 --- a/internal/tui/model_display_test.go +++ b/internal/tui/model_display_test.go @@ -227,13 +227,17 @@ func TestUpdate_SnapshotsMsgMarksModelReadyOnFirstFrame(t *testing.T) { } 
snaps := SnapshotsMsg{ - "openrouter": { - ProviderID: "openrouter", - AccountID: "openrouter", - Status: core.StatusUnknown, - Message: "daemon warming up", - Metrics: map[string]core.Metric{}, + Snapshots: map[string]core.UsageSnapshot{ + "openrouter": { + ProviderID: "openrouter", + AccountID: "openrouter", + Status: core.StatusUnknown, + Message: "daemon warming up", + Metrics: map[string]core.Metric{}, + }, }, + TimeWindow: core.TimeWindow30d, + RequestID: 1, } updated, _ := m.Update(snaps) @@ -246,6 +250,87 @@ func TestUpdate_SnapshotsMsgMarksModelReadyOnFirstFrame(t *testing.T) { } } +func TestUpdate_SnapshotsMsgIgnoresStaleTimeWindowResponse(t *testing.T) { + m := NewModel(0.2, 0.1, false, config.DashboardConfig{}, nil, core.TimeWindow1d) + currentUsed := 1.0 + m.snapshots = map[string]core.UsageSnapshot{ + "openrouter": { + ProviderID: "openrouter", + AccountID: "openrouter", + Status: core.StatusOK, + Metrics: map[string]core.Metric{ + "requests_today": {Used: &currentUsed, Unit: "requests", Window: "1d"}, + }, + }, + } + m.hasData = true + m.lastSnapshotRequestID = 2 + + staleUsed := 30.0 + updated, _ := m.Update(SnapshotsMsg{ + Snapshots: map[string]core.UsageSnapshot{ + "openrouter": { + ProviderID: "openrouter", + AccountID: "openrouter", + Status: core.StatusOK, + Metrics: map[string]core.Metric{ + "requests_window": {Used: &staleUsed, Unit: "requests", Window: "30d"}, + }, + }, + }, + TimeWindow: core.TimeWindow30d, + RequestID: 3, + }) + got := updated.(Model) + if metric := got.snapshots["openrouter"].Metrics["requests_today"]; metric.Used == nil || *metric.Used != 1 { + t.Fatalf("current window snapshot was replaced by stale window: %+v", got.snapshots["openrouter"].Metrics) + } +} + +func TestUpdate_SnapshotsMsgIgnoresOlderCurrentWindowResponse(t *testing.T) { + m := NewModel(0.2, 0.1, false, config.DashboardConfig{}, nil, core.TimeWindow7d) + m.hasData = true + + newUsed := 7.0 + updated, _ := m.Update(SnapshotsMsg{ + Snapshots: 
map[string]core.UsageSnapshot{ + "openrouter": { + ProviderID: "openrouter", + AccountID: "openrouter", + Status: core.StatusOK, + Metrics: map[string]core.Metric{ + "window_requests": {Used: &newUsed, Unit: "requests", Window: "7d"}, + }, + }, + }, + TimeWindow: core.TimeWindow7d, + RequestID: 5, + }) + got := updated.(Model) + + oldUsed := 3.0 + updated, _ = got.Update(SnapshotsMsg{ + Snapshots: map[string]core.UsageSnapshot{ + "openrouter": { + ProviderID: "openrouter", + AccountID: "openrouter", + Status: core.StatusOK, + Metrics: map[string]core.Metric{ + "window_requests": {Used: &oldUsed, Unit: "requests", Window: "7d"}, + }, + }, + }, + TimeWindow: core.TimeWindow7d, + RequestID: 4, + }) + got = updated.(Model) + + metric := got.snapshots["openrouter"].Metrics["window_requests"] + if metric.Used == nil || *metric.Used != 7 { + t.Fatalf("older request overwrote newer snapshot: %+v", metric) + } +} + func TestUpdate_AppUpdateMsgStoresNotice(t *testing.T) { m := NewModel(0.2, 0.1, false, config.DashboardConfig{}, nil, core.TimeWindow30d) diff --git a/internal/tui/model_refresh_test.go b/internal/tui/model_refresh_test.go index 8413342..b08e5c8 100644 --- a/internal/tui/model_refresh_test.go +++ b/internal/tui/model_refresh_test.go @@ -1,13 +1,20 @@ package tui -import "testing" +import ( + "testing" + + "github.com/janekbaraniewski/openusage/internal/core" +) func TestRequestRefreshInvokesCallback(t *testing.T) { m := Model{} + m.timeWindow = core.TimeWindow7d refreshCalls := 0 - m.SetOnRefresh(func() { + var gotWindow core.TimeWindow + m.SetOnRefresh(func(window core.TimeWindow) { refreshCalls++ + gotWindow = window }) updated := m.requestRefresh() @@ -17,4 +24,7 @@ func TestRequestRefreshInvokesCallback(t *testing.T) { if refreshCalls != 1 { t.Fatalf("refresh callback calls = %d, want 1", refreshCalls) } + if gotWindow != core.TimeWindow7d { + t.Fatalf("refresh callback window = %q, want %q", gotWindow, core.TimeWindow7d) + } } diff --git 
a/internal/tui/settings_modal.go b/internal/tui/settings_modal.go index 4633987..0ef5107 100644 --- a/internal/tui/settings_modal.go +++ b/internal/tui/settings_modal.go @@ -301,14 +301,7 @@ func (m Model) handleSettingsModalKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { case " ", "enter": if m.settings.cursor >= 0 && m.settings.cursor < twCount { tw := core.ValidTimeWindows[m.settings.cursor] - m.timeWindow = tw - if m.onTimeWindowChange != nil { - m.onTimeWindowChange(string(tw)) - } - m.refreshing = true - if m.onRefresh != nil { - m.onRefresh() - } + m = m.beginTimeWindowRefresh(tw) m.settings.status = "saving time window..." return m, m.persistTimeWindowCmd(string(tw)) } From 3172b4f41c94b0d0c48fd2ecbea5f353c8d72483 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 11:57:58 +0100 Subject: [PATCH 02/32] refactor: share cursor state db readers - parse composerData and bubbleId rows once into shared cursor state records - reuse shared state records in both the dashboard provider and telemetry collector - remove the extra telemetry bubble pass and add direct shared-reader tests - update the audit table to narrow the remaining cursor duplication scope Co-Authored-By: Claude Opus 4.6 --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 3 +- internal/providers/cursor/cursor.go | 159 ++----- internal/providers/cursor/state_records.go | 255 ++++++++++++ .../providers/cursor/state_records_test.go | 115 +++++ internal/providers/cursor/telemetry.go | 393 ++++-------------- 5 files changed, 490 insertions(+), 435 deletions(-) create mode 100644 internal/providers/cursor/state_records.go create mode 100644 internal/providers/cursor/state_records_test.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index b1d41a6..5fe4799 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -27,6 +27,7 @@ This table captures every issue found in this 
pass. It is broad and high-signal, | R7 | Fixed | TUI side-effect boundary | `internal/tui/model.go`, `internal/dashboardapp/service.go`, `cmd/openusage/dashboard.go` | `tui.Model` no longer directly persists settings, saves credentials, installs integrations, or validates API keys. Those side effects now go through an injected dashboard application service. | More UI decomposition is still useful, but the highest-leak side effects are no longer hardcoded in the model. | | R8 | Fixed | Codex parser duplication | `internal/providers/codex/session_decoder.go`, `internal/providers/codex/codex.go`, `internal/providers/codex/telemetry_usage.go` | Codex session JSONL parsing now runs through one shared decoder used by both the dashboard breakdown reader and telemetry ingestion path. | Apply the same consolidation to Claude Code and Cursor. | | R9 | Fixed | Claude Code parser duplication | `internal/providers/claude_code/conversation_records.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/telemetry_usage.go` | Claude Code JSONL parsing, token total calculation, and usage/tool dedupe keys now run through one shared normalized conversation-record helper used by both the dashboard aggregator and telemetry collector. | Apply the same consolidation pattern to Cursor. | +| R10 | Fixed | Cursor state DB reader duplication | `internal/providers/cursor/state_records.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/telemetry.go` | Cursor `composerData` and `bubbleId` rows from `cursorDiskKV` are now parsed once into shared record types and projected from both the dashboard provider and telemetry collector. This also removes the extra telemetry pass that queried `bubbleId` separately for tool and token events. | Tracking DB and daily-stats duplication still remain. | ## Action Table @@ -39,7 +40,7 @@ This table captures every issue found in this pass. 
It is broad and high-signal, | A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. | Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. | | A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go:160-1757` | `usage_view.go` is simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | | A7 | P1 | Daemon service monolith | `internal/daemon/server.go:1-1211` | `server.go` owns service startup, socket server, polling, collection, retention, cache refresh, hook handling, and HTTP endpoints. | Split into `service_runtime`, `http_handlers`, `polling`, `collection`, `cache`, and `hook_ingest` files/types. | Lower mental load and easier concurrency review. | -| A8 | P1 | Shared parser duplication | `internal/providers/cursor/cursor.go:1087-2086`, `internal/providers/cursor/telemetry.go:97-231` | Codex and Claude Code are now consolidated, but Cursor still parses overlapping local source formats in multiple flows. Snapshot and telemetry ingestion paths can still drift there. | Build one canonical decoder/projection layer for Cursor local sources and use it from both dashboard and telemetry paths. | Eliminates duplicated bugfix work and reduces format drift risk. 
| +| A8 | P1 | Shared parser duplication | `internal/providers/cursor/cursor.go:1087-1305`, `internal/providers/cursor/telemetry.go:107-223`, `internal/providers/cursor/cursor.go:1699-1735`, `internal/providers/cursor/telemetry.go:791-860` | Cursor state DB rows are now consolidated, but the tracking DB (`ai_code_hashes`) and daily stats (`ItemTable`) flows are still interpreted separately in dashboard and telemetry code. Snapshot and telemetry ingestion can still drift there. | Extend the same shared-record approach to tracking DB rows and daily-stats envelopes so both consumers project from one parsed source model. | Eliminates the last remaining raw-source drift risk in Cursor. | | A9 | P2 | Detached background work ownership | `internal/daemon/server.go:1108`, `internal/daemon/server.go:1126`, `internal/daemon/server.go:306-318` | Read-model cache refreshes are launched from request handlers with `context.Background()`. They are bounded by timeout but detached from service lifecycle ownership. | Give `Service` a root context and use it for detached async refreshes. Optionally expose a bounded worker pool instead of unconstrained goroutine creation. | Safer shutdown semantics and fewer background task ownership ambiguities. | | A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/cursor/cursor.go:478`, `internal/providers/cursor/cursor.go:1704-1711`, `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Providers and analytics logic read `time.Now()` directly in many places, often mixing local time and UTC. This is hard to test and easy to get subtly wrong. | Introduce a small clock abstraction in time-sensitive subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. 
| | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | diff --git a/internal/providers/cursor/cursor.go b/internal/providers/cursor/cursor.go index 3811859..b6bb9c9 100644 --- a/internal/providers/cursor/cursor.go +++ b/internal/providers/cursor/cursor.go @@ -1685,11 +1685,20 @@ func (p *Provider) readStateDB(ctx context.Context, dbPath string, snap *core.Us return fmt.Errorf("state DB not accessible: %w", err) } + composerRecords, err := loadComposerSessionRecords(ctx, db) + if err != nil { + log.Printf("[cursor] composerData query error: %v", err) + } + bubbleRecords, err := loadBubbleRecords(ctx, db) + if err != nil { + log.Printf("[cursor] bubbleId query error: %v", err) + } + p.readDailyStatsToday(ctx, db, snap) p.readDailyStatsSeries(ctx, db, snap) - p.readComposerSessions(ctx, db, snap) + p.readComposerSessions(composerRecords, snap) p.readStateMetadata(ctx, db, snap) - p.readToolUsage(ctx, db, snap) + p.readToolUsage(bubbleRecords, snap) return nil } @@ -1732,32 +1741,7 @@ func (p *Provider) readDailyStatsToday(ctx context.Context, db *sql.DB, snap *co } } -func (p *Provider) readComposerSessions(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - rows, err := db.QueryContext(ctx, ` - SELECT json_extract(value, '$.usageData'), - json_extract(value, '$.unifiedMode'), - json_extract(value, '$.createdAt'), - json_extract(value, '$.totalLinesAdded'), - json_extract(value, '$.totalLinesRemoved'), - json_extract(value, '$.contextTokensUsed'), - json_extract(value, '$.contextTokenLimit'), - 
json_extract(value, '$.filesChangedCount'), - json_extract(value, '$.subagentInfo.subagentTypeName'), - json_extract(value, '$.isAgentic'), - json_extract(value, '$.forceMode'), - json_extract(value, '$.addedFiles'), - json_extract(value, '$.removedFiles'), - json_extract(value, '$.status') - FROM cursorDiskKV - WHERE key LIKE 'composerData:%' - AND json_extract(value, '$.usageData') IS NOT NULL - AND json_extract(value, '$.usageData') != '{}'`) - if err != nil { - log.Printf("[cursor] composerData query error: %v", err) - return - } - defer rows.Close() - +func (p *Provider) readComposerSessions(records []cursorComposerSessionRecord, snap *core.UsageSnapshot) { var ( totalCostCents float64 totalRequests int @@ -1787,83 +1771,50 @@ func (p *Provider) readComposerSessions(ctx context.Context, db *sql.DB, snap *c now := time.Now() todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) - for rows.Next() { - var usageJSON sql.NullString - var mode sql.NullString - var createdAt sql.NullInt64 - var linesAdded sql.NullInt64 - var linesRemoved sql.NullInt64 - var ctxUsed sql.NullFloat64 - var ctxLimit sql.NullFloat64 - var filesChanged sql.NullInt64 - var subagentType sql.NullString - var isAgentic sql.NullBool - var forceMode sql.NullString - var addedFiles sql.NullInt64 - var removedFiles sql.NullInt64 - var status sql.NullString - if rows.Scan(&usageJSON, &mode, &createdAt, &linesAdded, &linesRemoved, - &ctxUsed, &ctxLimit, &filesChanged, &subagentType, - &isAgentic, &forceMode, &addedFiles, &removedFiles, &status) != nil { - continue - } - if !usageJSON.Valid || usageJSON.String == "" || usageJSON.String == "{}" { - continue - } - - var usage map[string]composerModelUsage - if json.Unmarshal([]byte(usageJSON.String), &usage) != nil { - continue - } - + for _, record := range records { totalSessions++ - if mode.Valid && mode.String != "" { - modeSessions[mode.String]++ + if record.Mode != "" { + modeSessions[record.Mode]++ } - if 
isAgentic.Valid { - if isAgentic.Bool { + if record.IsAgentic != nil { + if *record.IsAgentic { agenticSessions++ } else { nonAgenticSessions++ } } - if forceMode.Valid && forceMode.String != "" { - forceModes[forceMode.String]++ + if record.ForceMode != "" { + forceModes[record.ForceMode]++ } - if status.Valid && status.String != "" { - statusCounts[status.String]++ + if record.Status != "" { + statusCounts[record.Status]++ } - if linesAdded.Valid { - totalLinesAdded += int(linesAdded.Int64) - } - if linesRemoved.Valid { - totalLinesRemoved += int(linesRemoved.Int64) + totalLinesAdded += record.LinesAdded + totalLinesRemoved += record.LinesRemoved + if record.FilesChanged > 0 { + totalFilesChanged += record.FilesChanged } - if filesChanged.Valid && filesChanged.Int64 > 0 { - totalFilesChanged += int(filesChanged.Int64) + if record.AddedFiles > 0 { + totalFilesCreated += record.AddedFiles } - if addedFiles.Valid && addedFiles.Int64 > 0 { - totalFilesCreated += int(addedFiles.Int64) + if record.RemovedFiles > 0 { + totalFilesRemoved += record.RemovedFiles } - if removedFiles.Valid && removedFiles.Int64 > 0 { - totalFilesRemoved += int(removedFiles.Int64) - } - if ctxUsed.Valid && ctxUsed.Float64 > 0 && ctxLimit.Valid && ctxLimit.Float64 > 0 { - totalContextUsed += ctxUsed.Float64 - totalContextLimit += ctxLimit.Float64 + if record.ContextTokensUsed > 0 && record.ContextTokenLimit > 0 { + totalContextUsed += record.ContextTokensUsed + totalContextLimit += record.ContextTokenLimit contextSampleCount++ } - if subagentType.Valid && subagentType.String != "" { - subagentTypes[subagentType.String]++ + if record.SubagentType != "" { + subagentTypes[record.SubagentType]++ } var sessionDay string - if createdAt.Valid && createdAt.Int64 > 0 { - t := time.UnixMilli(createdAt.Int64) - sessionDay = t.In(now.Location()).Format("2006-01-02") + if !record.OccurredAt.IsZero() { + sessionDay = record.OccurredAt.In(now.Location()).Format("2006-01-02") } - for model, mu := range usage 
{ + for model, mu := range record.Usage { totalCostCents += mu.CostInCents totalRequests += mu.Amount modelCosts[model] += mu.CostInCents @@ -1873,7 +1824,7 @@ func (p *Provider) readComposerSessions(ctx context.Context, db *sql.DB, snap *c dailyCost[sessionDay] += mu.CostInCents dailyRequests[sessionDay] += float64(mu.Amount) } - if createdAt.Valid && time.UnixMilli(createdAt.Int64).After(todayStart) { + if !record.OccurredAt.IsZero() && record.OccurredAt.After(todayStart) { todayCostCents += mu.CostInCents todayRequests += mu.Amount } @@ -2116,44 +2067,22 @@ func (p *Provider) readStateMetadata(ctx context.Context, db *sql.DB, snap *core } } -// readToolUsage extracts tool call statistics from the bubbleId entries -// in cursorDiskKV. Each AI-response bubble (type=2) may contain a -// toolFormerData object with the tool name, status, and other metadata. -func (p *Provider) readToolUsage(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - rows, err := db.QueryContext(ctx, ` - SELECT json_extract(value, '$.toolFormerData.name') as tool_name, - json_extract(value, '$.toolFormerData.status') as tool_status - FROM cursorDiskKV - WHERE key LIKE 'bubbleId:%' - AND json_extract(value, '$.type') = 2 - AND json_extract(value, '$.toolFormerData.name') IS NOT NULL - AND json_extract(value, '$.toolFormerData.name') != ''`) - if err != nil { - log.Printf("[cursor] tool usage query error: %v", err) - return - } - defer rows.Close() - +func (p *Provider) readToolUsage(records []cursorBubbleRecord, snap *core.UsageSnapshot) { toolCounts := make(map[string]int) statusCounts := make(map[string]int) var totalCalls int - for rows.Next() { - var toolName sql.NullString - var toolStatus sql.NullString - if rows.Scan(&toolName, &toolStatus) != nil { - continue - } - if !toolName.Valid || toolName.String == "" { + for _, record := range records { + if strings.TrimSpace(record.ToolName) == "" { continue } - name := normalizeToolName(toolName.String) + name := 
normalizeToolName(record.ToolName) toolCounts[name]++ totalCalls++ - if toolStatus.Valid && toolStatus.String != "" { - statusCounts[toolStatus.String]++ + if strings.TrimSpace(record.ToolStatus) != "" { + statusCounts[record.ToolStatus]++ } } diff --git a/internal/providers/cursor/state_records.go b/internal/providers/cursor/state_records.go new file mode 100644 index 0000000..f2c15b5 --- /dev/null +++ b/internal/providers/cursor/state_records.go @@ -0,0 +1,255 @@ +package cursor + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +type cursorComposerSessionRecord struct { + Key string + SessionID string + OccurredAt time.Time + Usage map[string]composerModelUsage + Mode string + ForceMode string + IsAgentic *bool + LinesAdded int + LinesRemoved int + ModelConfigName string + NewlyCreatedFiles int + AddedFiles int + RemovedFiles int + ContextTokensUsed float64 + ContextTokenLimit float64 + FilesChanged int + SubagentType string + Status string +} + +type cursorBubbleRecord struct { + Key string + BubbleID string + SessionID string + ToolName string + ToolStatus string + Model string + InputTokens int64 + OutputTokens int64 +} + +func loadComposerSessionRecords(ctx context.Context, db *sql.DB) ([]cursorComposerSessionRecord, error) { + rows, err := db.QueryContext(ctx, ` + SELECT key, + json_extract(value, '$.usageData'), + json_extract(value, '$.createdAt'), + json_extract(value, '$.unifiedMode'), + json_extract(value, '$.forceMode'), + json_extract(value, '$.isAgentic'), + json_extract(value, '$.totalLinesAdded'), + json_extract(value, '$.totalLinesRemoved'), + json_extract(value, '$.modelConfig.modelName'), + json_extract(value, '$.newlyCreatedFiles'), + json_extract(value, '$.addedFiles'), + json_extract(value, '$.removedFiles'), + json_extract(value, '$.contextTokensUsed'), + json_extract(value, '$.contextTokenLimit'), + json_extract(value, 
'$.filesChangedCount'), + json_extract(value, '$.subagentInfo.subagentTypeName'), + json_extract(value, '$.status') + FROM cursorDiskKV + WHERE key LIKE 'composerData:%' + AND json_extract(value, '$.usageData') IS NOT NULL + AND json_extract(value, '$.usageData') != '{}'`) + if err != nil { + return nil, fmt.Errorf("cursor: querying composerData: %w", err) + } + defer rows.Close() + + var records []cursorComposerSessionRecord + for rows.Next() { + if ctx.Err() != nil { + return records, ctx.Err() + } + + var ( + key string + usageJSON sql.NullString + createdAt sql.NullInt64 + mode sql.NullString + forceMode sql.NullString + isAgentic sql.NullBool + linesAdded sql.NullInt64 + linesRemoved sql.NullInt64 + modelConfigName sql.NullString + newlyCreated sql.NullString + addedFiles sql.NullString + removedFiles sql.NullString + ctxTokensUsed sql.NullFloat64 + ctxTokenLimit sql.NullFloat64 + filesChangedCnt sql.NullInt64 + subagentType sql.NullString + status sql.NullString + ) + if err := rows.Scan(&key, &usageJSON, &createdAt, &mode, &forceMode, &isAgentic, + &linesAdded, &linesRemoved, &modelConfigName, &newlyCreated, &addedFiles, &removedFiles, + &ctxTokensUsed, &ctxTokenLimit, &filesChangedCnt, &subagentType, &status); err != nil { + continue + } + if !usageJSON.Valid || usageJSON.String == "" || usageJSON.String == "{}" { + continue + } + + var usage map[string]composerModelUsage + if json.Unmarshal([]byte(usageJSON.String), &usage) != nil { + continue + } + + record := cursorComposerSessionRecord{ + Key: key, + SessionID: strings.TrimPrefix(key, "composerData:"), + Usage: usage, + Mode: nullableString(mode), + ForceMode: nullableString(forceMode), + LinesAdded: nullableInt(linesAdded), + LinesRemoved: nullableInt(linesRemoved), + ModelConfigName: nullableString(modelConfigName), + NewlyCreatedFiles: countJSONArrayItems(newlyCreated), + AddedFiles: countNullableInt(addedFiles), + RemovedFiles: countNullableInt(removedFiles), + ContextTokensUsed: 
nullableFloat(ctxTokensUsed), + ContextTokenLimit: nullableFloat(ctxTokenLimit), + FilesChanged: nullableInt(filesChangedCnt), + SubagentType: nullableString(subagentType), + Status: nullableString(status), + } + if createdAt.Valid && createdAt.Int64 > 0 { + record.OccurredAt = shared.UnixAuto(createdAt.Int64) + } + if isAgentic.Valid { + value := isAgentic.Bool + record.IsAgentic = &value + } + + records = append(records, record) + } + + return records, rows.Err() +} + +func loadBubbleRecords(ctx context.Context, db *sql.DB) ([]cursorBubbleRecord, error) { + rows, err := db.QueryContext(ctx, ` + SELECT key, + json_extract(value, '$.toolFormerData.name'), + json_extract(value, '$.toolFormerData.status'), + json_extract(value, '$.conversationId'), + json_extract(value, '$.tokenCount.inputTokens'), + json_extract(value, '$.tokenCount.outputTokens'), + json_extract(value, '$.model') + FROM cursorDiskKV + WHERE key LIKE 'bubbleId:%' + AND json_extract(value, '$.type') = 2`) + if err != nil { + return nil, fmt.Errorf("cursor: querying bubbleId records: %w", err) + } + defer rows.Close() + + var records []cursorBubbleRecord + for rows.Next() { + if ctx.Err() != nil { + return records, ctx.Err() + } + + var ( + key string + toolName sql.NullString + toolStatus sql.NullString + conversationID sql.NullString + inputTokens sql.NullInt64 + outputTokens sql.NullInt64 + model sql.NullString + ) + if err := rows.Scan(&key, &toolName, &toolStatus, &conversationID, &inputTokens, &outputTokens, &model); err != nil { + continue + } + + records = append(records, cursorBubbleRecord{ + Key: key, + BubbleID: strings.TrimPrefix(key, "bubbleId:"), + SessionID: nullableString(conversationID), + ToolName: nullableString(toolName), + ToolStatus: nullableString(toolStatus), + Model: nullableString(model), + InputTokens: nullableInt64(inputTokens), + OutputTokens: nullableInt64(outputTokens), + }) + } + + return records, rows.Err() +} + +func composerSessionTimestampMap(records 
[]cursorComposerSessionRecord) map[string]time.Time { + out := make(map[string]time.Time, len(records)) + for _, record := range records { + if record.SessionID == "" || record.OccurredAt.IsZero() { + continue + } + out[record.SessionID] = record.OccurredAt + } + return out +} + +func nullableString(value sql.NullString) string { + if !value.Valid { + return "" + } + return value.String +} + +func nullableInt(value sql.NullInt64) int { + if !value.Valid { + return 0 + } + return int(value.Int64) +} + +func nullableInt64(value sql.NullInt64) int64 { + if !value.Valid { + return 0 + } + return value.Int64 +} + +func nullableFloat(value sql.NullFloat64) float64 { + if !value.Valid { + return 0 + } + return value.Float64 +} + +func countJSONArrayItems(s sql.NullString) int { + if !s.Valid || s.String == "" || s.String == "[]" { + return 0 + } + var arr []any + if json.Unmarshal([]byte(s.String), &arr) != nil { + return 0 + } + return len(arr) +} + +func countNullableInt(s sql.NullString) int { + if !s.Valid || s.String == "" { + return 0 + } + var n int + if _, err := fmt.Sscanf(s.String, "%d", &n); err == nil { + return n + } + return countJSONArrayItems(s) +} diff --git a/internal/providers/cursor/state_records_test.go b/internal/providers/cursor/state_records_test.go new file mode 100644 index 0000000..452b04b --- /dev/null +++ b/internal/providers/cursor/state_records_test.go @@ -0,0 +1,115 @@ +package cursor + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "path/filepath" + "testing" + "time" +) + +func TestLoadStateRecords(t *testing.T) { + dbPath := filepath.Join(t.TempDir(), "state.vscdb") + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatalf("open state db: %v", err) + } + defer db.Close() + + if _, err := db.Exec(`CREATE TABLE cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`); err != nil { + t.Fatalf("create cursorDiskKV: %v", err) + } + + usageJSON, err := json.Marshal(map[string]composerModelUsage{ + "claude-4.5-sonnet": 
{CostInCents: 123.0, Amount: 2}, + }) + if err != nil { + t.Fatalf("marshal usage json: %v", err) + } + + createdAt := time.Date(2026, 3, 9, 10, 0, 0, 0, time.UTC).UnixMilli() + composerValue := fmt.Sprintf(`{ + "usageData": %s, + "createdAt": %d, + "unifiedMode": "agent", + "forceMode": "manual", + "isAgentic": true, + "totalLinesAdded": 10, + "totalLinesRemoved": 2, + "modelConfig": {"modelName": "claude-4.5-sonnet"}, + "newlyCreatedFiles": ["a.go"], + "addedFiles": 3, + "removedFiles": 1, + "contextTokensUsed": 120, + "contextTokenLimit": 1000, + "filesChangedCount": 4, + "subagentInfo": {"subagentTypeName": "research"}, + "status": "completed" + }`, string(usageJSON), createdAt) + if _, err := db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES (?, ?)`, "composerData:session-1", composerValue); err != nil { + t.Fatalf("insert composerData: %v", err) + } + + bubbleValue := `{ + "type": 2, + "toolFormerData": {"name": "read_file_v2", "status": "completed"}, + "conversationId": "session-1", + "tokenCount": {"inputTokens": 9, "outputTokens": 3}, + "model": "claude-4.5-sonnet" + }` + if _, err := db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES (?, ?)`, "bubbleId:bubble-1", bubbleValue); err != nil { + t.Fatalf("insert bubbleId: %v", err) + } + + composerRecords, err := loadComposerSessionRecords(context.Background(), db) + if err != nil { + t.Fatalf("loadComposerSessionRecords: %v", err) + } + if len(composerRecords) != 1 { + t.Fatalf("composer records = %d, want 1", len(composerRecords)) + } + record := composerRecords[0] + if record.SessionID != "session-1" { + t.Fatalf("session id = %q, want session-1", record.SessionID) + } + if record.OccurredAt.UnixMilli() != createdAt { + t.Fatalf("occurredAt = %d, want %d", record.OccurredAt.UnixMilli(), createdAt) + } + if record.Mode != "agent" || record.ForceMode != "manual" { + t.Fatalf("modes = %q/%q", record.Mode, record.ForceMode) + } + if record.IsAgentic == nil || !*record.IsAgentic { + t.Fatalf("isAgentic = 
%#v, want true", record.IsAgentic) + } + if record.NewlyCreatedFiles != 1 || record.AddedFiles != 3 || record.RemovedFiles != 1 { + t.Fatalf("file counts = %+v", record) + } + if record.ContextTokensUsed != 120 || record.ContextTokenLimit != 1000 { + t.Fatalf("context usage = %.0f/%.0f", record.ContextTokensUsed, record.ContextTokenLimit) + } + + bubbleRecords, err := loadBubbleRecords(context.Background(), db) + if err != nil { + t.Fatalf("loadBubbleRecords: %v", err) + } + if len(bubbleRecords) != 1 { + t.Fatalf("bubble records = %d, want 1", len(bubbleRecords)) + } + bubble := bubbleRecords[0] + if bubble.BubbleID != "bubble-1" { + t.Fatalf("bubble id = %q, want bubble-1", bubble.BubbleID) + } + if bubble.ToolName != "read_file_v2" || bubble.ToolStatus != "completed" { + t.Fatalf("tool payload = %+v", bubble) + } + if bubble.SessionID != "session-1" || bubble.InputTokens != 9 || bubble.OutputTokens != 3 { + t.Fatalf("bubble tokens/session = %+v", bubble) + } + + timestamps := composerSessionTimestampMap(composerRecords) + if ts, ok := timestamps["session-1"]; !ok || ts.UnixMilli() != createdAt { + t.Fatalf("timestamp map = %+v, want session-1 => %d", timestamps, createdAt) + } +} diff --git a/internal/providers/cursor/telemetry.go b/internal/providers/cursor/telemetry.go index 52ae1be..6d6c0c9 100644 --- a/internal/providers/cursor/telemetry.go +++ b/internal/providers/cursor/telemetry.go @@ -243,22 +243,28 @@ func collectStateDBEvents(ctx context.Context, dbPath string) ([]shared.Telemetr } var out []shared.TelemetryEvent + composerRecords, err := loadComposerSessionRecords(ctx, db) + if err != nil { + composerRecords = nil + } + bubbleRecords, err := loadBubbleRecords(ctx, db) + if err != nil { + bubbleRecords = nil + } + sessionTimestamps := composerSessionTimestampMap(composerRecords) - // Collect composer session usage events. 
- composerEvents, err := collectComposerEvents(ctx, db, dbPath) - if err == nil { + composerEvents := composerEventsFromRecords(composerRecords, dbPath) + if len(composerEvents) > 0 { out = append(out, composerEvents...) } - // Collect tool usage events from bubble data. - toolEvents, err := collectToolEvents(ctx, db, dbPath) - if err == nil { + toolEvents := toolEventsFromBubbleRecords(bubbleRecords, sessionTimestamps, dbPath) + if len(toolEvents) > 0 { out = append(out, toolEvents...) } - // Collect token counts from bubble entries and attach to composer sessions. - tokenEvents, err := collectBubbleTokenEvents(ctx, db, dbPath) - if err == nil { + tokenEvents := bubbleTokenEventsFromRecords(bubbleRecords, sessionTimestamps, dbPath) + if len(tokenEvents) > 0 { out = append(out, tokenEvents...) } @@ -272,93 +278,21 @@ func collectStateDBEvents(ctx context.Context, dbPath string) ([]shared.Telemetr return out, nil } - -// collectComposerEvents extracts usage data from composerData entries. -// Each composer session has a usageData map with per-model cost and request counts, -// plus session metadata like mode, model config, and file changes. 
-func collectComposerEvents(ctx context.Context, db *sql.DB, dbPath string) ([]shared.TelemetryEvent, error) { - rows, err := db.QueryContext(ctx, ` - SELECT key, - json_extract(value, '$.usageData'), - json_extract(value, '$.createdAt'), - json_extract(value, '$.unifiedMode'), - json_extract(value, '$.forceMode'), - json_extract(value, '$.isAgentic'), - json_extract(value, '$.totalLinesAdded'), - json_extract(value, '$.totalLinesRemoved'), - json_extract(value, '$.modelConfig.modelName'), - json_extract(value, '$.newlyCreatedFiles'), - json_extract(value, '$.addedFiles'), - json_extract(value, '$.removedFiles'), - json_extract(value, '$.contextTokensUsed'), - json_extract(value, '$.contextTokenLimit'), - json_extract(value, '$.filesChangedCount') - FROM cursorDiskKV - WHERE key LIKE 'composerData:%' - AND json_extract(value, '$.usageData') IS NOT NULL - AND json_extract(value, '$.usageData') != '{}'`) - if err != nil { - return nil, fmt.Errorf("cursor: querying composerData: %w", err) - } - defer rows.Close() - +func composerEventsFromRecords(records []cursorComposerSessionRecord, dbPath string) []shared.TelemetryEvent { var out []shared.TelemetryEvent - for rows.Next() { - if ctx.Err() != nil { - return out, ctx.Err() - } - - var ( - key string - usageJSON sql.NullString - createdAt sql.NullInt64 - mode sql.NullString - forceMode sql.NullString - isAgentic sql.NullBool - linesAdded sql.NullInt64 - linesRemoved sql.NullInt64 - modelConfigName sql.NullString - newlyCreated sql.NullString - addedFiles sql.NullString - removedFiles sql.NullString - ctxTokensUsed sql.NullInt64 - ctxTokenLimit sql.NullInt64 - filesChangedCnt sql.NullInt64 - ) - if err := rows.Scan(&key, &usageJSON, &createdAt, &mode, &forceMode, &isAgentic, - &linesAdded, &linesRemoved, &modelConfigName, &newlyCreated, &addedFiles, &removedFiles, - &ctxTokensUsed, &ctxTokenLimit, &filesChangedCnt); err != nil { - continue - } - if !usageJSON.Valid || usageJSON.String == "" || usageJSON.String == "{}" { 
- continue - } - - sessionID := strings.TrimPrefix(key, "composerData:") - - var usage map[string]composerModelUsage - if json.Unmarshal([]byte(usageJSON.String), &usage) != nil { - continue - } - - occurredAt := time.Now().UTC() - if createdAt.Valid && createdAt.Int64 > 0 { - occurredAt = shared.UnixAuto(createdAt.Int64) - } - - for model, mu := range usage { - if mu.Amount <= 0 && mu.CostInCents <= 0 { + for _, record := range records { + for model, usage := range record.Usage { + if usage.Amount <= 0 && usage.CostInCents <= 0 { continue } - costUSD := mu.CostInCents / 100.0 - messageID := fmt.Sprintf("cursor-composer:%s:%s", sessionID, sanitizeCursorMetricName(model)) - + costUSD := usage.CostInCents / 100.0 + messageID := fmt.Sprintf("cursor-composer:%s:%s", record.SessionID, sanitizeCursorMetricName(model)) payload := map[string]any{ "source": map[string]any{ "db_path": dbPath, "table": "cursorDiskKV", - "key": key, + "key": record.Key, }, "client": "IDE", "cursor_source": "composer", @@ -366,52 +300,49 @@ func collectComposerEvents(ctx context.Context, db *sql.DB, dbPath string) ([]sh if upstream := inferProviderFromModel(model); upstream != "cursor" { payload["upstream_provider"] = upstream } - if mode.Valid && mode.String != "" { - payload["mode"] = mode.String + if record.Mode != "" { + payload["mode"] = record.Mode } - if forceMode.Valid && forceMode.String != "" { - payload["force_mode"] = forceMode.String + if record.ForceMode != "" { + payload["force_mode"] = record.ForceMode } - if isAgentic.Valid { - payload["is_agentic"] = isAgentic.Bool + if record.IsAgentic != nil { + payload["is_agentic"] = *record.IsAgentic } - if linesAdded.Valid && linesAdded.Int64 > 0 { - payload["lines_added"] = linesAdded.Int64 + if record.LinesAdded > 0 { + payload["lines_added"] = record.LinesAdded } - if linesRemoved.Valid && linesRemoved.Int64 > 0 { - payload["lines_removed"] = linesRemoved.Int64 + if record.LinesRemoved > 0 { + payload["lines_removed"] = 
record.LinesRemoved } - if modelConfigName.Valid && modelConfigName.String != "" { - payload["model_config"] = modelConfigName.String + if record.ModelConfigName != "" { + payload["model_config"] = record.ModelConfigName } - newFileCount := countJSONArrayItems(newlyCreated) - addedCount := countNullableInt(addedFiles) - removedCount := countNullableInt(removedFiles) - if newFileCount > 0 { - payload["newly_created_files"] = newFileCount + if record.NewlyCreatedFiles > 0 { + payload["newly_created_files"] = record.NewlyCreatedFiles } - if addedCount > 0 { - payload["added_files"] = addedCount + if record.AddedFiles > 0 { + payload["added_files"] = record.AddedFiles } - if removedCount > 0 { - payload["removed_files"] = removedCount + if record.RemovedFiles > 0 { + payload["removed_files"] = record.RemovedFiles } - if ctxTokensUsed.Valid && ctxTokensUsed.Int64 > 0 { - payload["context_tokens_used"] = ctxTokensUsed.Int64 + if record.ContextTokensUsed > 0 { + payload["context_tokens_used"] = record.ContextTokensUsed } - if ctxTokenLimit.Valid && ctxTokenLimit.Int64 > 0 { - payload["context_token_limit"] = ctxTokenLimit.Int64 + if record.ContextTokenLimit > 0 { + payload["context_token_limit"] = record.ContextTokenLimit } - if filesChangedCnt.Valid && filesChangedCnt.Int64 > 0 { - payload["files_changed"] = filesChangedCnt.Int64 + if record.FilesChanged > 0 { + payload["files_changed"] = record.FilesChanged } out = append(out, shared.TelemetryEvent{ SchemaVersion: telemetryCursorSQLiteSchema, Channel: shared.TelemetryChannelSQLite, - OccurredAt: occurredAt, + OccurredAt: record.OccurredAt, AccountID: "", - SessionID: sessionID, + SessionID: record.SessionID, MessageID: messageID, ProviderID: "cursor", AgentName: "cursor", @@ -419,148 +350,52 @@ func collectComposerEvents(ctx context.Context, db *sql.DB, dbPath string) ([]sh ModelRaw: model, TokenUsage: core.TokenUsage{ CostUSD: core.Float64Ptr(costUSD), - Requests: core.Int64Ptr(int64(mu.Amount)), + Requests: 
core.Int64Ptr(int64(usage.Amount)), }, Status: shared.TelemetryStatusOK, Payload: payload, }) } } - - return out, rows.Err() + return out } -// collectToolEvents extracts tool call data from bubbleId entries in the -// state database. Each AI response bubble (type=2) may contain toolFormerData. -func collectToolEvents(ctx context.Context, db *sql.DB, dbPath string) ([]shared.TelemetryEvent, error) { - // Pre-query composerData to build a map of conversationId → createdAt - // so tool events can be assigned meaningful timestamps. - sessionTimestamps := buildSessionTimestampMap(ctx, db) - - rows, err := db.QueryContext(ctx, ` - SELECT key, - json_extract(value, '$.toolFormerData.name'), - json_extract(value, '$.toolFormerData.status'), - json_extract(value, '$.conversationId') - FROM cursorDiskKV - WHERE key LIKE 'bubbleId:%' - AND json_extract(value, '$.type') = 2 - AND json_extract(value, '$.toolFormerData.name') IS NOT NULL - AND json_extract(value, '$.toolFormerData.name') != ''`) - if err != nil { - return nil, fmt.Errorf("cursor: querying bubbleId tool data: %w", err) - } - defer rows.Close() - +func toolEventsFromBubbleRecords(records []cursorBubbleRecord, sessionTimestamps map[string]time.Time, dbPath string) []shared.TelemetryEvent { var out []shared.TelemetryEvent - for rows.Next() { - if ctx.Err() != nil { - return out, ctx.Err() - } - - var ( - key string - toolNameRaw sql.NullString - toolStatusRaw sql.NullString - conversationID sql.NullString - ) - if err := rows.Scan(&key, &toolNameRaw, &toolStatusRaw, &conversationID); err != nil { - continue - } - if !toolNameRaw.Valid || toolNameRaw.String == "" { + for _, record := range records { + if strings.TrimSpace(record.ToolName) == "" { continue } - - toolName := normalizeToolName(toolNameRaw.String) - toolCallID := strings.TrimPrefix(key, "bubbleId:") - - status := shared.TelemetryStatusOK - if toolStatusRaw.Valid { - status = mapCursorToolStatus(toolStatusRaw.String) - } - - sessionID := "" - if 
conversationID.Valid && conversationID.String != "" { - sessionID = conversationID.String - } - - // Derive timestamp from the parent composer session's createdAt. - // If no matching session is found, use zero time so the telemetry - // store can handle it appropriately. - var occurredAt time.Time - if sessionID != "" { - if ts, ok := sessionTimestamps[sessionID]; ok { - occurredAt = ts - } - } - + status := mapCursorToolStatus(record.ToolStatus) + occurredAt := sessionTimestamps[record.SessionID] out = append(out, shared.TelemetryEvent{ SchemaVersion: telemetryCursorSQLiteSchema, Channel: shared.TelemetryChannelSQLite, OccurredAt: occurredAt, AccountID: "", - SessionID: sessionID, - ToolCallID: toolCallID, + SessionID: record.SessionID, + ToolCallID: record.BubbleID, ProviderID: "cursor", AgentName: "cursor", EventType: shared.TelemetryEventTypeToolUsage, TokenUsage: core.TokenUsage{ Requests: core.Int64Ptr(1), }, - ToolName: strings.ToLower(toolName), + ToolName: strings.ToLower(normalizeToolName(record.ToolName)), Status: status, Payload: map[string]any{ "source": map[string]any{ "db_path": dbPath, "table": "cursorDiskKV", - "key": key, + "key": record.Key, }, "client": "IDE", - "raw_tool_name": toolNameRaw.String, - "raw_tool_status": toolStatusRaw.String, + "raw_tool_name": record.ToolName, + "raw_tool_status": record.ToolStatus, }, }) } - - return out, rows.Err() -} - -// buildSessionTimestampMap queries composerData entries from cursorDiskKV and -// returns a map of sessionID (composerData key suffix) → createdAt time. -// This is used to assign meaningful timestamps to tool events (bubbleId entries) -// that reference a conversationId matching a composer session. 
-func buildSessionTimestampMap(ctx context.Context, db *sql.DB) map[string]time.Time { - m := make(map[string]time.Time) - - rows, err := db.QueryContext(ctx, ` - SELECT key, json_extract(value, '$.createdAt') - FROM cursorDiskKV - WHERE key LIKE 'composerData:%' - AND json_extract(value, '$.createdAt') IS NOT NULL`) - if err != nil { - return m - } - defer rows.Close() - - for rows.Next() { - if ctx.Err() != nil { - return m - } - var ( - key string - createdAt sql.NullInt64 - ) - if err := rows.Scan(&key, &createdAt); err != nil { - continue - } - if !createdAt.Valid || createdAt.Int64 <= 0 { - continue - } - sessionID := strings.TrimPrefix(key, "composerData:") - m[sessionID] = shared.UnixAuto(createdAt.Int64) - } - - return m + return out } // appendCursorDedupEvents appends events to the output slice, deduplicating @@ -690,83 +525,29 @@ func normalizeFileExtension(ext string) string { // state DB. Each AI response bubble (type=2) may have a tokenCount with // inputTokens/outputTokens. These are emitted as message_usage events linked // to their parent composer session via conversationId. 
-func collectBubbleTokenEvents(ctx context.Context, db *sql.DB, dbPath string) ([]shared.TelemetryEvent, error) { - sessionTimestamps := buildSessionTimestampMap(ctx, db) - - rows, err := db.QueryContext(ctx, ` - SELECT key, - json_extract(value, '$.tokenCount.inputTokens'), - json_extract(value, '$.tokenCount.outputTokens'), - json_extract(value, '$.conversationId'), - json_extract(value, '$.model') - FROM cursorDiskKV - WHERE key LIKE 'bubbleId:%' - AND json_extract(value, '$.type') = 2 - AND json_extract(value, '$.tokenCount') IS NOT NULL - AND json_extract(value, '$.tokenCount.inputTokens') > 0`) - if err != nil { - return nil, fmt.Errorf("cursor: querying bubbleId tokens: %w", err) - } - defer rows.Close() - +func bubbleTokenEventsFromRecords(records []cursorBubbleRecord, sessionTimestamps map[string]time.Time, dbPath string) []shared.TelemetryEvent { var out []shared.TelemetryEvent - for rows.Next() { - if ctx.Err() != nil { - return out, ctx.Err() - } - - var ( - key string - inputTokens sql.NullInt64 - outputTokens sql.NullInt64 - conversationID sql.NullString - model sql.NullString - ) - if err := rows.Scan(&key, &inputTokens, &outputTokens, &conversationID, &model); err != nil { + for _, record := range records { + if record.InputTokens <= 0 { continue } - if !inputTokens.Valid || inputTokens.Int64 <= 0 { - continue - } - - bubbleID := strings.TrimPrefix(key, "bubbleId:") - messageID := fmt.Sprintf("cursor-bubble-tokens:%s", bubbleID) - - sessionID := "" - if conversationID.Valid && conversationID.String != "" { - sessionID = conversationID.String - } - - var occurredAt time.Time - if sessionID != "" { - if ts, ok := sessionTimestamps[sessionID]; ok { - occurredAt = ts - } - } - - modelRaw := "" - if model.Valid { - modelRaw = model.String - } - + messageID := fmt.Sprintf("cursor-bubble-tokens:%s", record.BubbleID) + occurredAt := sessionTimestamps[record.SessionID] var inTok, outTok *int64 - if inputTokens.Valid && inputTokens.Int64 > 0 { - inTok = 
core.Int64Ptr(inputTokens.Int64) + inTok = core.Int64Ptr(record.InputTokens) + if record.OutputTokens > 0 { + outTok = core.Int64Ptr(record.OutputTokens) } - if outputTokens.Valid && outputTokens.Int64 > 0 { - outTok = core.Int64Ptr(outputTokens.Int64) - } - out = append(out, shared.TelemetryEvent{ SchemaVersion: telemetryCursorSQLiteSchema, Channel: shared.TelemetryChannelSQLite, OccurredAt: occurredAt, - SessionID: sessionID, + SessionID: record.SessionID, MessageID: messageID, ProviderID: "cursor", AgentName: "cursor", EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: modelRaw, + ModelRaw: record.Model, TokenUsage: core.TokenUsage{ InputTokens: inTok, OutputTokens: outTok, @@ -777,15 +558,14 @@ func collectBubbleTokenEvents(ctx context.Context, db *sql.DB, dbPath string) ([ "source": map[string]any{ "db_path": dbPath, "table": "cursorDiskKV", - "key": key, + "key": record.Key, }, "client": "IDE", "cursor_source": "composer", }, }) } - - return out, rows.Err() + return out } // collectDailyStatsEvents extracts daily code tracking stats from ItemTable. @@ -958,31 +738,6 @@ type cursorDailyStats struct { ComposerAcceptedLines int `json:"composerAcceptedLines"` } -// countJSONArrayItems parses a nullable string as a JSON array and returns its length. -func countJSONArrayItems(s sql.NullString) int { - if !s.Valid || s.String == "" || s.String == "[]" { - return 0 - } - var arr []any - if json.Unmarshal([]byte(s.String), &arr) != nil { - return 0 - } - return len(arr) -} - -// countNullableInt parses a nullable string as an integer count. -// Handles both integer values and JSON array strings. 
-func countNullableInt(s sql.NullString) int { - if !s.Valid || s.String == "" { - return 0 - } - var n int - if _, err := fmt.Sscanf(s.String, "%d", &n); err == nil { - return n - } - return countJSONArrayItems(s) -} - func truncateString(s string, maxLen int) string { if len(s) <= maxLen { return s From 94fc9e5fce046b56754328641b6bd7a55b11531c Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 11:59:20 +0100 Subject: [PATCH 03/32] fix: bind daemon refresh work to service lifecycle - store the daemon service root context on the service instance - use the service-owned context for async read-model cache refreshes from HTTP handlers - keep timeout bounds while removing detached context.Background ownership - update the audit table to mark the ownership cleanup as fixed Co-Authored-By: Claude Opus 4.6 --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 2 +- internal/daemon/server.go | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 5fe4799..5b2bc04 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -28,6 +28,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | R8 | Fixed | Codex parser duplication | `internal/providers/codex/session_decoder.go`, `internal/providers/codex/codex.go`, `internal/providers/codex/telemetry_usage.go` | Codex session JSONL parsing now runs through one shared decoder used by both the dashboard breakdown reader and telemetry ingestion path. | Apply the same consolidation to Claude Code and Cursor. 
| | R9 | Fixed | Claude Code parser duplication | `internal/providers/claude_code/conversation_records.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/telemetry_usage.go` | Claude Code JSONL parsing, token total calculation, and usage/tool dedupe keys now run through one shared normalized conversation-record helper used by both the dashboard aggregator and telemetry collector. | Apply the same consolidation pattern to Cursor. | | R10 | Fixed | Cursor state DB reader duplication | `internal/providers/cursor/state_records.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/telemetry.go` | Cursor `composerData` and `bubbleId` rows from `cursorDiskKV` are now parsed once into shared record types and projected from both the dashboard provider and telemetry collector. This also removes the extra telemetry pass that queried `bubbleId` separately for tool and token events. | Tracking DB and daily-stats duplication still remain. | +| R11 | Fixed | Detached read-model refresh ownership | `internal/daemon/server.go` | Async read-model cache refreshes triggered from HTTP handlers now inherit the daemon service root context instead of launching from `context.Background()`. | If a worker pool is added later, reuse the same service-owned context there too. | ## Action Table @@ -41,7 +42,6 @@ This table captures every issue found in this pass. It is broad and high-signal, | A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go:160-1757` | `usage_view.go` is simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. 
| | A7 | P1 | Daemon service monolith | `internal/daemon/server.go:1-1211` | `server.go` owns service startup, socket server, polling, collection, retention, cache refresh, hook handling, and HTTP endpoints. | Split into `service_runtime`, `http_handlers`, `polling`, `collection`, `cache`, and `hook_ingest` files/types. | Lower mental load and easier concurrency review. | | A8 | P1 | Shared parser duplication | `internal/providers/cursor/cursor.go:1087-1305`, `internal/providers/cursor/telemetry.go:107-223`, `internal/providers/cursor/cursor.go:1699-1735`, `internal/providers/cursor/telemetry.go:791-860` | Cursor state DB rows are now consolidated, but the tracking DB (`ai_code_hashes`) and daily stats (`ItemTable`) flows are still interpreted separately in dashboard and telemetry code. Snapshot and telemetry ingestion can still drift there. | Extend the same shared-record approach to tracking DB rows and daily-stats envelopes so both consumers project from one parsed source model. | Eliminates the last remaining raw-source drift risk in Cursor. | -| A9 | P2 | Detached background work ownership | `internal/daemon/server.go:1108`, `internal/daemon/server.go:1126`, `internal/daemon/server.go:306-318` | Read-model cache refreshes are launched from request handlers with `context.Background()`. They are bounded by timeout but detached from service lifecycle ownership. | Give `Service` a root context and use it for detached async refreshes. Optionally expose a bounded worker pool instead of unconstrained goroutine creation. | Safer shutdown semantics and fewer background task ownership ambiguities. 
| | A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/cursor/cursor.go:478`, `internal/providers/cursor/cursor.go:1704-1711`, `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Providers and analytics logic read `time.Now()` directly in many places, often mixing local time and UTC. This is hard to test and easy to get subtly wrong. | Introduce a small clock abstraction in time-sensitive subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A13 | P3 | Logging/throttling utilities are ad hoc | `internal/daemon/server.go:194-237`, `internal/daemon/runtime.go:198-207`, `internal/core/trace.go:14-27` | Log throttling and trace behavior are implemented in several small local patterns instead of one reusable utility. | Consolidate throttled logging and trace controls into a shared helper package. | Small cleanup, but it reduces low-grade duplication. 
| diff --git a/internal/daemon/server.go b/internal/daemon/server.go index 563eb1d..b856d4d 100644 --- a/internal/daemon/server.go +++ b/internal/daemon/server.go @@ -28,6 +28,7 @@ import ( type Service struct { cfg Config + ctx context.Context store *telemetry.Store pipeline *telemetry.Pipeline @@ -100,6 +101,7 @@ func startService(ctx context.Context, cfg Config) (*Service, error) { svc := &Service{ cfg: cfg, + ctx: ctx, store: store, pipeline: telemetry.NewPipeline(store, telemetry.NewSpool(cfg.SpoolDir)), quotaIngest: telemetry.NewQuotaSnapshotIngestor(store), @@ -318,6 +320,13 @@ func (s *Service) refreshReadModelCacheAsync( }() } +func (s *Service) backgroundContext() context.Context { + if s != nil && s.ctx != nil { + return s.ctx + } + return context.Background() +} + func (s *Service) runReadModelCacheLoop(ctx context.Context) { if s == nil { return @@ -387,7 +396,7 @@ func (s *Service) runSpoolMaintenanceLoop(ctx context.Context) { s.infof("spool_loop_start", "flush_interval=%s cleanup_interval=%s", 5*time.Second, 60*time.Second) s.flushSpoolBacklog(ctx, 10000) - s.cleanupSpool(ctx) + s.cleanupSpool() for { select { @@ -397,7 +406,7 @@ func (s *Service) runSpoolMaintenanceLoop(ctx context.Context) { case <-flushTicker.C: s.flushSpoolBacklog(ctx, 10000) case <-cleanupTicker.C: - s.cleanupSpool(ctx) + s.cleanupSpool() } } } @@ -423,7 +432,7 @@ func (s *Service) flushSpoolBacklog(ctx context.Context, maxTotal int) { } } -func (s *Service) cleanupSpool(ctx context.Context) { +func (s *Service) cleanupSpool() { if s == nil || strings.TrimSpace(s.cfg.SpoolDir) == "" { return } @@ -1105,7 +1114,7 @@ func (s *Service) handleReadModel(w http.ResponseWriter, r *http.Request) { } writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: cached}) if time.Since(cachedAt) > 2*time.Second { - s.refreshReadModelCacheAsync(context.Background(), cacheKey, req, 60*time.Second) + s.refreshReadModelCacheAsync(s.backgroundContext(), cacheKey, req, 60*time.Second) } return } @@ 
-1123,7 +1132,7 @@ func (s *Service) handleReadModel(w http.ResponseWriter, r *http.Request) { s.warnf("read_model_cache_miss_compute_error", "error=%v", err) } - s.refreshReadModelCacheAsync(context.Background(), cacheKey, req, 60*time.Second) + s.refreshReadModelCacheAsync(s.backgroundContext(), cacheKey, req, 60*time.Second) snapshots = ReadModelTemplatesFromRequest(req, DisabledAccountsFromConfig()) writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: snapshots}) durationMs := time.Since(started).Milliseconds() From 7cec271b2c07a9f2ffc601cca4846f39ceaeadc4 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 12:20:43 +0100 Subject: [PATCH 04/32] refactor: finish cursor parser cleanup and share log throttling --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 20 +- internal/core/clock.go | 22 ++ internal/core/log_throttle.go | 80 ++++ internal/core/log_throttle_test.go | 42 +++ internal/daemon/runtime.go | 20 +- internal/daemon/server.go | 44 +-- internal/providers/cursor/cursor.go | 351 +++++++----------- internal/providers/cursor/telemetry.go | 145 ++------ internal/providers/cursor/tracking_records.go | 173 +++++++++ 9 files changed, 510 insertions(+), 387 deletions(-) create mode 100644 internal/core/clock.go create mode 100644 internal/core/log_throttle.go create mode 100644 internal/core/log_throttle_test.go create mode 100644 internal/providers/cursor/tracking_records.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 5b2bc04..fdce356 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -29,6 +29,9 @@ This table captures every issue found in this pass. 
It is broad and high-signal, | R9 | Fixed | Claude Code parser duplication | `internal/providers/claude_code/conversation_records.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/telemetry_usage.go` | Claude Code JSONL parsing, token total calculation, and usage/tool dedupe keys now run through one shared normalized conversation-record helper used by both the dashboard aggregator and telemetry collector. | Apply the same consolidation pattern to Cursor. | | R10 | Fixed | Cursor state DB reader duplication | `internal/providers/cursor/state_records.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/telemetry.go` | Cursor `composerData` and `bubbleId` rows from `cursorDiskKV` are now parsed once into shared record types and projected from both the dashboard provider and telemetry collector. This also removes the extra telemetry pass that queried `bubbleId` separately for tool and token events. | Tracking DB and daily-stats duplication still remain. | | R11 | Fixed | Detached read-model refresh ownership | `internal/daemon/server.go` | Async read-model cache refreshes triggered from HTTP handlers now inherit the daemon service root context instead of launching from `context.Background()`. | If a worker pool is added later, reuse the same service-owned context there too. | +| R12 | Fixed | Cursor tracking and daily-stats reader duplication | `internal/providers/cursor/tracking_records.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/telemetry.go` | Cursor `ai_code_hashes` rows and `ItemTable` daily-stats envelopes now parse through shared record loaders, including compatibility for older tracking DB schemas with missing columns. Dashboard and telemetry projections now read the same normalized source records. | Keep compatibility coverage for older Cursor schemas. 
| +| R13 | Fixed | Ad hoc daemon log throttling | `internal/core/log_throttle.go`, `internal/daemon/server.go`, `internal/daemon/runtime.go` | Daemon service and dashboard runtime now use a shared throttling helper instead of separate timestamp/mutex patterns for repeated log suppression. | Reuse the same helper if more throttled log sites are added. | +| R14 | Fixed | Cursor time-source injection | `internal/core/clock.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/tracking_records.go`, `internal/providers/cursor/telemetry.go` | Cursor provider and its shared SQLite readers now use an injectable clock path instead of direct `time.Now()` calls in the main time-sensitive flow. | Extend the same pattern to other provider/analytics subsystems over time. | ## Action Table @@ -41,24 +44,21 @@ This table captures every issue found in this pass. It is broad and high-signal, | A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. | Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. | | A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go:160-1757` | `usage_view.go` is simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. 
| | A7 | P1 | Daemon service monolith | `internal/daemon/server.go:1-1211` | `server.go` owns service startup, socket server, polling, collection, retention, cache refresh, hook handling, and HTTP endpoints. | Split into `service_runtime`, `http_handlers`, `polling`, `collection`, `cache`, and `hook_ingest` files/types. | Lower mental load and easier concurrency review. | -| A8 | P1 | Shared parser duplication | `internal/providers/cursor/cursor.go:1087-1305`, `internal/providers/cursor/telemetry.go:107-223`, `internal/providers/cursor/cursor.go:1699-1735`, `internal/providers/cursor/telemetry.go:791-860` | Cursor state DB rows are now consolidated, but the tracking DB (`ai_code_hashes`) and daily stats (`ItemTable`) flows are still interpreted separately in dashboard and telemetry code. Snapshot and telemetry ingestion can still drift there. | Extend the same shared-record approach to tracking DB rows and daily-stats envelopes so both consumers project from one parsed source model. | Eliminates the last remaining raw-source drift risk in Cursor. | -| A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/cursor/cursor.go:478`, `internal/providers/cursor/cursor.go:1704-1711`, `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Providers and analytics logic read `time.Now()` directly in many places, often mixing local time and UTC. This is hard to test and easy to get subtly wrong. | Introduce a small clock abstraction in time-sensitive subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. 
| +| A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Cursor’s main time-sensitive path now uses an injectable clock, but several other providers and analytics helpers still read `time.Now()` directly, often mixing local time and UTC. | Extend the clock abstraction to the remaining provider and analytics subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | -| A13 | P3 | Logging/throttling utilities are ad hoc | `internal/daemon/server.go:194-237`, `internal/daemon/runtime.go:198-207`, `internal/core/trace.go:14-27` | Log throttling and trace behavior are implemented in several small local patterns instead of one reusable utility. | Consolidate throttled logging and trace controls into a shared helper package. | Small cleanup, but it reduces low-grade duplication. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. 
| Better readability and easier targeted refactors. | | A15 | P3 | Performance optimization opportunity in render path | `internal/tui/model.go:441-450`, `internal/tui/tiles_composition.go:302-322`, `internal/tui/detail.go:752-1046`, `internal/tui/analytics.go:663-729` | The UI recomputes display/composition structures from raw metric maps repeatedly during rendering. It is correct, but the work is duplicated across views and frames. | Cache derived display/composition sections per snapshot update instead of rebuilding them in each view path. | Lower render cost and less duplicated parsing logic. | ## Suggested Execution Order -1. A1, A8 -2. A2, A3 -3. A6, A7 -4. A4, A5 -5. A9, A11, A15 -6. A10, A12, A13, A14 +1. A2, A3 +2. A6, A7 +3. A4, A5 +4. A1, A11 +5. A12, A14, A15 ## Notes - The highest-risk remaining issues are architectural rather than immediately broken behavior. -- The biggest drift risks are still the duplicated raw-source parsers and the metric-prefix parsing in the TUI. +- The biggest remaining drift risk is the metric-prefix parsing still spread across the TUI render path. - The race pass completed cleanly for the core dashboard/daemon/telemetry packages after the timeframe fix. 
diff --git a/internal/core/clock.go b/internal/core/clock.go new file mode 100644 index 0000000..0b12e5d --- /dev/null +++ b/internal/core/clock.go @@ -0,0 +1,22 @@ +package core + +import "time" + +type Clock interface { + Now() time.Time +} + +type SystemClock struct{} + +func (SystemClock) Now() time.Time { + return time.Now() +} + +type FuncClock func() time.Time + +func (f FuncClock) Now() time.Time { + if f == nil { + return time.Now() + } + return f() +} diff --git a/internal/core/log_throttle.go b/internal/core/log_throttle.go new file mode 100644 index 0000000..d565e77 --- /dev/null +++ b/internal/core/log_throttle.go @@ -0,0 +1,80 @@ +package core + +import ( + "strings" + "sync" + "time" +) + +type LogThrottle struct { + mu sync.Mutex + lastAt map[string]time.Time + maxKeys int + maxAge time.Duration +} + +func NewLogThrottle(maxKeys int, maxAge time.Duration) *LogThrottle { + if maxKeys <= 0 { + maxKeys = 1 + } + if maxAge <= 0 { + maxAge = time.Minute + } + return &LogThrottle{ + lastAt: make(map[string]time.Time), + maxKeys: maxKeys, + maxAge: maxAge, + } +} + +func (t *LogThrottle) Allow(key string, interval time.Duration, now time.Time) bool { + if t == nil { + return false + } + key = strings.TrimSpace(key) + if key == "" { + key = "default" + } + if now.IsZero() { + now = time.Now() + } + + t.mu.Lock() + defer t.mu.Unlock() + + if interval > 0 { + if last, ok := t.lastAt[key]; ok && now.Sub(last) < interval { + return false + } + } + t.lastAt[key] = now + t.pruneLocked(now) + return true +} + +func (t *LogThrottle) pruneLocked(now time.Time) { + if len(t.lastAt) <= t.maxKeys { + return + } + + for key, ts := range t.lastAt { + if now.Sub(ts) > t.maxAge { + delete(t.lastAt, key) + } + } + + for len(t.lastAt) > t.maxKeys { + oldestKey := "" + oldestTime := now + for key, ts := range t.lastAt { + if ts.Before(oldestTime) { + oldestKey = key + oldestTime = ts + } + } + if oldestKey == "" { + break + } + delete(t.lastAt, oldestKey) + } +} diff --git 
a/internal/core/log_throttle_test.go b/internal/core/log_throttle_test.go new file mode 100644 index 0000000..2bc1f46 --- /dev/null +++ b/internal/core/log_throttle_test.go @@ -0,0 +1,42 @@ +package core + +import ( + "testing" + "time" +) + +func TestLogThrottleAllow(t *testing.T) { + throttle := NewLogThrottle(4, time.Minute) + now := time.Date(2026, 3, 9, 12, 0, 0, 0, time.UTC) + + if !throttle.Allow("read_model", 2*time.Second, now) { + t.Fatal("first call should be allowed") + } + if throttle.Allow("read_model", 2*time.Second, now.Add(time.Second)) { + t.Fatal("second call inside interval should be blocked") + } + if !throttle.Allow("read_model", 2*time.Second, now.Add(3*time.Second)) { + t.Fatal("call after interval should be allowed") + } +} + +func TestLogThrottlePrunesOldestEntries(t *testing.T) { + throttle := NewLogThrottle(2, time.Minute) + base := time.Date(2026, 3, 9, 12, 0, 0, 0, time.UTC) + + if !throttle.Allow("a", 0, base) { + t.Fatal("expected a") + } + if !throttle.Allow("b", 0, base.Add(time.Second)) { + t.Fatal("expected b") + } + if !throttle.Allow("c", 0, base.Add(2*time.Second)) { + t.Fatal("expected c") + } + if len(throttle.lastAt) != 2 { + t.Fatalf("len(lastAt) = %d, want 2", len(throttle.lastAt)) + } + if _, ok := throttle.lastAt["a"]; ok { + t.Fatal("oldest entry should have been pruned") + } +} diff --git a/internal/daemon/runtime.go b/internal/daemon/runtime.go index 9041671..313fa93 100644 --- a/internal/daemon/runtime.go +++ b/internal/daemon/runtime.go @@ -20,8 +20,7 @@ type ViewRuntime struct { ensureMu sync.Mutex lastEnsureAttempt time.Time - readModelMu sync.Mutex - lastReadModelErrLog time.Time + logThrottle *core.LogThrottle stateMu sync.RWMutex state DaemonState @@ -34,10 +33,11 @@ func NewViewRuntime( verbose bool, ) *ViewRuntime { return &ViewRuntime{ - client: client, - socketPath: strings.TrimSpace(socketPath), - verbose: verbose, - state: DaemonState{Status: DaemonStatusConnecting}, + client: client, + socketPath: 
strings.TrimSpace(socketPath), + verbose: verbose, + logThrottle: core.NewLogThrottle(8, time.Minute), + state: DaemonState{Status: DaemonStatusConnecting}, } } @@ -199,13 +199,7 @@ func (r *ViewRuntime) fetchReadModel( } func (r *ViewRuntime) throttledLogError(err error) { - r.readModelMu.Lock() - shouldLog := time.Since(r.lastReadModelErrLog) > 2*time.Second - if shouldLog { - r.lastReadModelErrLog = time.Now() - } - r.readModelMu.Unlock() - if shouldLog { + if r != nil && r.logThrottle.Allow("read_model_error", 2*time.Second, time.Now()) { log.Printf("daemon read-model error: %v", err) } } diff --git a/internal/daemon/server.go b/internal/daemon/server.go index b856d4d..77c478a 100644 --- a/internal/daemon/server.go +++ b/internal/daemon/server.go @@ -36,9 +36,8 @@ type Service struct { collectors []telemetry.Collector providerByID map[string]core.UsageProvider - spoolMu sync.Mutex // guards spool filesystem operations (read/write/cleanup) - logMu sync.Mutex - lastLogAt map[string]time.Time + spoolMu sync.Mutex // guards spool filesystem operations (read/write/cleanup) + logThrottle *core.LogThrottle rmCache *readModelCache } @@ -107,7 +106,7 @@ func startService(ctx context.Context, cfg Config) (*Service, error) { quotaIngest: telemetry.NewQuotaSnapshotIngestor(store), collectors: buildCollectors(), providerByID: providersByID(), - lastLogAt: map[string]time.Time{}, + logThrottle: core.NewLogThrottle(200, 10*time.Minute), rmCache: newReadModelCache(), } @@ -236,42 +235,7 @@ func (s *Service) shouldLog(key string, interval time.Duration) bool { if s == nil { return false } - s.logMu.Lock() - defer s.logMu.Unlock() - now := time.Now() - if interval > 0 { - if last, ok := s.lastLogAt[key]; ok && now.Sub(last) < interval { - return false - } - } - s.lastLogAt[key] = now - // Prevent unbounded growth in long-running daemon. - const maxLogKeys = 200 - const maxLogAge = 10 * time.Minute - if len(s.lastLogAt) > maxLogKeys { - // First pass: remove stale entries. 
- for k, t := range s.lastLogAt { - if now.Sub(t) > maxLogAge { - delete(s.lastLogAt, k) - } - } - // If still over limit, remove oldest entries until at cap. - for len(s.lastLogAt) > maxLogKeys { - oldestKey := "" - oldestTime := now - for k, t := range s.lastLogAt { - if t.Before(oldestTime) { - oldestKey = k - oldestTime = t - } - } - if oldestKey == "" { - break - } - delete(s.lastLogAt, oldestKey) - } - } - return true + return s.logThrottle.Allow(key, interval, time.Now()) } // --- Read-model cache --- diff --git a/internal/providers/cursor/cursor.go b/internal/providers/cursor/cursor.go index b6bb9c9..a6d378b 100644 --- a/internal/providers/cursor/cursor.go +++ b/internal/providers/cursor/cursor.go @@ -29,6 +29,7 @@ var cursorAPIBase = "https://api2.cursor.sh" type Provider struct { providerbase.Base mu sync.RWMutex + clock core.Clock modelAggregationCache map[string]cachedModelAggregation } @@ -60,6 +61,7 @@ func New() *Provider { }, Dashboard: dashboardWidget(), }), + clock: core.SystemClock{}, modelAggregationCache: make(map[string]cachedModelAggregation), } } @@ -168,6 +170,13 @@ func (p *Provider) DetailWidget() core.DetailWidget { return core.CodingToolDetailWidget(false) } +func (p *Provider) now() time.Time { + if p != nil && p.clock != nil { + return p.clock.Now() + } + return time.Now() +} + func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.UsageSnapshot, error) { if strings.TrimSpace(acct.Provider) == "" { acct.Provider = p.ID() @@ -175,7 +184,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa snap := core.UsageSnapshot{ ProviderID: p.ID(), AccountID: acct.ID, - Timestamp: time.Now(), + Timestamp: p.now(), Status: core.StatusOK, Metrics: make(map[string]core.Metric), Resets: make(map[string]time.Time), @@ -238,18 +247,20 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa var hasLocalData bool if trackingDBPath != "" { + before := 
cursorSnapshotDataSignature(&snap) if err := p.readTrackingDB(ctx, trackingDBPath, &snap); err != nil { log.Printf("[cursor] tracking DB error: %v", err) snap.Raw["tracking_db_error"] = err.Error() - } else { + } else if cursorSnapshotDataSignature(&snap) != before { hasLocalData = true } } if stateDBPath != "" { + before := cursorSnapshotDataSignature(&snap) if err := p.readStateDB(ctx, stateDBPath, &snap); err != nil { log.Printf("[cursor] state DB error: %v", err) snap.Raw["state_db_error"] = err.Error() - } else { + } else if cursorSnapshotDataSignature(&snap) != before { hasLocalData = true } } @@ -306,6 +317,27 @@ func mergeAPIIntoSnapshot(dst, src *core.UsageSnapshot) { } } +type cursorSnapshotSignature struct { + metrics int + resets int + raw int + dailySeries int + modelUsage int +} + +func cursorSnapshotDataSignature(snap *core.UsageSnapshot) cursorSnapshotSignature { + if snap == nil { + return cursorSnapshotSignature{} + } + return cursorSnapshotSignature{ + metrics: len(snap.Metrics), + resets: len(snap.Resets), + raw: len(snap.Raw), + dailySeries: len(snap.DailySeries), + modelUsage: len(snap.ModelUsage), + } +} + func (p *Provider) buildLocalOnlyMessage(snap *core.UsageSnapshot) { var parts []string @@ -469,7 +501,7 @@ func (p *Provider) fetchFromAPI(ctx context.Context, token string, snap *core.Us Unit: "%", Window: "billing-cycle", } - daysRemaining := cycleEnd.Sub(time.Now()).Hours() / 24 + daysRemaining := cycleEnd.Sub(p.now()).Hours() / 24 if daysRemaining < 0 { daysRemaining = 0 } @@ -1085,12 +1117,15 @@ func (p *Provider) readTrackingDB(ctx context.Context, dbPath string, snap *core } defer db.Close() - var totalRequests int - err = db.QueryRowContext(ctx, `SELECT COUNT(*) FROM ai_code_hashes`).Scan(&totalRequests) - if err != nil { - return fmt.Errorf("querying total requests: %w", err) + if !cursorTableExists(ctx, db, "ai_code_hashes") { + return nil } + trackingRecords, err := loadTrackingRecords(ctx, db, p.clock) + if err != nil { + 
return err + } + totalRequests := len(trackingRecords) if totalRequests > 0 { total := float64(totalRequests) snap.Metrics["total_ai_requests"] = core.Metric{ @@ -1100,13 +1135,14 @@ func (p *Provider) readTrackingDB(ctx context.Context, dbPath string, snap *core } } - timeExpr := chooseTrackingTimeExpr(ctx, db) - now := time.Now() - todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).UnixMilli() - var todayCount int - err = db.QueryRowContext(ctx, - fmt.Sprintf(`SELECT COUNT(*) FROM ai_code_hashes WHERE %s >= ?`, timeExpr), todayStart).Scan(&todayCount) - if err == nil && todayCount > 0 { + today := p.now().Format("2006-01-02") + todayCount := 0 + for _, record := range trackingRecords { + if record.OccurredDay == today { + todayCount++ + } + } + if todayCount > 0 { tc := float64(todayCount) snap.Metrics["requests_today"] = core.Metric{ Used: &tc, @@ -1115,10 +1151,10 @@ func (p *Provider) readTrackingDB(ctx context.Context, dbPath string, snap *core } } - p.readTrackingSourceBreakdown(ctx, db, snap, todayStart, timeExpr) - p.readTrackingDailyRequests(ctx, db, snap, timeExpr) - p.readTrackingModelBreakdown(ctx, db, snap, todayStart, timeExpr) - p.readTrackingLanguageBreakdown(ctx, db, snap) + p.readTrackingSourceBreakdown(trackingRecords, snap, today) + p.readTrackingDailyRequests(trackingRecords, snap) + p.readTrackingModelBreakdown(trackingRecords, snap, today) + p.readTrackingLanguageBreakdown(trackingRecords, snap) p.readScoredCommits(ctx, db, snap) p.readDeletedFiles(ctx, db, snap) p.readTrackedFileContent(ctx, db, snap) @@ -1264,31 +1300,9 @@ func (p *Provider) readTrackedFileContent(ctx context.Context, db *sql.DB, snap } func chooseTrackingTimeExpr(ctx context.Context, db *sql.DB) string { - rows, err := db.QueryContext(ctx, `PRAGMA table_info(ai_code_hashes)`) - if err != nil { - return "createdAt" - } - defer rows.Close() - - hasCreatedAt := false - hasTimestamp := false - for rows.Next() { - var cid int - var name 
string - var dataType string - var notNull int - var dfltValue sql.NullString - var pk int - if rows.Scan(&cid, &name, &dataType, ¬Null, &dfltValue, &pk) != nil { - continue - } - switch strings.ToLower(strings.TrimSpace(name)) { - case "createdat": - hasCreatedAt = true - case "timestamp": - hasTimestamp = true - } - } + columns := cursorTableColumns(ctx, db, "ai_code_hashes") + hasCreatedAt := columns["createdat"] + hasTimestamp := columns["timestamp"] switch { case hasCreatedAt && hasTimestamp: @@ -1298,35 +1312,26 @@ func chooseTrackingTimeExpr(ctx context.Context, db *sql.DB) string { case hasTimestamp: return "timestamp" default: - return "createdAt" + return "0" } } -func (p *Provider) readTrackingSourceBreakdown(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot, todayStart int64, timeExpr string) { - rows, err := db.QueryContext(ctx, ` - SELECT COALESCE(source, ''), COUNT(*) - FROM ai_code_hashes - GROUP BY COALESCE(source, '') - ORDER BY COUNT(*) DESC`) - if err != nil { - return - } - defer rows.Close() - +func (p *Provider) readTrackingSourceBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot, today string) { clientTotals := map[string]float64{ "ide": 0, "cli_agents": 0, "other": 0, } + sourceTotals := make(map[string]int) + todaySourceTotals := make(map[string]int) var sourceSummary []string - - for rows.Next() { - var source string - var count int - if rows.Scan(&source, &count) != nil || count <= 0 { - continue + for _, record := range records { + sourceTotals[record.Source]++ + if record.OccurredDay == today { + todaySourceTotals[record.Source]++ } - + } + for source, count := range sourceTotals { value := float64(count) sourceKey := sanitizeCursorMetricName(source) snap.Metrics["source_"+sourceKey+"_requests"] = core.Metric{ @@ -1364,22 +1369,9 @@ func (p *Provider) readTrackingSourceBreakdown(ctx context.Context, db *sql.DB, } } - todayRows, err := db.QueryContext(ctx, fmt.Sprintf(` - SELECT COALESCE(source, ''), COUNT(*) - 
FROM ai_code_hashes - WHERE %s >= ? - GROUP BY COALESCE(source, '') - ORDER BY COUNT(*) DESC`, timeExpr), todayStart) - if err != nil { - return - } - defer todayRows.Close() - var todaySummary []string - for todayRows.Next() { - var source string - var count int - if todayRows.Scan(&source, &count) != nil || count <= 0 { + for source, count := range todaySourceTotals { + if count <= 0 { continue } value := float64(count) @@ -1396,17 +1388,7 @@ func (p *Provider) readTrackingSourceBreakdown(ctx context.Context, db *sql.DB, } } -func (p *Provider) readTrackingDailyRequests(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot, timeExpr string) { - rows, err := db.QueryContext(ctx, fmt.Sprintf(` - SELECT COALESCE(source, ''), strftime('%%Y-%%m-%%d', (%s)/1000, 'unixepoch', 'localtime') as day, COUNT(*) - FROM ai_code_hashes - GROUP BY COALESCE(source, ''), day - ORDER BY day ASC`, timeExpr)) - if err != nil { - return - } - defer rows.Close() - +func (p *Provider) readTrackingDailyRequests(records []cursorTrackingRecord, snap *core.UsageSnapshot) { totalByDay := make(map[string]float64) byClientDay := map[string]map[string]float64{ "ide": make(map[string]float64), @@ -1415,19 +1397,16 @@ func (p *Provider) readTrackingDailyRequests(ctx context.Context, db *sql.DB, sn } bySourceDay := make(map[string]map[string]float64) - for rows.Next() { - var source string - var day string - var count int - if rows.Scan(&source, &day, &count) != nil || count <= 0 || day == "" { + for _, record := range records { + day := record.OccurredDay + if day == "" { continue } - - v := float64(count) + v := 1.0 totalByDay[day] += v - clientKey := cursorClientBucket(source) + clientKey := cursorClientBucket(record.Source) byClientDay[clientKey][day] += v - sourceKey := sanitizeCursorMetricName(source) + sourceKey := sanitizeCursorMetricName(record.Source) if bySourceDay[sourceKey] == nil { bySourceDay[sourceKey] = make(map[string]float64) } @@ -1451,22 +1430,26 @@ func (p *Provider) 
readTrackingDailyRequests(ctx context.Context, db *sql.DB, sn } } -func (p *Provider) readTrackingModelBreakdown(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot, todayStart int64, timeExpr string) { - rows, err := db.QueryContext(ctx, ` - SELECT COALESCE(model, ''), COUNT(*) - FROM ai_code_hashes - GROUP BY COALESCE(model, '') - ORDER BY COUNT(*) DESC`) - if err != nil { - return - } - defer rows.Close() - +func (p *Provider) readTrackingModelBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot, today string) { + modelTotals := make(map[string]int) + todayModelTotals := make(map[string]int) + byModelDay := make(map[string]map[string]float64) var modelSummary []string - for rows.Next() { - var model string - var count int - if rows.Scan(&model, &count) != nil || count <= 0 { + for _, record := range records { + modelTotals[record.Model]++ + if record.OccurredDay == today { + todayModelTotals[record.Model]++ + } + modelKey := sanitizeCursorMetricName(record.Model) + if byModelDay[modelKey] == nil { + byModelDay[modelKey] = make(map[string]float64) + } + if record.OccurredDay != "" { + byModelDay[modelKey][record.OccurredDay]++ + } + } + for model, count := range modelTotals { + if count <= 0 { continue } @@ -1483,53 +1466,17 @@ func (p *Provider) readTrackingModelBreakdown(ctx context.Context, db *sql.DB, s snap.Raw["model_usage"] = strings.Join(modelSummary, " · ") } - todayRows, err := db.QueryContext(ctx, fmt.Sprintf(` - SELECT COALESCE(model, ''), COUNT(*) - FROM ai_code_hashes - WHERE %s >= ? 
- GROUP BY COALESCE(model, '') - ORDER BY COUNT(*) DESC`, timeExpr), todayStart) - if err == nil { - defer todayRows.Close() - for todayRows.Next() { - var model string - var count int - if todayRows.Scan(&model, &count) != nil || count <= 0 { - continue - } - value := float64(count) - modelKey := sanitizeCursorMetricName(model) - snap.Metrics["model_"+modelKey+"_requests_today"] = core.Metric{ - Used: &value, - Unit: "requests", - Window: "1d", - } - } - } - - dailyRows, err := db.QueryContext(ctx, fmt.Sprintf(` - SELECT COALESCE(model, ''), strftime('%%Y-%%m-%%d', (%s)/1000, 'unixepoch', 'localtime') as day, COUNT(*) - FROM ai_code_hashes - GROUP BY COALESCE(model, ''), day - ORDER BY day ASC`, timeExpr)) - if err != nil { - return - } - defer dailyRows.Close() - - byModelDay := make(map[string]map[string]float64) - for dailyRows.Next() { - var model string - var day string - var count int - if dailyRows.Scan(&model, &day, &count) != nil || count <= 0 || day == "" { + for model, count := range todayModelTotals { + if count <= 0 { continue } modelKey := sanitizeCursorMetricName(model) - if byModelDay[modelKey] == nil { - byModelDay[modelKey] = make(map[string]float64) + value := float64(count) + snap.Metrics["model_"+modelKey+"_requests_today"] = core.Metric{ + Used: &value, + Unit: "requests", + Window: "1d", } - byModelDay[modelKey][day] += float64(count) } for modelKey, pointsByDay := range byModelDay { if len(pointsByDay) < 2 { @@ -1539,26 +1486,16 @@ func (p *Provider) readTrackingModelBreakdown(ctx context.Context, db *sql.DB, s } } -func (p *Provider) readTrackingLanguageBreakdown(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - rows, err := db.QueryContext(ctx, ` - SELECT COALESCE(fileExtension, ''), COUNT(*) - FROM ai_code_hashes - WHERE fileExtension IS NOT NULL AND fileExtension != '' - GROUP BY COALESCE(fileExtension, '') - ORDER BY COUNT(*) DESC`) - if err != nil { - return - } - defer rows.Close() - +func (p *Provider) 
readTrackingLanguageBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot) { + langTotals := make(map[string]int) var langSummary []string - for rows.Next() { - var ext string - var count int - if rows.Scan(&ext, &count) != nil || count <= 0 { + for _, record := range records { + if strings.TrimSpace(record.FileExt) == "" { continue } - + langTotals[record.FileExt]++ + } + for ext, count := range langTotals { value := float64(count) langName := extensionToLanguage(ext) langKey := sanitizeCursorMetricName(langName) @@ -1685,6 +1622,10 @@ func (p *Provider) readStateDB(ctx context.Context, dbPath string, snap *core.Us return fmt.Errorf("state DB not accessible: %w", err) } + dailyStatsRecords, err := loadDailyStatsRecords(ctx, db) + if err != nil { + dailyStatsRecords = nil + } composerRecords, err := loadComposerSessionRecords(ctx, db) if err != nil { log.Printf("[cursor] composerData query error: %v", err) @@ -1694,8 +1635,8 @@ func (p *Provider) readStateDB(ctx context.Context, dbPath string, snap *core.Us log.Printf("[cursor] bubbleId query error: %v", err) } - p.readDailyStatsToday(ctx, db, snap) - p.readDailyStatsSeries(ctx, db, snap) + p.readDailyStatsToday(dailyStatsRecords, snap) + p.readDailyStatsSeries(dailyStatsRecords, snap) p.readComposerSessions(composerRecords, snap) p.readStateMetadata(ctx, db, snap) p.readToolUsage(bubbleRecords, snap) @@ -1703,27 +1644,21 @@ func (p *Provider) readStateDB(ctx context.Context, dbPath string, snap *core.Us return nil } -func (p *Provider) readDailyStatsToday(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - today := time.Now().Format("2006-01-02") - key := fmt.Sprintf("aiCodeTracking.dailyStats.v1.5.%s", today) - - var value string - err := db.QueryRowContext(ctx, `SELECT value FROM ItemTable WHERE key = ?`, key).Scan(&value) - if err != nil { - if err == sql.ErrNoRows { - yesterday := time.Now().AddDate(0, 0, -1).Format("2006-01-02") - key = fmt.Sprintf("aiCodeTracking.dailyStats.v1.5.%s", 
yesterday) - err = db.QueryRowContext(ctx, `SELECT value FROM ItemTable WHERE key = ?`, key).Scan(&value) - if err != nil { - return +func (p *Provider) readDailyStatsToday(records []cursorDailyStatsRecord, snap *core.UsageSnapshot) { + today := p.now().Format("2006-01-02") + yesterday := p.now().AddDate(0, 0, -1).Format("2006-01-02") + var stats *dailyStats + for i := range records { + switch records[i].Date { + case today: + stats = &records[i].Stats + case yesterday: + if stats == nil { + stats = &records[i].Stats } - } else { - return } } - - var stats dailyStats - if json.Unmarshal([]byte(value), &stats) != nil { + if stats == nil { return } @@ -1768,7 +1703,7 @@ func (p *Provider) readComposerSessions(records []cursorComposerSessionRecord, s todayRequests int ) - now := time.Now() + now := p.now() todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) for _, record := range records { @@ -2218,30 +2153,10 @@ func normalizeCursorMCPName(name string) string { return name } -func (p *Provider) readDailyStatsSeries(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - rows, err := db.QueryContext(ctx, - `SELECT key, value FROM ItemTable WHERE key LIKE 'aiCodeTracking.dailyStats.v1.5.%' ORDER BY key ASC`) - if err != nil { - return - } - defer rows.Close() - - prefix := "aiCodeTracking.dailyStats.v1.5." 
- for rows.Next() { - var k, v string - if rows.Scan(&k, &v) != nil { - continue - } - dateStr := strings.TrimPrefix(k, prefix) - if len(dateStr) != 10 { // "2025-01-15" - continue - } - - var ds dailyStats - if json.Unmarshal([]byte(v), &ds) != nil { - continue - } - +func (p *Provider) readDailyStatsSeries(records []cursorDailyStatsRecord, snap *core.UsageSnapshot) { + for _, record := range records { + ds := record.Stats + dateStr := record.Date if ds.TabSuggestedLines > 0 || ds.TabAcceptedLines > 0 { snap.DailySeries["tab_suggested"] = append(snap.DailySeries["tab_suggested"], core.TimePoint{Date: dateStr, Value: float64(ds.TabSuggestedLines)}) diff --git a/internal/providers/cursor/telemetry.go b/internal/providers/cursor/telemetry.go index 6d6c0c9..b23bd7a 100644 --- a/internal/providers/cursor/telemetry.go +++ b/internal/providers/cursor/telemetry.go @@ -3,7 +3,6 @@ package cursor import ( "context" "database/sql" - "encoding/json" "fmt" "os" "path/filepath" @@ -119,98 +118,57 @@ func collectTrackingDBEvents(ctx context.Context, dbPath string) ([]shared.Telem // Collect scored commits from the same DB connection. 
var commitEvents []shared.TelemetryEvent if cursorTableExists(ctx, db, "scored_commits") { - commitEvents, _ = queryScoredCommits(ctx, db, dbPath) + commitEvents, _ = queryScoredCommits(ctx, db, dbPath, core.SystemClock{}) } if !cursorTableExists(ctx, db, "ai_code_hashes") { return nil, commitEvents, nil } - timeExpr := chooseTrackingTimeExpr(ctx, db) - - rows, err := db.QueryContext(ctx, fmt.Sprintf(` - SELECT COALESCE(source, ''), - COALESCE(model, ''), - COALESCE(fileExtension, ''), - COALESCE(fileName, ''), - COALESCE(requestId, ''), - COALESCE(conversationId, ''), - COALESCE(%s, 0), - rowid - FROM ai_code_hashes - ORDER BY %s ASC`, timeExpr, timeExpr)) + records, err := loadTrackingRecords(ctx, db, core.SystemClock{}) if err != nil { - return nil, commitEvents, fmt.Errorf("cursor: querying ai_code_hashes: %w", err) + return nil, commitEvents, err } - defer rows.Close() var out []shared.TelemetryEvent - for rows.Next() { - if ctx.Err() != nil { - return out, commitEvents, ctx.Err() - } - - var ( - source string - model string - fileExt string - fileName string - requestID string - conversationID string - timestamp int64 - rowID int64 - ) - if err := rows.Scan(&source, &model, &fileExt, &fileName, &requestID, &conversationID, ×tamp, &rowID); err != nil { - continue - } - - occurredAt := time.Now().UTC() - if timestamp > 0 { - occurredAt = shared.UnixAuto(timestamp) - } - - messageID := fmt.Sprintf("cursor-tracking:%d", rowID) - - clientBucket := cursorSourceToClientBucket(source) - - // Use conversationId as session ID to link tracking events to composer sessions. 
- sessionID := strings.TrimSpace(conversationID) - + for _, record := range records { + messageID := fmt.Sprintf("cursor-tracking:%d", record.RowID) + clientBucket := cursorSourceToClientBucket(record.Source) payload := map[string]any{ "source": map[string]any{ "db_path": dbPath, "table": "ai_code_hashes", - "row_id": rowID, + "row_id": record.RowID, }, "client": clientBucket, - "cursor_source": source, + "cursor_source": record.Source, } - if fileExt != "" { - payload["file_extension"] = fileExt + if record.FileExt != "" { + payload["file_extension"] = record.FileExt } - if fileName != "" { - payload["file"] = fileName - } else if fileExt != "" { - payload["file"] = "example" + normalizeFileExtension(fileExt) + if record.FileName != "" { + payload["file"] = record.FileName + } else if record.FileExt != "" { + payload["file"] = "example" + normalizeFileExtension(record.FileExt) } - if upstream := inferProviderFromModel(model); upstream != "cursor" { + if upstream := inferProviderFromModel(record.Model); upstream != "cursor" { payload["upstream_provider"] = upstream } - if requestID != "" { - payload["request_id"] = requestID + if record.RequestID != "" { + payload["request_id"] = record.RequestID } out = append(out, shared.TelemetryEvent{ SchemaVersion: telemetryCursorSQLiteSchema, Channel: shared.TelemetryChannelSQLite, - OccurredAt: occurredAt, + OccurredAt: record.OccurredAt, AccountID: "", - SessionID: sessionID, + SessionID: strings.TrimSpace(record.SessionID), MessageID: messageID, ProviderID: "cursor", - AgentName: cursorAgentName(source), + AgentName: cursorAgentName(record.Source), EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: model, + ModelRaw: record.Model, TokenUsage: core.TokenUsage{ Requests: core.Int64Ptr(1), }, @@ -219,7 +177,7 @@ func collectTrackingDBEvents(ctx context.Context, dbPath string) ([]shared.Telem }) } - return out, commitEvents, rows.Err() + return out, commitEvents, nil } // collectStateDBEvents reads composerData and 
bubbleId entries from the @@ -572,39 +530,19 @@ func bubbleTokenEventsFromRecords(records []cursorBubbleRecord, sessionTimestamp // Keys like "aiCodeTracking.dailyStats.v1.5.2025-11-23" contain tab/composer // suggested/accepted line counts per day. func collectDailyStatsEvents(ctx context.Context, db *sql.DB, dbPath string) ([]shared.TelemetryEvent, error) { - rows, err := db.QueryContext(ctx, ` - SELECT key, value FROM ItemTable - WHERE key LIKE 'aiCodeTracking.dailyStats.%'`) + records, err := loadDailyStatsRecords(ctx, db) if err != nil { - return nil, fmt.Errorf("cursor: querying dailyStats: %w", err) + return nil, err } - defer rows.Close() var out []shared.TelemetryEvent - for rows.Next() { - if ctx.Err() != nil { - return out, ctx.Err() - } - - var key, rawJSON string - if err := rows.Scan(&key, &rawJSON); err != nil { - continue - } - - var stats cursorDailyStats - if json.Unmarshal([]byte(rawJSON), &stats) != nil { - continue - } - if stats.Date == "" { - continue - } - - dayTime, err := time.Parse("2006-01-02", stats.Date) + for _, record := range records { + dayTime, err := time.Parse("2006-01-02", record.Date) if err != nil { continue } - messageID := fmt.Sprintf("cursor-daily-stats:%s", stats.Date) + messageID := fmt.Sprintf("cursor-daily-stats:%s", record.Date) out = append(out, shared.TelemetryEvent{ SchemaVersion: telemetryCursorSQLiteSchema, @@ -622,25 +560,28 @@ func collectDailyStatsEvents(ctx context.Context, db *sql.DB, dbPath string) ([] "source": map[string]any{ "db_path": dbPath, "table": "ItemTable", - "key": key, + "key": record.Key, }, "daily_stats": map[string]any{ - "date": stats.Date, - "tab_suggested_lines": stats.TabSuggestedLines, - "tab_accepted_lines": stats.TabAcceptedLines, - "composer_suggested_lines": stats.ComposerSuggestedLines, - "composer_accepted_lines": stats.ComposerAcceptedLines, + "date": record.Date, + "tab_suggested_lines": record.Stats.TabSuggestedLines, + "tab_accepted_lines": record.Stats.TabAcceptedLines, + 
"composer_suggested_lines": record.Stats.ComposerSuggestedLines, + "composer_accepted_lines": record.Stats.ComposerAcceptedLines, }, }, }) } - return out, rows.Err() + return out, nil } // queryScoredCommits reads scored_commits from an already-open tracking DB // and produces telemetry events with AI contribution percentages per commit. -func queryScoredCommits(ctx context.Context, db *sql.DB, dbPath string) ([]shared.TelemetryEvent, error) { +func queryScoredCommits(ctx context.Context, db *sql.DB, dbPath string, clock core.Clock) ([]shared.TelemetryEvent, error) { + if clock == nil { + clock = core.SystemClock{} + } rows, err := db.QueryContext(ctx, ` SELECT commitHash, branchName, scoredAt, COALESCE(linesAdded, 0), COALESCE(linesDeleted, 0), @@ -684,7 +625,7 @@ func queryScoredCommits(ctx context.Context, db *sql.DB, dbPath string) ([]share continue } - occurredAt := time.Now().UTC() + occurredAt := clock.Now().UTC() if scoredAt > 0 { occurredAt = shared.UnixAuto(scoredAt) } @@ -730,14 +671,6 @@ func queryScoredCommits(ctx context.Context, db *sql.DB, dbPath string) ([]share return out, rows.Err() } -type cursorDailyStats struct { - Date string `json:"date"` - TabSuggestedLines int `json:"tabSuggestedLines"` - TabAcceptedLines int `json:"tabAcceptedLines"` - ComposerSuggestedLines int `json:"composerSuggestedLines"` - ComposerAcceptedLines int `json:"composerAcceptedLines"` -} - func truncateString(s string, maxLen int) string { if len(s) <= maxLen { return s diff --git a/internal/providers/cursor/tracking_records.go b/internal/providers/cursor/tracking_records.go new file mode 100644 index 0000000..4225309 --- /dev/null +++ b/internal/providers/cursor/tracking_records.go @@ -0,0 +1,173 @@ +package cursor + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +type cursorTrackingRecord struct { + 
Source string + Model string + FileExt string + FileName string + RequestID string + SessionID string + OccurredAt time.Time + OccurredDay string + RowID int64 +} + +type cursorDailyStatsRecord struct { + Date string + Stats dailyStats + Key string +} + +func loadTrackingRecords(ctx context.Context, db *sql.DB, clock core.Clock) ([]cursorTrackingRecord, error) { + if clock == nil { + clock = core.SystemClock{} + } + columns := cursorTableColumns(ctx, db, "ai_code_hashes") + timeExpr := chooseTrackingTimeExpr(ctx, db) + rows, err := db.QueryContext(ctx, fmt.Sprintf(` + SELECT %s, + %s, + %s, + %s, + %s, + %s, + COALESCE(%s, 0), + rowid + FROM ai_code_hashes + ORDER BY %s ASC`, + cursorTrackingTextColumnExpr(columns, "source"), + cursorTrackingTextColumnExpr(columns, "model"), + cursorTrackingTextColumnExpr(columns, "fileExtension"), + cursorTrackingTextColumnExpr(columns, "fileName"), + cursorTrackingTextColumnExpr(columns, "requestId"), + cursorTrackingTextColumnExpr(columns, "conversationId"), + timeExpr, + timeExpr)) + if err != nil { + return nil, fmt.Errorf("cursor: querying ai_code_hashes: %w", err) + } + defer rows.Close() + + var records []cursorTrackingRecord + for rows.Next() { + if ctx.Err() != nil { + return records, ctx.Err() + } + + var ( + record cursorTrackingRecord + timestamp int64 + ) + if err := rows.Scan( + &record.Source, + &record.Model, + &record.FileExt, + &record.FileName, + &record.RequestID, + &record.SessionID, + ×tamp, + &record.RowID, + ); err != nil { + continue + } + + record.OccurredAt = clock.Now().UTC() + if timestamp > 0 { + record.OccurredAt = shared.UnixAuto(timestamp) + } + record.OccurredDay = record.OccurredAt.Local().Format("2006-01-02") + records = append(records, record) + } + + return records, rows.Err() +} + +func cursorTrackingTextColumnExpr(columns map[string]bool, name string) string { + if columns[strings.ToLower(strings.TrimSpace(name))] { + return fmt.Sprintf("COALESCE(%s, '')", name) + } + return "''" +} + +func 
cursorTableColumns(ctx context.Context, db *sql.DB, table string) map[string]bool { + rows, err := db.QueryContext(ctx, fmt.Sprintf(`PRAGMA table_info(%s)`, strings.TrimSpace(table))) + if err != nil { + return nil + } + defer rows.Close() + + columns := make(map[string]bool) + for rows.Next() { + var ( + cid int + name string + dataType string + notNull int + dfltValue sql.NullString + pk int + ) + if rows.Scan(&cid, &name, &dataType, ¬Null, &dfltValue, &pk) != nil { + continue + } + columns[strings.ToLower(strings.TrimSpace(name))] = true + } + return columns +} + +func loadDailyStatsRecords(ctx context.Context, db *sql.DB) ([]cursorDailyStatsRecord, error) { + rows, err := db.QueryContext(ctx, ` + SELECT key, value FROM ItemTable + WHERE key LIKE 'aiCodeTracking.dailyStats.%' + ORDER BY key ASC`) + if err != nil { + return nil, fmt.Errorf("cursor: querying dailyStats: %w", err) + } + defer rows.Close() + + const prefix = "aiCodeTracking.dailyStats.v1.5." + var records []cursorDailyStatsRecord + for rows.Next() { + if ctx.Err() != nil { + return records, ctx.Err() + } + + var key string + var rawJSON string + if err := rows.Scan(&key, &rawJSON); err != nil { + continue + } + + dateStr := strings.TrimPrefix(key, prefix) + if len(dateStr) != 10 { + continue + } + + var stats dailyStats + if json.Unmarshal([]byte(rawJSON), &stats) != nil { + continue + } + if stats.Date == "" { + stats.Date = dateStr + } + + records = append(records, cursorDailyStatsRecord{ + Date: dateStr, + Stats: stats, + Key: key, + }) + } + + return records, rows.Err() +} From c99a533e3e7df13580462b6ad380f54eab877720 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 12:30:46 +0100 Subject: [PATCH 05/32] refactor: extract tui breakdowns and split daemon server --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 10 +- internal/core/usage_breakdowns.go | 148 +++++++++ internal/core/usage_breakdowns_test.go | 68 +++++ internal/daemon/server.go | 286 ------------------ 
internal/daemon/server_http.go | 178 +++++++++++ internal/daemon/server_logging.go | 36 +++ internal/daemon/server_read_model.go | 99 ++++++ internal/tui/detail.go | 151 +++------ internal/tui/tiles_composition.go | 130 ++------ 9 files changed, 596 insertions(+), 510 deletions(-) create mode 100644 internal/core/usage_breakdowns.go create mode 100644 internal/core/usage_breakdowns_test.go create mode 100644 internal/daemon/server_http.go create mode 100644 internal/daemon/server_logging.go create mode 100644 internal/daemon/server_read_model.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index fdce356..b9cef27 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -32,6 +32,8 @@ This table captures every issue found in this pass. It is broad and high-signal, | R12 | Fixed | Cursor tracking and daily-stats reader duplication | `internal/providers/cursor/tracking_records.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/telemetry.go` | Cursor `ai_code_hashes` rows and `ItemTable` daily-stats envelopes now parse through shared record loaders, including compatibility for older tracking DB schemas with missing columns. Dashboard and telemetry projections now read the same normalized source records. | Keep compatibility coverage for older Cursor schemas. | | R13 | Fixed | Ad hoc daemon log throttling | `internal/core/log_throttle.go`, `internal/daemon/server.go`, `internal/daemon/runtime.go` | Daemon service and dashboard runtime now use a shared throttling helper instead of separate timestamp/mutex patterns for repeated log suppression. | Reuse the same helper if more throttled log sites are added. 
| | R14 | Fixed | Cursor time-source injection | `internal/core/clock.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/tracking_records.go`, `internal/providers/cursor/telemetry.go` | Cursor provider and its shared SQLite readers now use an injectable clock path instead of direct `time.Now()` calls in the main time-sensitive flow. | Extend the same pattern to other provider/analytics subsystems over time. | +| R15 | Fixed | TUI language and MCP parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/detail.go`, `internal/tui/tiles_composition.go` | Repeated `lang_` and `mcp_` metric-key parsing moved into shared core extractors so both detail and composition views consume the same typed breakdown data instead of re-parsing raw keys independently. | Extend the same pattern to the remaining client/project/tool/provider mix extractors. | +| R16 | Fixed | Daemon server responsibility split | `internal/daemon/server.go`, `internal/daemon/server_logging.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_http.go` | Logging, read-model cache flow, and HTTP handlers now live in dedicated files instead of one monolithic daemon server file. | Continue the same split for polling / collection / retention loops. | ## Action Table @@ -39,11 +41,11 @@ This table captures every issue found in this pass. It is broad and high-signal, | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. 
| | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go:393-584`, `internal/dashboardapp/service.go` | The side effects are now injected, but `Model` still owns a very large amount of event-handling and state-transition logic. | Continue splitting update/action logic into smaller TUI units and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | -| A3 | P1 | UI metric-prefix parsing | `internal/tui/tiles_composition.go:302-322`, `internal/tui/tiles_composition.go:913-1527`, `internal/tui/detail.go:371-432`, `internal/tui/analytics.go:663-729` | Rendering code is still parsing raw metric key conventions (`model_`, `usage_client_`, `usage_source_`, `mcp_`, `lang_`) directly. This duplicates interpretation logic across views. | Introduce typed composition DTOs in `internal/core` or `internal/telemetry`; renderers should consume structured sections rather than re-parse maps. | Removes a large class of UI drift bugs and reduces per-render work. | +| A3 | P1 | UI metric-prefix parsing | `internal/tui/tiles_composition.go:257-1579`, `internal/tui/analytics.go:663-729`, `internal/core/usage_breakdowns.go` | MCP and language breakdowns now use shared extractors, but client / source / project / provider / model composition logic still parses raw key conventions directly inside the TUI. | Continue promoting those mix extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | | A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go:307-2188` | `openrouter.go` mixes auth probing, credits, keys, analytics parsing, generation pagination, provider resolution, metadata enrichment, and output projection in one 2800+ LOC file. 
| Split into subpackages/files: `api_client`, `analytics`, `generations`, `provider_resolution`, `projection`, `types`. | Easier maintenance, smaller diff surface, faster targeted testing. | | A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. | Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. | | A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go:160-1757` | `usage_view.go` is simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | -| A7 | P1 | Daemon service monolith | `internal/daemon/server.go:1-1211` | `server.go` owns service startup, socket server, polling, collection, retention, cache refresh, hook handling, and HTTP endpoints. | Split into `service_runtime`, `http_handlers`, `polling`, `collection`, `cache`, and `hook_ingest` files/types. | Lower mental load and easier concurrency review. | +| A7 | P2 | Daemon service monolith | `internal/daemon/server.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The daemon is materially less coupled after the logging/cache/http split, but polling, collection, retention, and spool maintenance still live together in the main server file. | Continue splitting the loop-heavy runtime into `polling`, `collection`, `retention`, and `spool` units. | Lower mental load and easier concurrency review. 
| | A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Cursor’s main time-sensitive path now uses an injectable clock, but several other providers and analytics helpers still read `time.Now()` directly, often mixing local time and UTC. | Extend the clock abstraction to the remaining provider and analytics subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | @@ -52,8 +54,8 @@ This table captures every issue found in this pass. It is broad and high-signal, ## Suggested Execution Order 1. A2, A3 -2. A6, A7 -3. A4, A5 +2. A6, A5 +3. A4, A7 4. A1, A11 5. 
A12, A14, A15 diff --git a/internal/core/usage_breakdowns.go b/internal/core/usage_breakdowns.go new file mode 100644 index 0000000..899af6b --- /dev/null +++ b/internal/core/usage_breakdowns.go @@ -0,0 +1,148 @@ +package core + +import ( + "sort" + "strings" +) + +type LanguageUsageEntry struct { + Name string + Requests float64 +} + +type MCPFunctionUsageEntry struct { + RawName string + Calls float64 +} + +type MCPServerUsageEntry struct { + RawName string + Calls float64 + Functions []MCPFunctionUsageEntry +} + +func ExtractLanguageUsage(s UsageSnapshot) ([]LanguageUsageEntry, map[string]bool) { + byLang := make(map[string]float64) + usedKeys := make(map[string]bool) + + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "lang_") { + continue + } + name := strings.TrimSpace(strings.TrimPrefix(key, "lang_")) + if name == "" { + continue + } + byLang[name] += *metric.Used + usedKeys[key] = true + } + + if len(byLang) == 0 { + return nil, nil + } + + out := make([]LanguageUsageEntry, 0, len(byLang)) + for name, requests := range byLang { + if requests <= 0 { + continue + } + out = append(out, LanguageUsageEntry{ + Name: name, + Requests: requests, + }) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func ExtractMCPUsage(s UsageSnapshot) ([]MCPServerUsageEntry, map[string]bool) { + usedKeys := make(map[string]bool) + serverMap := make(map[string]*MCPServerUsageEntry) + + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "mcp_") { + continue + } + usedKeys[key] = true + if key == "mcp_calls_total" || key == "mcp_calls_total_today" || key == "mcp_servers_active" { + continue + } + if strings.HasSuffix(key, "_today") { + continue + } + + rest := strings.TrimPrefix(key, "mcp_") + if !strings.HasSuffix(rest, "_total") { + continue + } + + 
rawServerName := strings.TrimSpace(strings.TrimSuffix(rest, "_total")) + if rawServerName == "" { + continue + } + serverMap[rawServerName] = &MCPServerUsageEntry{ + RawName: rawServerName, + Calls: *metric.Used, + } + } + + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "mcp_") { + continue + } + if key == "mcp_calls_total" || key == "mcp_calls_total_today" || key == "mcp_servers_active" { + continue + } + if strings.HasSuffix(key, "_today") || strings.HasSuffix(key, "_total") { + continue + } + + rest := strings.TrimPrefix(key, "mcp_") + for rawServerName, server := range serverMap { + prefix := rawServerName + "_" + if !strings.HasPrefix(rest, prefix) { + continue + } + funcName := strings.TrimSpace(strings.TrimPrefix(rest, prefix)) + if funcName == "" { + break + } + server.Functions = append(server.Functions, MCPFunctionUsageEntry{ + RawName: funcName, + Calls: *metric.Used, + }) + break + } + } + + if len(serverMap) == 0 { + return nil, usedKeys + } + + out := make([]MCPServerUsageEntry, 0, len(serverMap)) + for _, server := range serverMap { + if server.Calls <= 0 { + continue + } + sort.Slice(server.Functions, func(i, j int) bool { + if server.Functions[i].Calls != server.Functions[j].Calls { + return server.Functions[i].Calls > server.Functions[j].Calls + } + return server.Functions[i].RawName < server.Functions[j].RawName + }) + out = append(out, *server) + } + + sort.Slice(out, func(i, j int) bool { + if out[i].Calls != out[j].Calls { + return out[i].Calls > out[j].Calls + } + return out[i].RawName < out[j].RawName + }) + return out, usedKeys +} diff --git a/internal/core/usage_breakdowns_test.go b/internal/core/usage_breakdowns_test.go new file mode 100644 index 0000000..c6d121b --- /dev/null +++ b/internal/core/usage_breakdowns_test.go @@ -0,0 +1,68 @@ +package core + +import "testing" + +func TestExtractLanguageUsage(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "lang_go": {Used: 
Float64Ptr(4)}, + "lang_typescript": {Used: Float64Ptr(2)}, + "lang_go_extra": {Used: nil}, + "requests": {Used: Float64Ptr(10)}, + }, + } + + got, used := ExtractLanguageUsage(snap) + if len(got) != 2 { + t.Fatalf("len(got) = %d, want 2", len(got)) + } + if got[0].Name != "go" || got[0].Requests != 4 { + t.Fatalf("got[0] = %#v, want go/4", got[0]) + } + if got[1].Name != "typescript" || got[1].Requests != 2 { + t.Fatalf("got[1] = %#v, want typescript/2", got[1]) + } + if !used["lang_go"] || !used["lang_typescript"] { + t.Fatalf("used keys missing expected language metrics: %#v", used) + } + if used["requests"] { + t.Fatalf("unexpected non-language metric in used keys: %#v", used) + } +} + +func TestExtractMCPUsage(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "mcp_calls_total": {Used: Float64Ptr(5)}, + "mcp_github_total": {Used: Float64Ptr(3)}, + "mcp_github_list_issues": {Used: Float64Ptr(2)}, + "mcp_github_create_issue": {Used: Float64Ptr(1)}, + "mcp_slack_total": {Used: Float64Ptr(2)}, + "mcp_slack_post_message": {Used: Float64Ptr(2)}, + "mcp_slack_post_message_today": {Used: Float64Ptr(1)}, + }, + } + + got, used := ExtractMCPUsage(snap) + if len(got) != 2 { + t.Fatalf("len(got) = %d, want 2", len(got)) + } + if got[0].RawName != "github" || got[0].Calls != 3 { + t.Fatalf("got[0] = %#v, want github/3", got[0]) + } + if len(got[0].Functions) != 2 { + t.Fatalf("len(got[0].Functions) = %d, want 2", len(got[0].Functions)) + } + if got[0].Functions[0].RawName != "list_issues" || got[0].Functions[0].Calls != 2 { + t.Fatalf("got[0].Functions[0] = %#v, want list_issues/2", got[0].Functions[0]) + } + if got[1].RawName != "slack" || got[1].Calls != 2 { + t.Fatalf("got[1] = %#v, want slack/2", got[1]) + } + if !used["mcp_github_total"] || !used["mcp_slack_post_message"] { + t.Fatalf("used keys missing expected MCP metrics: %#v", used) + } + if !used["mcp_calls_total"] { + t.Fatalf("aggregate MCP key should still be marked used") + } +} diff 
--git a/internal/daemon/server.go b/internal/daemon/server.go index 77c478a..6f93d01 100644 --- a/internal/daemon/server.go +++ b/internal/daemon/server.go @@ -19,11 +19,9 @@ import ( "github.com/janekbaraniewski/openusage/internal/config" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/janekbaraniewski/openusage/internal/integrations" "github.com/janekbaraniewski/openusage/internal/providers" "github.com/janekbaraniewski/openusage/internal/providers/shared" "github.com/janekbaraniewski/openusage/internal/telemetry" - "github.com/janekbaraniewski/openusage/internal/version" ) type Service struct { @@ -207,129 +205,6 @@ func (s *Service) flushBacklog(ctx context.Context, retryReqs []telemetry.Ingest return flush, enqueued, append(warnings, flushWarnings...) } -// --- Logging --- - -func (s *Service) infof(event, format string, args ...any) { - if s == nil || !s.cfg.Verbose { - return - } - if strings.TrimSpace(format) == "" { - log.Printf("daemon level=info event=%s", event) - return - } - log.Printf("daemon level=info event=%s "+format, append([]any{event}, args...)...) -} - -func (s *Service) warnf(event, format string, args ...any) { - if s == nil || !s.cfg.Verbose { - return - } - if strings.TrimSpace(format) == "" { - log.Printf("daemon level=warn event=%s", event) - return - } - log.Printf("daemon level=warn event=%s "+format, append([]any{event}, args...)...) 
-} - -func (s *Service) shouldLog(key string, interval time.Duration) bool { - if s == nil { - return false - } - return s.logThrottle.Allow(key, interval, time.Now()) -} - -// --- Read-model cache --- - -func (s *Service) computeReadModel( - ctx context.Context, - req ReadModelRequest, -) (map[string]core.UsageSnapshot, error) { - start := time.Now() - templates := ReadModelTemplatesFromRequest(req, DisabledAccountsFromConfig()) - if len(templates) == 0 { - return map[string]core.UsageSnapshot{}, nil - } - tw := normalizeReadModelTimeWindow(req.TimeWindow) - result, err := telemetry.ApplyCanonicalTelemetryViewWithOptions(ctx, s.cfg.DBPath, templates, telemetry.ReadModelOptions{ - ProviderLinks: req.ProviderLinks, - TimeWindowHours: tw.Hours(), - TimeWindow: tw, - }) - core.Tracef("[read_model_perf] computeReadModel TOTAL: %dms (window=%s, accounts=%d, results=%d)", - time.Since(start).Milliseconds(), tw, len(req.Accounts), len(result)) - return result, err -} - -func (s *Service) refreshReadModelCacheAsync( - parent context.Context, - cacheKey string, - req ReadModelRequest, - timeout time.Duration, -) { - if !s.rmCache.beginRefresh(cacheKey) { - return - } - go func() { - defer s.rmCache.endRefresh(cacheKey) - refreshCtx, cancel := context.WithTimeout(parent, timeout) - defer cancel() - snapshots, err := s.computeReadModel(refreshCtx, req) - if err != nil { - if s.shouldLog("read_model_cache_refresh_error", 8*time.Second) { - s.warnf("read_model_cache_refresh_error", "error=%v", err) - } - return - } - s.rmCache.set(cacheKey, snapshots) - }() -} - -func (s *Service) backgroundContext() context.Context { - if s != nil && s.ctx != nil { - return s.ctx - } - return context.Background() -} - -func (s *Service) runReadModelCacheLoop(ctx context.Context) { - if s == nil { - return - } - - interval := s.cfg.PollInterval / 2 - interval = max(5*time.Second, min(30*time.Second, interval)) - - s.infof("read_model_cache_loop_start", "interval=%s", interval) - 
s.refreshReadModelCacheFromConfig(ctx) - - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - s.infof("read_model_cache_loop_stop", "reason=context_done") - return - case <-ticker.C: - s.refreshReadModelCacheFromConfig(ctx) - } - } -} - -func (s *Service) refreshReadModelCacheFromConfig(ctx context.Context) { - req, err := BuildReadModelRequestFromConfig() - if err != nil { - if s.shouldLog("read_model_cache_config_error", 15*time.Second) { - s.warnf("read_model_cache_config_error", "error=%v", err) - } - return - } - if len(req.Accounts) == 0 { - return - } - cacheKey := ReadModelRequestKey(req) - s.refreshReadModelCacheAsync(ctx, cacheKey, req, 60*time.Second) -} - // --- Collection loop --- func (s *Service) runCollectLoop(ctx context.Context) { @@ -961,157 +836,6 @@ func EnsureSocketPathAvailable(socketPath string) error { return nil } -func (s *Service) handleHealth(w http.ResponseWriter, _ *http.Request) { - writeJSON(w, http.StatusOK, map[string]any{ - "status": "ok", - "daemon_version": strings.TrimSpace(version.Version), - "api_version": APIVersion, - "integration_version": integrations.IntegrationVersion, - "provider_registry_hash": ProviderRegistryHash(), - }) -} - -func (s *Service) handleHook(w http.ResponseWriter, r *http.Request) { - started := time.Now() - if r.Method != http.MethodPost { - writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed") - return - } - - sourceName := strings.TrimPrefix(strings.TrimSpace(r.URL.Path), "/v1/hook/") - sourceName = strings.TrimSpace(strings.Trim(sourceName, "/")) - if sourceName == "" { - writeJSONError(w, http.StatusBadRequest, "missing hook source") - return - } - source, ok := providers.TelemetrySourceBySystem(sourceName) - if !ok { - writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("unknown hook source %q", sourceName)) - return - } - - payload, err := io.ReadAll(r.Body) - if err != nil { - writeJSONError(w, http.StatusBadRequest, "read payload 
failed") - return - } - if len(strings.TrimSpace(string(payload))) == 0 { - writeJSONError(w, http.StatusBadRequest, "empty payload") - return - } - - accountID := strings.TrimSpace(r.URL.Query().Get("account_id")) - reqs, err := telemetry.ParseSourceHookPayload(source, payload, source.DefaultCollectOptions(), accountID) - if err != nil { - writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("parse hook payload: %v", err)) - return - } - if len(reqs) == 0 { - writeJSON(w, http.StatusOK, HookResponse{Source: sourceName}) - return - } - - tally, _ := s.ingestBatch(r.Context(), reqs) - var warnings []string - if tally.failed > 0 { - warnings = append(warnings, fmt.Sprintf("%d ingest failures", tally.failed)) - } - - writeJSON(w, http.StatusOK, HookResponse{ - Source: sourceName, - Enqueued: len(reqs), - Processed: tally.processed, - Ingested: tally.ingested, - Deduped: tally.deduped, - Failed: tally.failed, - Warnings: warnings, - }) - - durationMs := time.Since(started).Milliseconds() - logLevel := "hook_ingest" - shouldLog := tally.failed > 0 || s.shouldLog("hook_ingest_"+sourceName, 3*time.Second) - if !shouldLog { - return - } - if tally.failed > 0 { - s.warnf(logLevel, - "source=%s account_id=%q duration_ms=%d enqueued=%d processed=%d ingested=%d deduped=%d failed=%d", - sourceName, accountID, durationMs, - len(reqs), tally.processed, tally.ingested, tally.deduped, tally.failed, - ) - } else { - s.infof(logLevel, - "source=%s account_id=%q duration_ms=%d enqueued=%d processed=%d ingested=%d deduped=%d failed=%d", - sourceName, accountID, durationMs, - len(reqs), tally.processed, tally.ingested, tally.deduped, tally.failed, - ) - } -} - -func (s *Service) handleReadModel(w http.ResponseWriter, r *http.Request) { - started := time.Now() - if r.Method != http.MethodPost { - writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed") - return - } - - var req ReadModelRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - writeJSONError(w, 
http.StatusBadRequest, fmt.Sprintf("decode read-model request: %v", err)) - return - } - - if len(req.Accounts) == 0 { - configReq, configErr := BuildReadModelRequestFromConfig() - if configErr != nil || len(configReq.Accounts) == 0 { - writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: map[string]core.UsageSnapshot{}}) - return - } - req = configReq - } - - cacheKey := ReadModelRequestKey(req) - if cached, cachedAt, ok := s.rmCache.get(cacheKey); ok { - core.Tracef("[read_model] cache hit key=%s age=%s providers=%d", cacheKey, time.Since(cachedAt).Round(time.Millisecond), len(cached)) - for id, snap := range cached { - core.Tracef("[read_model] %s: %d metrics", id, len(snap.Metrics)) - } - writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: cached}) - if time.Since(cachedAt) > 2*time.Second { - s.refreshReadModelCacheAsync(s.backgroundContext(), cacheKey, req, 60*time.Second) - } - return - } - - computeCtx, cancel := context.WithTimeout(r.Context(), 5*time.Second) - snapshots, err := s.computeReadModel(computeCtx, req) - cancel() - if err == nil && len(snapshots) > 0 { - s.rmCache.set(cacheKey, snapshots) - writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: snapshots}) - return - } - - if err != nil && s.shouldLog("read_model_cache_miss_compute_error", 8*time.Second) { - s.warnf("read_model_cache_miss_compute_error", "error=%v", err) - } - - s.refreshReadModelCacheAsync(s.backgroundContext(), cacheKey, req, 60*time.Second) - snapshots = ReadModelTemplatesFromRequest(req, DisabledAccountsFromConfig()) - writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: snapshots}) - durationMs := time.Since(started).Milliseconds() - if durationMs >= 1200 && s.shouldLog("read_model_slow", 30*time.Second) { - s.infof( - "read_model_slow", - "duration_ms=%d requested_accounts=%d returned_snapshots=%d provider_links=%d", - durationMs, - len(req.Accounts), - len(snapshots), - len(req.ProviderLinks), - ) - } -} - // --- Helpers --- func buildCollectors() 
[]telemetry.Collector { @@ -1172,13 +896,3 @@ func FlushInBatches(ctx context.Context, pipeline *telemetry.Pipeline, maxTotal return accum, warnings } - -func writeJSON(w http.ResponseWriter, status int, payload any) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(status) - _ = json.NewEncoder(w).Encode(payload) -} - -func writeJSONError(w http.ResponseWriter, status int, message string) { - writeJSON(w, status, map[string]string{"error": message}) -} diff --git a/internal/daemon/server_http.go b/internal/daemon/server_http.go new file mode 100644 index 0000000..82ec5d4 --- /dev/null +++ b/internal/daemon/server_http.go @@ -0,0 +1,178 @@ +package daemon + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/integrations" + "github.com/janekbaraniewski/openusage/internal/providers" + "github.com/janekbaraniewski/openusage/internal/telemetry" + "github.com/janekbaraniewski/openusage/internal/version" +) + +func (s *Service) handleHealth(w http.ResponseWriter, _ *http.Request) { + writeJSON(w, http.StatusOK, map[string]any{ + "status": "ok", + "daemon_version": strings.TrimSpace(version.Version), + "api_version": APIVersion, + "integration_version": integrations.IntegrationVersion, + "provider_registry_hash": ProviderRegistryHash(), + }) +} + +func (s *Service) handleHook(w http.ResponseWriter, r *http.Request) { + started := time.Now() + if r.Method != http.MethodPost { + writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + sourceName := strings.TrimPrefix(strings.TrimSpace(r.URL.Path), "/v1/hook/") + sourceName = strings.TrimSpace(strings.Trim(sourceName, "/")) + if sourceName == "" { + writeJSONError(w, http.StatusBadRequest, "missing hook source") + return + } + source, ok := providers.TelemetrySourceBySystem(sourceName) + if !ok { + writeJSONError(w, 
http.StatusBadRequest, fmt.Sprintf("unknown hook source %q", sourceName)) + return + } + + payload, err := io.ReadAll(r.Body) + if err != nil { + writeJSONError(w, http.StatusBadRequest, "read payload failed") + return + } + if len(strings.TrimSpace(string(payload))) == 0 { + writeJSONError(w, http.StatusBadRequest, "empty payload") + return + } + + accountID := strings.TrimSpace(r.URL.Query().Get("account_id")) + reqs, err := telemetry.ParseSourceHookPayload(source, payload, source.DefaultCollectOptions(), accountID) + if err != nil { + writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("parse hook payload: %v", err)) + return + } + if len(reqs) == 0 { + writeJSON(w, http.StatusOK, HookResponse{Source: sourceName}) + return + } + + tally, _ := s.ingestBatch(r.Context(), reqs) + var warnings []string + if tally.failed > 0 { + warnings = append(warnings, fmt.Sprintf("%d ingest failures", tally.failed)) + } + + writeJSON(w, http.StatusOK, HookResponse{ + Source: sourceName, + Enqueued: len(reqs), + Processed: tally.processed, + Ingested: tally.ingested, + Deduped: tally.deduped, + Failed: tally.failed, + Warnings: warnings, + }) + + durationMs := time.Since(started).Milliseconds() + logLevel := "hook_ingest" + shouldLog := tally.failed > 0 || s.shouldLog("hook_ingest_"+sourceName, 3*time.Second) + if !shouldLog { + return + } + if tally.failed > 0 { + s.warnf(logLevel, + "source=%s account_id=%q duration_ms=%d enqueued=%d processed=%d ingested=%d deduped=%d failed=%d", + sourceName, accountID, durationMs, + len(reqs), tally.processed, tally.ingested, tally.deduped, tally.failed, + ) + return + } + s.infof(logLevel, + "source=%s account_id=%q duration_ms=%d enqueued=%d processed=%d ingested=%d deduped=%d failed=%d", + sourceName, accountID, durationMs, + len(reqs), tally.processed, tally.ingested, tally.deduped, tally.failed, + ) +} + +func (s *Service) handleReadModel(w http.ResponseWriter, r *http.Request) { + started := time.Now() + if r.Method != http.MethodPost 
{ + writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req ReadModelRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("decode read-model request: %v", err)) + return + } + + if len(req.Accounts) == 0 { + configReq, configErr := BuildReadModelRequestFromConfig() + if configErr != nil || len(configReq.Accounts) == 0 { + writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: map[string]core.UsageSnapshot{}}) + return + } + req = configReq + } + + cacheKey := ReadModelRequestKey(req) + if cached, cachedAt, ok := s.rmCache.get(cacheKey); ok { + core.Tracef("[read_model] cache hit key=%s age=%s providers=%d", cacheKey, time.Since(cachedAt).Round(time.Millisecond), len(cached)) + for id, snap := range cached { + core.Tracef("[read_model] %s: %d metrics", id, len(snap.Metrics)) + } + writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: cached}) + if time.Since(cachedAt) > 2*time.Second { + s.refreshReadModelCacheAsync(s.backgroundContext(), cacheKey, req, 60*time.Second) + } + return + } + + computeCtx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + snapshots, err := s.computeReadModel(computeCtx, req) + cancel() + if err == nil && len(snapshots) > 0 { + s.rmCache.set(cacheKey, snapshots) + writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: snapshots}) + return + } + + if err != nil && s.shouldLog("read_model_cache_miss_compute_error", 8*time.Second) { + s.warnf("read_model_cache_miss_compute_error", "error=%v", err) + } + + s.refreshReadModelCacheAsync(s.backgroundContext(), cacheKey, req, 60*time.Second) + snapshots = ReadModelTemplatesFromRequest(req, DisabledAccountsFromConfig()) + writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: snapshots}) + durationMs := time.Since(started).Milliseconds() + if durationMs >= 1200 && s.shouldLog("read_model_slow", 30*time.Second) { + s.infof( + "read_model_slow", + "duration_ms=%d 
requested_accounts=%d returned_snapshots=%d provider_links=%d", + durationMs, + len(req.Accounts), + len(snapshots), + len(req.ProviderLinks), + ) + } +} + +func writeJSON(w http.ResponseWriter, status int, payload any) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + _ = json.NewEncoder(w).Encode(payload) +} + +func writeJSONError(w http.ResponseWriter, status int, message string) { + writeJSON(w, status, map[string]string{"error": message}) +} diff --git a/internal/daemon/server_logging.go b/internal/daemon/server_logging.go new file mode 100644 index 0000000..0beacb0 --- /dev/null +++ b/internal/daemon/server_logging.go @@ -0,0 +1,36 @@ +package daemon + +import ( + "log" + "strings" + "time" +) + +func (s *Service) infof(event, format string, args ...any) { + if s == nil || !s.cfg.Verbose { + return + } + if strings.TrimSpace(format) == "" { + log.Printf("daemon level=info event=%s", event) + return + } + log.Printf("daemon level=info event=%s "+format, append([]any{event}, args...)...) +} + +func (s *Service) warnf(event, format string, args ...any) { + if s == nil || !s.cfg.Verbose { + return + } + if strings.TrimSpace(format) == "" { + log.Printf("daemon level=warn event=%s", event) + return + } + log.Printf("daemon level=warn event=%s "+format, append([]any{event}, args...)...) 
+} + +func (s *Service) shouldLog(key string, interval time.Duration) bool { + if s == nil { + return false + } + return s.logThrottle.Allow(key, interval, time.Now()) +} diff --git a/internal/daemon/server_read_model.go b/internal/daemon/server_read_model.go new file mode 100644 index 0000000..b78da89 --- /dev/null +++ b/internal/daemon/server_read_model.go @@ -0,0 +1,99 @@ +package daemon + +import ( + "context" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/telemetry" +) + +func (s *Service) computeReadModel( + ctx context.Context, + req ReadModelRequest, +) (map[string]core.UsageSnapshot, error) { + start := time.Now() + templates := ReadModelTemplatesFromRequest(req, DisabledAccountsFromConfig()) + if len(templates) == 0 { + return map[string]core.UsageSnapshot{}, nil + } + tw := normalizeReadModelTimeWindow(req.TimeWindow) + result, err := telemetry.ApplyCanonicalTelemetryViewWithOptions(ctx, s.cfg.DBPath, templates, telemetry.ReadModelOptions{ + ProviderLinks: req.ProviderLinks, + TimeWindowHours: tw.Hours(), + TimeWindow: tw, + }) + core.Tracef("[read_model_perf] computeReadModel TOTAL: %dms (window=%s, accounts=%d, results=%d)", + time.Since(start).Milliseconds(), tw, len(req.Accounts), len(result)) + return result, err +} + +func (s *Service) refreshReadModelCacheAsync( + parent context.Context, + cacheKey string, + req ReadModelRequest, + timeout time.Duration, +) { + if !s.rmCache.beginRefresh(cacheKey) { + return + } + go func() { + defer s.rmCache.endRefresh(cacheKey) + refreshCtx, cancel := context.WithTimeout(parent, timeout) + defer cancel() + snapshots, err := s.computeReadModel(refreshCtx, req) + if err != nil { + if s.shouldLog("read_model_cache_refresh_error", 8*time.Second) { + s.warnf("read_model_cache_refresh_error", "error=%v", err) + } + return + } + s.rmCache.set(cacheKey, snapshots) + }() +} + +func (s *Service) backgroundContext() context.Context { + if s != nil && 
s.ctx != nil { + return s.ctx + } + return context.Background() +} + +func (s *Service) runReadModelCacheLoop(ctx context.Context) { + if s == nil { + return + } + + interval := s.cfg.PollInterval / 2 + interval = max(5*time.Second, min(30*time.Second, interval)) + + s.infof("read_model_cache_loop_start", "interval=%s", interval) + s.refreshReadModelCacheFromConfig(ctx) + + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + s.infof("read_model_cache_loop_stop", "reason=context_done") + return + case <-ticker.C: + s.refreshReadModelCacheFromConfig(ctx) + } + } +} + +func (s *Service) refreshReadModelCacheFromConfig(ctx context.Context) { + req, err := BuildReadModelRequestFromConfig() + if err != nil { + if s.shouldLog("read_model_cache_config_error", 15*time.Second) { + s.warnf("read_model_cache_config_error", "error=%v", err) + } + return + } + if len(req.Accounts) == 0 { + return + } + cacheKey := ReadModelRequestKey(req) + s.refreshReadModelCacheAsync(ctx, cacheKey, req, 60*time.Second) +} diff --git a/internal/tui/detail.go b/internal/tui/detail.go index 883133f..5ffd77f 100644 --- a/internal/tui/detail.go +++ b/internal/tui/detail.go @@ -883,37 +883,19 @@ func hasChartableSeries(series map[string][]core.TimePoint) bool { // hasLanguageMetrics checks if the snapshot contains lang_ metric keys. 
func hasLanguageMetrics(snap core.UsageSnapshot) bool { - for key := range snap.Metrics { - if strings.HasPrefix(key, "lang_") { - return true - } - } - return false + langs, _ := core.ExtractLanguageUsage(snap) + return len(langs) > 0 } func renderLanguagesSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { - type langEntry struct { - name string - count float64 - } - - var langs []langEntry - for key, m := range snap.Metrics { - if !strings.HasPrefix(key, "lang_") || m.Used == nil { - continue - } - name := strings.TrimPrefix(key, "lang_") - langs = append(langs, langEntry{name: name, count: *m.Used}) - } - sort.Slice(langs, func(i, j int) bool { return langs[i].count > langs[j].count }) - + langs, _ := core.ExtractLanguageUsage(snap) if len(langs) == 0 { return } total := float64(0) for _, l := range langs { - total += l.count + total += l.Requests } if total <= 0 { return @@ -927,9 +909,9 @@ func renderLanguagesSection(sb *strings.Builder, snap core.UsageSnapshot, w int) var items []chartItem for _, l := range langs { items = append(items, chartItem{ - Label: l.name, - Value: l.count, - Color: stableModelColor("lang:"+l.name, "languages"), + Label: l.Name, + Value: l.Requests, + Color: stableModelColor("lang:"+l.Name, "languages"), }) } @@ -968,8 +950,8 @@ func renderLanguagesSection(sb *strings.Builder, snap core.UsageSnapshot, w int) bar, track, pctStr, countStr)) } - if len(snap.Metrics) > maxShow { - remaining := len(snap.Metrics) - maxShow + if len(langs) > maxShow { + remaining := len(langs) - maxShow if remaining > 0 { sb.WriteString(" " + dimStyle.Render(fmt.Sprintf("+ %d more languages", remaining)) + "\n") } @@ -978,102 +960,45 @@ func renderLanguagesSection(sb *strings.Builder, snap core.UsageSnapshot, w int) // hasMCPMetrics checks if the snapshot contains any MCP metric keys. 
func hasMCPMetrics(snap core.UsageSnapshot) bool { - for key := range snap.Metrics { - if strings.HasPrefix(key, "mcp_") { - return true - } - } - return false + servers, _ := core.ExtractMCPUsage(snap) + return len(servers) > 0 } // renderMCPSection renders MCP server and function call metrics. // Uses prettifyMCPServerName/prettifyMCPFunctionName from tiles.go (same package). func renderMCPSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { - type mcpFunc struct { + rawServers, _ := core.ExtractMCPUsage(snap) + servers := make([]struct { name string calls float64 - } - type mcpServer struct { - rawName string - name string - calls float64 - funcs []mcpFunc - } - - // Collect server totals from metrics. - serverMap := make(map[string]*mcpServer) - for key, m := range snap.Metrics { - if !strings.HasPrefix(key, "mcp_") || m.Used == nil { - continue - } - if key == "mcp_calls_total" || key == "mcp_calls_total_today" || key == "mcp_servers_active" { - continue - } - if strings.HasSuffix(key, "_today") { - continue - } - - rest := strings.TrimPrefix(key, "mcp_") - - if strings.HasSuffix(rest, "_total") { - rawServerName := strings.TrimSuffix(rest, "_total") - if rawServerName == "" { - continue - } - serverMap[rawServerName] = &mcpServer{ - rawName: rawServerName, - name: prettifyMCPServerName(rawServerName), - calls: *m.Used, - } - } - } - - // Second pass: collect functions for each known server. 
- for key, m := range snap.Metrics { - if !strings.HasPrefix(key, "mcp_") || m.Used == nil { - continue - } - if key == "mcp_calls_total" || key == "mcp_calls_total_today" || key == "mcp_servers_active" { - continue - } - if strings.HasSuffix(key, "_today") || strings.HasSuffix(key, "_total") { - continue - } - - rest := strings.TrimPrefix(key, "mcp_") - for rawServerName, srv := range serverMap { - prefix := rawServerName + "_" - if strings.HasPrefix(rest, prefix) { - funcName := strings.TrimPrefix(rest, prefix) - if funcName != "" { - srv.funcs = append(srv.funcs, mcpFunc{ - name: prettifyMCPFunctionName(funcName), - calls: *m.Used, - }) - } - break + funcs []struct { + name string + calls float64 + } + }, 0, len(rawServers)) + for _, rawServer := range rawServers { + server := struct { + name string + calls float64 + funcs []struct { + name string + calls float64 } + }{ + name: prettifyMCPServerName(rawServer.RawName), + calls: rawServer.Calls, + } + for _, rawFunc := range rawServer.Functions { + server.funcs = append(server.funcs, struct { + name string + calls float64 + }{ + name: prettifyMCPFunctionName(rawFunc.RawName), + calls: rawFunc.Calls, + }) } + servers = append(servers, server) } - - // Sort servers by calls desc. 
- servers := make([]*mcpServer, 0, len(serverMap)) - for _, srv := range serverMap { - sort.Slice(srv.funcs, func(i, j int) bool { - if srv.funcs[i].calls != srv.funcs[j].calls { - return srv.funcs[i].calls > srv.funcs[j].calls - } - return srv.funcs[i].name < srv.funcs[j].name - }) - servers = append(servers, srv) - } - sort.Slice(servers, func(i, j int) bool { - if servers[i].calls != servers[j].calls { - return servers[i].calls > servers[j].calls - } - return servers[i].name < servers[j].name - }) - if len(servers) == 0 { return } diff --git a/internal/tui/tiles_composition.go b/internal/tui/tiles_composition.go index ed424d6..e57a7dd 100644 --- a/internal/tui/tiles_composition.go +++ b/internal/tui/tiles_composition.go @@ -2215,36 +2215,14 @@ func buildProviderLanguageCompositionLines(snap core.UsageSnapshot, innerW int, } func collectProviderLanguageMix(snap core.UsageSnapshot) ([]toolMixEntry, map[string]bool) { - byLang := make(map[string]float64) - usedKeys := make(map[string]bool) - - for key, met := range snap.Metrics { - if met.Used == nil || !strings.HasPrefix(key, "lang_") { - continue - } - name := strings.TrimPrefix(key, "lang_") - if name == "" { - continue - } - byLang[name] = *met.Used - usedKeys[key] = true + languageUsage, usedKeys := core.ExtractLanguageUsage(snap) + if len(languageUsage) == 0 { + return nil, usedKeys } - - langs := make([]toolMixEntry, 0, len(byLang)) - for name, count := range byLang { - if count <= 0 { - continue - } - langs = append(langs, toolMixEntry{name: name, count: count}) + langs := make([]toolMixEntry, 0, len(languageUsage)) + for _, language := range languageUsage { + langs = append(langs, toolMixEntry{name: language.Name, count: language.Requests}) } - - sort.Slice(langs, func(i, j int) bool { - if langs[i].count == langs[j].count { - return langs[i].name < langs[j].name - } - return langs[i].count > langs[j].count - }) - return langs, usedKeys } @@ -2500,96 +2478,34 @@ func isMCPToolMetricName(name string) bool { 
} func buildMCPUsageLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { - usedKeys := make(map[string]bool) - type funcEntry struct { name string calls float64 } type serverEntry struct { - rawName string - name string - calls float64 - funcs []funcEntry - } - - // First pass: collect server totals. - serverMap := make(map[string]*serverEntry) - for key, m := range snap.Metrics { - if !strings.HasPrefix(key, "mcp_") || m.Used == nil { - continue - } - usedKeys[key] = true - - if key == "mcp_calls_total" || key == "mcp_calls_total_today" || key == "mcp_servers_active" { - continue - } - if strings.HasSuffix(key, "_today") { - continue - } - - rest := strings.TrimPrefix(key, "mcp_") - if !strings.HasSuffix(rest, "_total") { - continue - } - rawServerName := strings.TrimSuffix(rest, "_total") - if rawServerName == "" { - continue - } - serverMap[rawServerName] = &serverEntry{ - rawName: rawServerName, - name: prettifyMCPServerName(rawServerName), - calls: *m.Used, - } + name string + calls float64 + funcs []funcEntry } - // Second pass: collect functions for each known server. 
- for key, m := range snap.Metrics { - if !strings.HasPrefix(key, "mcp_") || m.Used == nil { - continue - } - if key == "mcp_calls_total" || key == "mcp_calls_total_today" || key == "mcp_servers_active" { - continue + rawServers, usedKeys := core.ExtractMCPUsage(snap) + servers := make([]serverEntry, 0, len(rawServers)) + var totalCalls float64 + for _, rawServer := range rawServers { + server := serverEntry{ + name: prettifyMCPServerName(rawServer.RawName), + calls: rawServer.Calls, } - if strings.HasSuffix(key, "_today") || strings.HasSuffix(key, "_total") { - continue - } - rest := strings.TrimPrefix(key, "mcp_") - for rawServerName, srv := range serverMap { - prefix := rawServerName + "_" - if strings.HasPrefix(rest, prefix) { - funcName := strings.TrimPrefix(rest, prefix) - if funcName != "" { - srv.funcs = append(srv.funcs, funcEntry{ - name: prettifyMCPFunctionName(funcName), - calls: *m.Used, - }) - } - break - } + for _, rawFunc := range rawServer.Functions { + server.funcs = append(server.funcs, funcEntry{ + name: prettifyMCPFunctionName(rawFunc.RawName), + calls: rawFunc.Calls, + }) } + servers = append(servers, server) + totalCalls += server.calls } - // Sort servers and their functions. 
- var servers []*serverEntry - var totalCalls float64 - for _, srv := range serverMap { - sort.Slice(srv.funcs, func(i, j int) bool { - if srv.funcs[i].calls != srv.funcs[j].calls { - return srv.funcs[i].calls > srv.funcs[j].calls - } - return srv.funcs[i].name < srv.funcs[j].name - }) - servers = append(servers, srv) - totalCalls += srv.calls - } - sort.Slice(servers, func(i, j int) bool { - if servers[i].calls != servers[j].calls { - return servers[i].calls > servers[j].calls - } - return servers[i].name < servers[j].name - }) - if len(servers) == 0 || totalCalls <= 0 { return nil, usedKeys } From dd9e56d26b0de12b9b14af3ac4dcd47be603a2ca Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 12:41:55 +0100 Subject: [PATCH 06/32] refactor: continue daemon and telemetry decomposition --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 9 +- internal/core/usage_breakdowns.go | 130 +++++ internal/core/usage_breakdowns_test.go | 33 ++ internal/daemon/server.go | 546 ------------------ internal/daemon/server_loops.go | 534 +++++++++++++++++ internal/telemetry/usage_view.go | 218 ------- internal/telemetry/usage_view_helpers.go | 198 +++++++ internal/tui/tiles_composition.go | 88 +-- 8 files changed, 912 insertions(+), 844 deletions(-) create mode 100644 internal/daemon/server_loops.go create mode 100644 internal/telemetry/usage_view_helpers.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index b9cef27..f30b7a6 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -34,6 +34,9 @@ This table captures every issue found in this pass. 
It is broad and high-signal, | R14 | Fixed | Cursor time-source injection | `internal/core/clock.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/tracking_records.go`, `internal/providers/cursor/telemetry.go` | Cursor provider and its shared SQLite readers now use an injectable clock path instead of direct `time.Now()` calls in the main time-sensitive flow. | Extend the same pattern to other provider/analytics subsystems over time. | | R15 | Fixed | TUI language and MCP parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/detail.go`, `internal/tui/tiles_composition.go` | Repeated `lang_` and `mcp_` metric-key parsing moved into shared core extractors so both detail and composition views consume the same typed breakdown data instead of re-parsing raw keys independently. | Extend the same pattern to the remaining client/project/tool/provider mix extractors. | | R16 | Fixed | Daemon server responsibility split | `internal/daemon/server.go`, `internal/daemon/server_logging.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_http.go` | Logging, read-model cache flow, and HTTP handlers now live in dedicated files instead of one monolithic daemon server file. | Continue the same split for polling / collection / retention loops. | +| R17 | Fixed | TUI project parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/tiles_composition.go` | Provider project mix extraction now uses one shared core extractor instead of duplicating project metric and daily-series parsing inside the TUI composition layer. | Continue the same extraction pattern for client / source / provider mix breakdowns. | +| R18 | Fixed | Telemetry MCP/helper split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go` | MCP parsing, metric sanitizing, and generic map-prefix helper logic moved out of the main usage-view file into a dedicated helper unit. 
| Continue splitting query / aggregation / projection responsibilities. | +| R19 | Fixed | Daemon loop decomposition | `internal/daemon/server.go`, `internal/daemon/server_loops.go` | Collection, spool, hook-spool, retention, and poll loops no longer live inline in the main daemon server file. | Continue splitting by loop family if the new file grows too large. | ## Action Table @@ -41,11 +44,11 @@ This table captures every issue found in this pass. It is broad and high-signal, | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go:393-584`, `internal/dashboardapp/service.go` | The side effects are now injected, but `Model` still owns a very large amount of event-handling and state-transition logic. | Continue splitting update/action logic into smaller TUI units and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | -| A3 | P1 | UI metric-prefix parsing | `internal/tui/tiles_composition.go:257-1579`, `internal/tui/analytics.go:663-729`, `internal/core/usage_breakdowns.go` | MCP and language breakdowns now use shared extractors, but client / source / project / provider / model composition logic still parses raw key conventions directly inside the TUI. 
| Continue promoting those mix extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | +| A3 | P1 | UI metric-prefix parsing | `internal/tui/tiles_composition.go:257-1579`, `internal/tui/analytics.go:663-729`, `internal/core/usage_breakdowns.go` | MCP, language, and project breakdowns now use shared extractors, but client / source / provider / model composition logic still parses raw key conventions directly inside the TUI. | Continue promoting those mix extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | | A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go:307-2188` | `openrouter.go` mixes auth probing, credits, keys, analytics parsing, generation pagination, provider resolution, metadata enrichment, and output projection in one 2800+ LOC file. | Split into subpackages/files: `api_client`, `analytics`, `generations`, `provider_resolution`, `projection`, `types`. | Easier maintenance, smaller diff surface, faster targeted testing. | | A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. | Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. 
| -| A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go:160-1757` | `usage_view.go` is simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | -| A7 | P2 | Daemon service monolith | `internal/daemon/server.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The daemon is materially less coupled after the logging/cache/http split, but polling, collection, retention, and spool maintenance still live together in the main server file. | Continue splitting the loop-heavy runtime into `polling`, `collection`, `retention`, and `spool` units. | Lower mental load and easier concurrency review. | +| A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go` | `usage_view.go` is smaller now, but it is still simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | +| A7 | P2 | Daemon service monolith | `internal/daemon/server.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_loops.go` | The daemon is materially less coupled after the logging/cache/http/loop split, but polling, collection, retention, and spool maintenance still share one large runtime helper unit. | Continue splitting the loop-heavy runtime into `polling`, `collection`, `retention`, and `spool` units. | Lower mental load and easier concurrency review. 
| | A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Cursor’s main time-sensitive path now uses an injectable clock, but several other providers and analytics helpers still read `time.Now()` directly, often mixing local time and UTC. | Extend the clock abstraction to the remaining provider and analytics subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. 
| diff --git a/internal/core/usage_breakdowns.go b/internal/core/usage_breakdowns.go index 899af6b..6bb7565 100644 --- a/internal/core/usage_breakdowns.go +++ b/internal/core/usage_breakdowns.go @@ -21,6 +21,13 @@ type MCPServerUsageEntry struct { Functions []MCPFunctionUsageEntry } +type ProjectUsageEntry struct { + Name string + Requests float64 + Requests1d float64 + Series []TimePoint +} + func ExtractLanguageUsage(s UsageSnapshot) ([]LanguageUsageEntry, map[string]bool) { byLang := make(map[string]float64) usedKeys := make(map[string]bool) @@ -146,3 +153,126 @@ func ExtractMCPUsage(s UsageSnapshot) ([]MCPServerUsageEntry, map[string]bool) { }) return out, usedKeys } + +func ExtractProjectUsage(s UsageSnapshot) ([]ProjectUsageEntry, map[string]bool) { + byProject := make(map[string]*ProjectUsageEntry) + usedKeys := make(map[string]bool) + seriesByProject := make(map[string]map[string]float64) + + ensure := func(name string) *ProjectUsageEntry { + if _, ok := byProject[name]; !ok { + byProject[name] = &ProjectUsageEntry{Name: name} + } + return byProject[name] + } + + for key, metric := range s.Metrics { + if metric.Used == nil { + continue + } + name, field, ok := parseProjectMetricKey(key) + if !ok { + continue + } + project := ensure(name) + switch field { + case "requests": + project.Requests = *metric.Used + case "requests_today": + project.Requests1d = *metric.Used + } + usedKeys[key] = true + } + + for key, points := range s.DailySeries { + if !strings.HasPrefix(key, "usage_project_") { + continue + } + name := strings.TrimSpace(strings.TrimPrefix(key, "usage_project_")) + if name == "" || len(points) == 0 { + continue + } + mergeBreakdownSeriesByDay(seriesByProject, name, points) + } + + for name, pointsByDay := range seriesByProject { + project := ensure(name) + project.Series = breakdownSortedSeries(pointsByDay) + if project.Requests <= 0 { + project.Requests = sumBreakdownSeries(project.Series) + } + } + + out := make([]ProjectUsageEntry, 0, 
len(byProject)) + for _, project := range byProject { + if project.Requests <= 0 && len(project.Series) == 0 { + continue + } + out = append(out, *project) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func parseProjectMetricKey(key string) (name, field string, ok bool) { + const prefix = "project_" + if !strings.HasPrefix(key, prefix) { + return "", "", false + } + rest := strings.TrimPrefix(key, prefix) + if strings.HasSuffix(rest, "_requests_today") { + return strings.TrimSuffix(rest, "_requests_today"), "requests_today", true + } + if strings.HasSuffix(rest, "_requests") { + return strings.TrimSuffix(rest, "_requests"), "requests", true + } + return "", "", false +} + +func mergeBreakdownSeriesByDay(seriesByName map[string]map[string]float64, name string, points []TimePoint) { + if name == "" || len(points) == 0 { + return + } + if seriesByName[name] == nil { + seriesByName[name] = make(map[string]float64) + } + for _, point := range points { + if point.Date == "" { + continue + } + seriesByName[name][point.Date] += point.Value + } +} + +func breakdownSortedSeries(pointsByDay map[string]float64) []TimePoint { + if len(pointsByDay) == 0 { + return nil + } + days := make([]string, 0, len(pointsByDay)) + for day := range pointsByDay { + days = append(days, day) + } + sort.Strings(days) + + points := make([]TimePoint, 0, len(days)) + for _, day := range days { + points = append(points, TimePoint{ + Date: day, + Value: pointsByDay[day], + }) + } + return points +} + +func sumBreakdownSeries(points []TimePoint) float64 { + total := 0.0 + for _, point := range points { + total += point.Value + } + return total +} diff --git a/internal/core/usage_breakdowns_test.go b/internal/core/usage_breakdowns_test.go index c6d121b..6508e36 100644 --- a/internal/core/usage_breakdowns_test.go +++ 
b/internal/core/usage_breakdowns_test.go @@ -66,3 +66,36 @@ func TestExtractMCPUsage(t *testing.T) { t.Fatalf("aggregate MCP key should still be marked used") } } + +func TestExtractProjectUsage(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "project_alpha_requests": {Used: Float64Ptr(5)}, + "project_alpha_requests_today": {Used: Float64Ptr(2)}, + "project_beta_requests": {Used: Float64Ptr(3)}, + }, + DailySeries: map[string][]TimePoint{ + "usage_project_alpha": { + {Date: "2026-03-08", Value: 2}, + {Date: "2026-03-09", Value: 3}, + }, + }, + } + + got, used := ExtractProjectUsage(snap) + if len(got) != 2 { + t.Fatalf("len(got) = %d, want 2", len(got)) + } + if got[0].Name != "alpha" || got[0].Requests != 5 || got[0].Requests1d != 2 { + t.Fatalf("got[0] = %#v, want alpha/5/2", got[0]) + } + if len(got[0].Series) != 2 { + t.Fatalf("len(got[0].Series) = %d, want 2", len(got[0].Series)) + } + if got[1].Name != "beta" || got[1].Requests != 3 { + t.Fatalf("got[1] = %#v, want beta/3", got[1]) + } + if !used["project_alpha_requests"] || !used["project_beta_requests"] { + t.Fatalf("used keys missing project metrics: %#v", used) + } +} diff --git a/internal/daemon/server.go b/internal/daemon/server.go index 6f93d01..13cb94e 100644 --- a/internal/daemon/server.go +++ b/internal/daemon/server.go @@ -2,7 +2,6 @@ package daemon import ( "context" - "encoding/json" "errors" "fmt" "io" @@ -17,7 +16,6 @@ import ( "syscall" "time" - "github.com/janekbaraniewski/openusage/internal/config" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers" "github.com/janekbaraniewski/openusage/internal/providers/shared" @@ -205,550 +203,6 @@ func (s *Service) flushBacklog(ctx context.Context, retryReqs []telemetry.Ingest return flush, enqueued, append(warnings, flushWarnings...) 
} -// --- Collection loop --- - -func (s *Service) runCollectLoop(ctx context.Context) { - ticker := time.NewTicker(s.cfg.CollectInterval) - defer ticker.Stop() - - s.infof("collect_loop_start", "interval=%s", s.cfg.CollectInterval) - s.collectAndFlush(ctx) - for { - select { - case <-ctx.Done(): - s.infof("collect_loop_stop", "reason=context_done") - return - case <-ticker.C: - s.collectAndFlush(ctx) - } - } -} - -func (s *Service) runSpoolMaintenanceLoop(ctx context.Context) { - if s == nil { - return - } - flushTicker := time.NewTicker(5 * time.Second) - cleanupTicker := time.NewTicker(60 * time.Second) - defer flushTicker.Stop() - defer cleanupTicker.Stop() - - s.infof("spool_loop_start", "flush_interval=%s cleanup_interval=%s", 5*time.Second, 60*time.Second) - s.flushSpoolBacklog(ctx, 10000) - s.cleanupSpool() - - for { - select { - case <-ctx.Done(): - s.infof("spool_loop_stop", "reason=context_done") - return - case <-flushTicker.C: - s.flushSpoolBacklog(ctx, 10000) - case <-cleanupTicker.C: - s.cleanupSpool() - } - } -} - -func (s *Service) flushSpoolBacklog(ctx context.Context, maxTotal int) { - if s == nil || s.pipeline == nil { - return - } - - s.spoolMu.Lock() - flush, warnings := FlushInBatches(ctx, s.pipeline, maxTotal) - s.spoolMu.Unlock() - - if flush.Processed > 0 || flush.Failed > 0 || len(warnings) > 0 { - s.infof( - "spool_flush", - "processed=%d ingested=%d deduped=%d failed=%d warnings=%d", - flush.Processed, flush.Ingested, flush.Deduped, flush.Failed, len(warnings), - ) - for _, w := range warnings { - s.warnf("spool_flush_warning", "message=%q", w) - } - } -} - -func (s *Service) cleanupSpool() { - if s == nil || strings.TrimSpace(s.cfg.SpoolDir) == "" { - return - } - - policy := telemetry.SpoolCleanupPolicy{ - MaxAge: 96 * time.Hour, - MaxFiles: 25000, - MaxBytes: 768 << 20, // 768 MB - } - - s.spoolMu.Lock() - result, err := telemetry.NewSpool(s.cfg.SpoolDir).Cleanup(policy) - s.spoolMu.Unlock() - if err != nil { - if 
s.shouldLog("spool_cleanup_error", 20*time.Second) { - s.warnf("spool_cleanup_error", "error=%v", err) - } - return - } - if result.RemovedFiles > 0 { - s.infof( - "spool_cleanup", - "removed_files=%d removed_bytes=%d remaining_files=%d remaining_bytes=%d", - result.RemovedFiles, - result.RemovedBytes, - result.RemainingFiles, - result.RemainingBytes, - ) - return - } - if s.shouldLog("spool_cleanup_steady", 30*time.Minute) { - s.infof( - "spool_cleanup_steady", - "remaining_files=%d remaining_bytes=%d", - result.RemainingFiles, - result.RemainingBytes, - ) - } -} - -// runHookSpoolLoop processes raw hook payloads written to disk by the -// shell hook when the daemon socket was unreachable. Files live in -// the hook-spool directory (sibling of the main spool) and contain a -// single JSON object: {"source":"…","account_id":"…","payload":}. -func (s *Service) runHookSpoolLoop(ctx context.Context) { - if s == nil { - return - } - hookSpoolDir, err := telemetry.DefaultHookSpoolDir() - if err != nil { - s.warnf("hook_spool_loop", "resolve dir error=%v", err) - return - } - - processInterval := 5 * time.Second - cleanupInterval := 5 * time.Minute - processTicker := time.NewTicker(processInterval) - cleanupTicker := time.NewTicker(cleanupInterval) - defer processTicker.Stop() - defer cleanupTicker.Stop() - - s.infof( - "hook_spool_loop_start", - "dir=%s process_interval=%s cleanup_interval=%s", - hookSpoolDir, - processInterval, - cleanupInterval, - ) - s.processHookSpool(ctx, hookSpoolDir) - s.cleanupHookSpool(hookSpoolDir) - - for { - select { - case <-ctx.Done(): - s.infof("hook_spool_loop_stop", "reason=context_done") - return - case <-processTicker.C: - s.processHookSpool(ctx, hookSpoolDir) - case <-cleanupTicker.C: - s.cleanupHookSpool(hookSpoolDir) - } - } -} - -type rawHookFile struct { - Source string `json:"source"` - AccountID string `json:"account_id"` - Payload json.RawMessage `json:"payload"` -} - -const hookSpoolBatchLimit = 200 - -func (s *Service) 
processHookSpool(ctx context.Context, dir string) { - files, err := filepath.Glob(filepath.Join(dir, "*.json")) - if err != nil || len(files) == 0 { - return - } - - processed := 0 - for _, path := range files { - if processed >= hookSpoolBatchLimit { - break - } - if ctx.Err() != nil { - return - } - - data, readErr := os.ReadFile(path) - if readErr != nil { - _ = os.Remove(path) // unreadable — discard - processed++ - continue - } - - var raw rawHookFile - if json.Unmarshal(data, &raw) != nil || len(raw.Payload) == 0 { - _ = os.Remove(path) // malformed — discard - processed++ - continue - } - - source, ok := providers.TelemetrySourceBySystem(raw.Source) - if !ok { - _ = os.Remove(path) // unknown source — discard - processed++ - continue - } - - reqs, parseErr := telemetry.ParseSourceHookPayload( - source, raw.Payload, - source.DefaultCollectOptions(), - strings.TrimSpace(raw.AccountID), - ) - if parseErr != nil || len(reqs) == 0 { - _ = os.Remove(path) - processed++ - continue - } - - tally, _ := s.ingestBatch(ctx, reqs) - _ = os.Remove(path) - processed++ - - s.infof("hook_spool_ingest", - "file=%s source=%s processed=%d ingested=%d deduped=%d failed=%d", - filepath.Base(path), raw.Source, - tally.processed, tally.ingested, tally.deduped, tally.failed, - ) - } -} - -// cleanupHookSpool removes stale or excess files from the hook spool -// directory. Files older than 24h are removed unconditionally; the -// directory is capped at 500 files. 
-func (s *Service) cleanupHookSpool(dir string) { - files, err := filepath.Glob(filepath.Join(dir, "*.json")) - if err != nil || len(files) == 0 { - // also clean leftover .tmp files - tmps, _ := filepath.Glob(filepath.Join(dir, "*.json.tmp")) - for _, t := range tmps { - _ = os.Remove(t) - } - return - } - - now := time.Now() - removed := 0 - remaining := make([]string, 0, len(files)) - for _, path := range files { - info, statErr := os.Stat(path) - if statErr != nil { - _ = os.Remove(path) - removed++ - continue - } - if now.Sub(info.ModTime()) > 24*time.Hour { - _ = os.Remove(path) - removed++ - continue - } - remaining = append(remaining, path) - } - - // hard cap - if len(remaining) > 500 { - for _, path := range remaining[:len(remaining)-500] { - _ = os.Remove(path) - removed++ - } - remaining = remaining[len(remaining)-500:] - } - - // clean .tmp files - tmps, _ := filepath.Glob(filepath.Join(dir, "*.json.tmp")) - for _, t := range tmps { - _ = os.Remove(t) - removed++ - } - - if removed > 0 { - s.infof("hook_spool_cleanup", "removed=%d remaining=%d", removed, len(remaining)) - } -} - -func (s *Service) collectAndFlush(ctx context.Context) { - if s == nil { - return - } - started := time.Now() - const backlogFlushLimit = 2000 - - var allReqs []telemetry.IngestRequest - totalCollected := 0 - var warnings []string - - for _, collector := range s.collectors { - reqs, err := collector.Collect(ctx) - if err != nil { - warnings = append(warnings, fmt.Sprintf("%s: %v", collector.Name(), err)) - continue - } - totalCollected += len(reqs) - allReqs = append(allReqs, reqs...) - } - - direct, retries := s.ingestBatch(ctx, allReqs) - flush, enqueued, flushWarnings := s.flushBacklog(ctx, retries, backlogFlushLimit) - warnings = append(warnings, flushWarnings...) 
- - durationMs := time.Since(started).Milliseconds() - if totalCollected > 0 || direct.processed > 0 || enqueued > 0 || flush.Processed > 0 || len(warnings) > 0 { - s.infof( - "collect_cycle", - "duration_ms=%d collected=%d direct_processed=%d direct_ingested=%d direct_deduped=%d direct_failed=%d enqueued=%d flush_processed=%d flush_ingested=%d flush_deduped=%d flush_failed=%d warnings=%d", - durationMs, totalCollected, - direct.processed, direct.ingested, direct.deduped, direct.failed, - enqueued, flush.Processed, flush.Ingested, flush.Deduped, flush.Failed, - len(warnings), - ) - for _, w := range warnings { - s.warnf("collect_warning", "message=%q", w) - } - s.pruneTelemetryOrphans(ctx) - return - } - - if durationMs >= 1500 && s.shouldLog("collect_slow", 30*time.Second) { - s.infof("collect_idle_slow", "duration_ms=%d", durationMs) - } - - // Keep raw telemetry storage bounded by pruning orphan raw rows created by - // historical dedup paths and intermittent duplicate ingestion races. - s.pruneTelemetryOrphans(ctx) -} - -func (s *Service) pruneTelemetryOrphans(ctx context.Context) { - if s == nil || s.store == nil { - return - } - if !s.shouldLog("prune_orphan_raw_events_tick", 45*time.Second) { - return - } - - const pruneBatchSize = 10000 - pruneCtx, cancel := context.WithTimeout(ctx, 4*time.Second) - defer cancel() - - removed, err := s.store.PruneOrphanRawEvents(pruneCtx, pruneBatchSize) - if err != nil { - if s.shouldLog("prune_orphan_raw_events_error", 20*time.Second) { - s.warnf("prune_orphan_raw_events_error", "error=%v", err) - } - return - } - if removed > 0 { - s.infof("prune_orphan_raw_events", "removed=%d batch_size=%d", removed, pruneBatchSize) - } - - // Opportunistically prune raw payloads older than 1 hour during each - // orphan cleanup cycle. This keeps the DB compact without waiting for - // the 6-hour retention loop. 
- payloadCtx, payloadCancel := context.WithTimeout(ctx, 4*time.Second) - defer payloadCancel() - pruned, pruneErr := s.store.PruneRawEventPayloads(payloadCtx, 1, pruneBatchSize) - if pruneErr == nil && pruned > 0 { - s.infof("prune_raw_payloads", "pruned=%d", pruned) - } -} - -// --- Retention loop --- - -func (s *Service) runRetentionLoop(ctx context.Context) { - s.pruneOldData(ctx) - ticker := time.NewTicker(6 * time.Hour) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - s.infof("retention_loop_stop", "reason=context_done") - return - case <-ticker.C: - s.pruneOldData(ctx) - } - } -} - -func (s *Service) pruneOldData(ctx context.Context) { - if s == nil || s.store == nil { - return - } - cfg, err := config.Load() - if err != nil { - if s.shouldLog("retention_config_error", 30*time.Second) { - s.warnf("retention_config_error", "error=%v", err) - } - return - } - retentionDays := cfg.Data.RetentionDays - if retentionDays <= 0 { - retentionDays = 30 - } - - pruneCtx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - deleted, err := s.store.PruneOldEvents(pruneCtx, retentionDays) - if err != nil { - if s.shouldLog("retention_prune_error", 30*time.Second) { - s.warnf("retention_prune_error", "error=%v", err) - } - return - } - if deleted > 0 { - s.infof("retention_prune", "deleted=%d retention_days=%d", deleted, retentionDays) - // Clean up orphaned raw events after pruning - orphanCtx, orphanCancel := context.WithTimeout(ctx, 10*time.Second) - defer orphanCancel() - orphaned, orphanErr := s.store.PruneOrphanRawEvents(orphanCtx, 50000) - if orphanErr != nil { - s.warnf("retention_orphan_prune_error", "error=%v", orphanErr) - } else if orphaned > 0 { - s.infof("retention_orphan_prune", "removed=%d", orphaned) - } - } - - // Payload pruning is handled by pruneTelemetryOrphans (runs every ~45s). 
-} - -// --- Poll loop --- - -func (s *Service) runPollLoop(ctx context.Context) { - ticker := time.NewTicker(s.cfg.PollInterval) - defer ticker.Stop() - - s.infof("poll_loop_start", "interval=%s", s.cfg.PollInterval) - s.pollProviders(ctx) - for { - select { - case <-ctx.Done(): - s.infof("poll_loop_stop", "reason=context_done") - return - case <-ticker.C: - s.pollProviders(ctx) - } - } -} - -func (s *Service) pollProviders(ctx context.Context) { - if s == nil || s.quotaIngest == nil { - return - } - started := time.Now() - - accounts, modelNorm, err := LoadAccountsAndNorm() - if err != nil { - if s.shouldLog("poll_config_warning", 20*time.Second) { - s.warnf("poll_config_warning", "error=%v", err) - } - return - } - if len(accounts) == 0 { - if s.shouldLog("poll_no_accounts", 30*time.Second) { - s.infof("poll_skipped", "reason=no_enabled_accounts") - } - return - } - - type providerResult struct { - accountID string - snapshot core.UsageSnapshot - } - - results := make(chan providerResult, len(accounts)) - var wg sync.WaitGroup - - for _, acct := range accounts { - wg.Add(1) - go func(a core.AccountConfig) { - defer wg.Done() - - provider, ok := s.providerByID[a.Provider] - if !ok { - results <- providerResult{ - accountID: a.ID, - snapshot: core.UsageSnapshot{ - ProviderID: a.Provider, - AccountID: a.ID, - Timestamp: time.Now().UTC(), - Status: core.StatusError, - Message: fmt.Sprintf("no provider adapter registered for %q (restart/reinstall telemetry daemon if recently added)", a.Provider), - }, - } - return - } - - fetchCtx, cancel := context.WithTimeout(ctx, 8*time.Second) - defer cancel() - - snap, fetchErr := provider.Fetch(fetchCtx, a) - if fetchErr != nil { - snap = core.UsageSnapshot{ - ProviderID: a.Provider, - AccountID: a.ID, - Timestamp: time.Now().UTC(), - Status: core.StatusError, - Message: fetchErr.Error(), - } - } - snap = core.NormalizeUsageSnapshotWithConfig(snap, modelNorm) - results <- providerResult{accountID: a.ID, snapshot: snap} - 
}(acct) - } - - go func() { - wg.Wait() - close(results) - }() - - snapshots := make(map[string]core.UsageSnapshot, len(accounts)) - statusCounts := map[core.Status]int{} - errorCount := 0 - for result := range results { - snapshots[result.accountID] = result.snapshot - statusCounts[result.snapshot.Status]++ - if result.snapshot.Status == core.StatusError { - errorCount++ - } - } - if len(snapshots) == 0 { - return - } - - ingestCtx, cancel := context.WithTimeout(ctx, 12*time.Second) - defer cancel() - ingestErr := s.ingestQuotaSnapshots(ingestCtx, snapshots) - if ingestErr != nil && s.shouldLog("poll_ingest_warning", 10*time.Second) { - s.warnf("poll_ingest_warning", "error=%v", ingestErr) - } - - durationMs := time.Since(started).Milliseconds() - if ingestErr != nil || errorCount > 0 || s.shouldLog("poll_cycle_info", 45*time.Second) { - s.infof( - "poll_cycle", - "duration_ms=%d accounts=%d snapshots=%d status_ok=%d status_auth=%d status_limited=%d status_error=%d status_unknown=%d ingest_error=%t", - durationMs, - len(accounts), - len(snapshots), - statusCounts[core.StatusOK], - statusCounts[core.StatusAuth], - statusCounts[core.StatusLimited], - statusCounts[core.StatusError], - statusCounts[core.StatusUnknown], - ingestErr != nil, - ) - } -} - // --- HTTP server --- func (s *Service) startSocketServer(ctx context.Context) error { diff --git a/internal/daemon/server_loops.go b/internal/daemon/server_loops.go new file mode 100644 index 0000000..ae05357 --- /dev/null +++ b/internal/daemon/server_loops.go @@ -0,0 +1,534 @@ +package daemon + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/janekbaraniewski/openusage/internal/config" + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers" + "github.com/janekbaraniewski/openusage/internal/telemetry" +) + +func (s *Service) runCollectLoop(ctx context.Context) { + ticker := 
time.NewTicker(s.cfg.CollectInterval) + defer ticker.Stop() + + s.infof("collect_loop_start", "interval=%s", s.cfg.CollectInterval) + s.collectAndFlush(ctx) + for { + select { + case <-ctx.Done(): + s.infof("collect_loop_stop", "reason=context_done") + return + case <-ticker.C: + s.collectAndFlush(ctx) + } + } +} + +func (s *Service) runSpoolMaintenanceLoop(ctx context.Context) { + if s == nil { + return + } + flushTicker := time.NewTicker(5 * time.Second) + cleanupTicker := time.NewTicker(60 * time.Second) + defer flushTicker.Stop() + defer cleanupTicker.Stop() + + s.infof("spool_loop_start", "flush_interval=%s cleanup_interval=%s", 5*time.Second, 60*time.Second) + s.flushSpoolBacklog(ctx, 10000) + s.cleanupSpool() + + for { + select { + case <-ctx.Done(): + s.infof("spool_loop_stop", "reason=context_done") + return + case <-flushTicker.C: + s.flushSpoolBacklog(ctx, 10000) + case <-cleanupTicker.C: + s.cleanupSpool() + } + } +} + +func (s *Service) flushSpoolBacklog(ctx context.Context, maxTotal int) { + if s == nil || s.pipeline == nil { + return + } + + flush, warnings := FlushInBatches(ctx, s.pipeline, maxTotal) + if flush.Processed > 0 || len(warnings) > 0 { + s.infof( + "spool_flush", + "processed=%d ingested=%d deduped=%d failed=%d warnings=%d", + flush.Processed, flush.Ingested, flush.Deduped, flush.Failed, len(warnings), + ) + for _, warning := range warnings { + s.warnf("spool_flush_warning", "message=%q", warning) + } + } +} + +func (s *Service) cleanupSpool() { + if s == nil || strings.TrimSpace(s.cfg.SpoolDir) == "" { + return + } + + policy := telemetry.SpoolCleanupPolicy{ + MaxAge: 96 * time.Hour, + MaxFiles: 25000, + MaxBytes: 768 << 20, + } + + s.spoolMu.Lock() + result, err := telemetry.NewSpool(s.cfg.SpoolDir).Cleanup(policy) + s.spoolMu.Unlock() + if err != nil { + if s.shouldLog("spool_cleanup_error", 20*time.Second) { + s.warnf("spool_cleanup_error", "error=%v", err) + } + return + } + if result.RemovedFiles > 0 { + s.infof( + "spool_cleanup", 
+ "removed_files=%d removed_bytes=%d remaining_files=%d remaining_bytes=%d", + result.RemovedFiles, + result.RemovedBytes, + result.RemainingFiles, + result.RemainingBytes, + ) + return + } + if s.shouldLog("spool_cleanup_steady", 30*time.Minute) { + s.infof( + "spool_cleanup_steady", + "remaining_files=%d remaining_bytes=%d", + result.RemainingFiles, + result.RemainingBytes, + ) + } +} + +func (s *Service) runHookSpoolLoop(ctx context.Context) { + if s == nil { + return + } + hookSpoolDir, err := telemetry.DefaultHookSpoolDir() + if err != nil { + s.warnf("hook_spool_loop", "resolve dir error=%v", err) + return + } + + processInterval := 5 * time.Second + cleanupInterval := 5 * time.Minute + processTicker := time.NewTicker(processInterval) + cleanupTicker := time.NewTicker(cleanupInterval) + defer processTicker.Stop() + defer cleanupTicker.Stop() + + s.infof( + "hook_spool_loop_start", + "dir=%s process_interval=%s cleanup_interval=%s", + hookSpoolDir, + processInterval, + cleanupInterval, + ) + s.processHookSpool(ctx, hookSpoolDir) + s.cleanupHookSpool(hookSpoolDir) + + for { + select { + case <-ctx.Done(): + s.infof("hook_spool_loop_stop", "reason=context_done") + return + case <-processTicker.C: + s.processHookSpool(ctx, hookSpoolDir) + case <-cleanupTicker.C: + s.cleanupHookSpool(hookSpoolDir) + } + } +} + +type rawHookFile struct { + Source string `json:"source"` + AccountID string `json:"account_id"` + Payload json.RawMessage `json:"payload"` +} + +const hookSpoolBatchLimit = 200 + +func (s *Service) processHookSpool(ctx context.Context, dir string) { + files, err := filepath.Glob(filepath.Join(dir, "*.json")) + if err != nil || len(files) == 0 { + return + } + + processed := 0 + for _, path := range files { + if processed >= hookSpoolBatchLimit { + break + } + if ctx.Err() != nil { + return + } + + data, readErr := os.ReadFile(path) + if readErr != nil { + _ = os.Remove(path) + processed++ + continue + } + + var raw rawHookFile + if json.Unmarshal(data, 
&raw) != nil || len(raw.Payload) == 0 { + _ = os.Remove(path) + processed++ + continue + } + + source, ok := providers.TelemetrySourceBySystem(raw.Source) + if !ok { + _ = os.Remove(path) + processed++ + continue + } + + reqs, parseErr := telemetry.ParseSourceHookPayload( + source, raw.Payload, + source.DefaultCollectOptions(), + strings.TrimSpace(raw.AccountID), + ) + if parseErr != nil || len(reqs) == 0 { + _ = os.Remove(path) + processed++ + continue + } + + tally, _ := s.ingestBatch(ctx, reqs) + _ = os.Remove(path) + processed++ + + s.infof("hook_spool_ingest", + "file=%s source=%s processed=%d ingested=%d deduped=%d failed=%d", + filepath.Base(path), raw.Source, + tally.processed, tally.ingested, tally.deduped, tally.failed, + ) + } +} + +func (s *Service) cleanupHookSpool(dir string) { + files, err := filepath.Glob(filepath.Join(dir, "*.json")) + if err != nil || len(files) == 0 { + tmps, _ := filepath.Glob(filepath.Join(dir, "*.json.tmp")) + for _, tmp := range tmps { + _ = os.Remove(tmp) + } + return + } + + now := time.Now() + removed := 0 + remaining := make([]string, 0, len(files)) + for _, path := range files { + info, statErr := os.Stat(path) + if statErr != nil { + _ = os.Remove(path) + removed++ + continue + } + if now.Sub(info.ModTime()) > 24*time.Hour { + _ = os.Remove(path) + removed++ + continue + } + remaining = append(remaining, path) + } + + if len(remaining) > 500 { + for _, path := range remaining[:len(remaining)-500] { + _ = os.Remove(path) + removed++ + } + remaining = remaining[len(remaining)-500:] + } + + tmps, _ := filepath.Glob(filepath.Join(dir, "*.json.tmp")) + for _, tmp := range tmps { + _ = os.Remove(tmp) + removed++ + } + + if removed > 0 { + s.infof("hook_spool_cleanup", "removed=%d remaining=%d", removed, len(remaining)) + } +} + +func (s *Service) collectAndFlush(ctx context.Context) { + if s == nil { + return + } + started := time.Now() + const backlogFlushLimit = 2000 + + var allReqs []telemetry.IngestRequest + 
totalCollected := 0 + var warnings []string + + for _, collector := range s.collectors { + reqs, err := collector.Collect(ctx) + if err != nil { + warnings = append(warnings, fmt.Sprintf("%s: %v", collector.Name(), err)) + continue + } + totalCollected += len(reqs) + allReqs = append(allReqs, reqs...) + } + + direct, retries := s.ingestBatch(ctx, allReqs) + flush, enqueued, flushWarnings := s.flushBacklog(ctx, retries, backlogFlushLimit) + warnings = append(warnings, flushWarnings...) + + durationMs := time.Since(started).Milliseconds() + if totalCollected > 0 || direct.processed > 0 || enqueued > 0 || flush.Processed > 0 || len(warnings) > 0 { + s.infof( + "collect_cycle", + "duration_ms=%d collected=%d direct_processed=%d direct_ingested=%d direct_deduped=%d direct_failed=%d enqueued=%d flush_processed=%d flush_ingested=%d flush_deduped=%d flush_failed=%d warnings=%d", + durationMs, totalCollected, + direct.processed, direct.ingested, direct.deduped, direct.failed, + enqueued, flush.Processed, flush.Ingested, flush.Deduped, flush.Failed, + len(warnings), + ) + for _, warning := range warnings { + s.warnf("collect_warning", "message=%q", warning) + } + s.pruneTelemetryOrphans(ctx) + return + } + + if durationMs >= 1500 && s.shouldLog("collect_slow", 30*time.Second) { + s.infof("collect_idle_slow", "duration_ms=%d", durationMs) + } + + s.pruneTelemetryOrphans(ctx) +} + +func (s *Service) pruneTelemetryOrphans(ctx context.Context) { + if s == nil || s.store == nil { + return + } + if !s.shouldLog("prune_orphan_raw_events_tick", 45*time.Second) { + return + } + + const pruneBatchSize = 10000 + pruneCtx, cancel := context.WithTimeout(ctx, 4*time.Second) + defer cancel() + + removed, err := s.store.PruneOrphanRawEvents(pruneCtx, pruneBatchSize) + if err != nil { + if s.shouldLog("prune_orphan_raw_events_error", 20*time.Second) { + s.warnf("prune_orphan_raw_events_error", "error=%v", err) + } + return + } + if removed > 0 { + s.infof("prune_orphan_raw_events", 
"removed=%d batch_size=%d", removed, pruneBatchSize) + } + + payloadCtx, payloadCancel := context.WithTimeout(ctx, 4*time.Second) + defer payloadCancel() + pruned, pruneErr := s.store.PruneRawEventPayloads(payloadCtx, 1, pruneBatchSize) + if pruneErr == nil && pruned > 0 { + s.infof("prune_raw_payloads", "pruned=%d", pruned) + } +} + +func (s *Service) runRetentionLoop(ctx context.Context) { + s.pruneOldData(ctx) + ticker := time.NewTicker(6 * time.Hour) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + s.infof("retention_loop_stop", "reason=context_done") + return + case <-ticker.C: + s.pruneOldData(ctx) + } + } +} + +func (s *Service) pruneOldData(ctx context.Context) { + if s == nil || s.store == nil { + return + } + cfg, err := config.Load() + if err != nil { + if s.shouldLog("retention_config_error", 30*time.Second) { + s.warnf("retention_config_error", "error=%v", err) + } + return + } + retentionDays := cfg.Data.RetentionDays + if retentionDays <= 0 { + retentionDays = 30 + } + + pruneCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + deleted, err := s.store.PruneOldEvents(pruneCtx, retentionDays) + if err != nil { + if s.shouldLog("retention_prune_error", 30*time.Second) { + s.warnf("retention_prune_error", "error=%v", err) + } + return + } + if deleted > 0 { + s.infof("retention_prune", "deleted=%d retention_days=%d", deleted, retentionDays) + orphanCtx, orphanCancel := context.WithTimeout(ctx, 10*time.Second) + defer orphanCancel() + orphaned, orphanErr := s.store.PruneOrphanRawEvents(orphanCtx, 50000) + if orphanErr != nil { + s.warnf("retention_orphan_prune_error", "error=%v", orphanErr) + } else if orphaned > 0 { + s.infof("retention_orphan_prune", "removed=%d", orphaned) + } + } +} + +func (s *Service) runPollLoop(ctx context.Context) { + ticker := time.NewTicker(s.cfg.PollInterval) + defer ticker.Stop() + + s.infof("poll_loop_start", "interval=%s", s.cfg.PollInterval) + s.pollProviders(ctx) + for { + select { + 
case <-ctx.Done(): + s.infof("poll_loop_stop", "reason=context_done") + return + case <-ticker.C: + s.pollProviders(ctx) + } + } +} + +func (s *Service) pollProviders(ctx context.Context) { + if s == nil || s.quotaIngest == nil { + return + } + started := time.Now() + + accounts, modelNorm, err := LoadAccountsAndNorm() + if err != nil { + if s.shouldLog("poll_config_warning", 20*time.Second) { + s.warnf("poll_config_warning", "error=%v", err) + } + return + } + if len(accounts) == 0 { + if s.shouldLog("poll_no_accounts", 30*time.Second) { + s.infof("poll_skipped", "reason=no_enabled_accounts") + } + return + } + + type providerResult struct { + accountID string + snapshot core.UsageSnapshot + } + + results := make(chan providerResult, len(accounts)) + var wg sync.WaitGroup + + for _, acct := range accounts { + wg.Add(1) + go func(account core.AccountConfig) { + defer wg.Done() + + provider, ok := s.providerByID[account.Provider] + if !ok { + results <- providerResult{ + accountID: account.ID, + snapshot: core.UsageSnapshot{ + ProviderID: account.Provider, + AccountID: account.ID, + Timestamp: time.Now().UTC(), + Status: core.StatusError, + Message: fmt.Sprintf("no provider adapter registered for %q (restart/reinstall telemetry daemon if recently added)", account.Provider), + }, + } + return + } + + fetchCtx, cancel := context.WithTimeout(ctx, 8*time.Second) + defer cancel() + + snap, fetchErr := provider.Fetch(fetchCtx, account) + if fetchErr != nil { + snap = core.UsageSnapshot{ + ProviderID: account.Provider, + AccountID: account.ID, + Timestamp: time.Now().UTC(), + Status: core.StatusError, + Message: fetchErr.Error(), + } + } + snap = core.NormalizeUsageSnapshotWithConfig(snap, modelNorm) + results <- providerResult{accountID: account.ID, snapshot: snap} + }(acct) + } + + go func() { + wg.Wait() + close(results) + }() + + snapshots := make(map[string]core.UsageSnapshot, len(accounts)) + statusCounts := map[core.Status]int{} + errorCount := 0 + for result := 
range results { + snapshots[result.accountID] = result.snapshot + statusCounts[result.snapshot.Status]++ + if result.snapshot.Status == core.StatusError { + errorCount++ + } + } + if len(snapshots) == 0 { + return + } + + ingestCtx, cancel := context.WithTimeout(ctx, 12*time.Second) + defer cancel() + ingestErr := s.ingestQuotaSnapshots(ingestCtx, snapshots) + if ingestErr != nil && s.shouldLog("poll_ingest_warning", 10*time.Second) { + s.warnf("poll_ingest_warning", "error=%v", ingestErr) + } + + durationMs := time.Since(started).Milliseconds() + if ingestErr != nil || errorCount > 0 || s.shouldLog("poll_cycle_info", 45*time.Second) { + s.infof( + "poll_cycle", + "duration_ms=%d accounts=%d snapshots=%d status_ok=%d status_auth=%d status_limited=%d status_error=%d status_unknown=%d ingest_error=%t", + durationMs, + len(accounts), + len(snapshots), + statusCounts[core.StatusOK], + statusCounts[core.StatusAuth], + statusCounts[core.StatusLimited], + statusCounts[core.StatusError], + statusCounts[core.StatusUnknown], + ingestErr != nil, + ) + } +} diff --git a/internal/telemetry/usage_view.go b/internal/telemetry/usage_view.go index 820f9e6..93532a6 100644 --- a/internal/telemetry/usage_view.go +++ b/internal/telemetry/usage_view.go @@ -7,7 +7,6 @@ import ( "sort" "strings" "time" - "unicode" "github.com/janekbaraniewski/openusage/internal/core" "github.com/samber/lo" @@ -1637,220 +1636,3 @@ func sortedSeriesFromByDay(byDay map[string]float64) []core.TimePoint { } // parseMCPToolName extracts server and function from an MCP tool name. -// Raw tool names use double underscores: mcp__server__function. -// Returns ("", "", false) for non-MCP tools. -// parseMCPToolName extracts server and function from an MCP tool name. 
-// Supports two formats: -// - Canonical: "mcp__server__function" (double underscores, from Claude Code and normalized Cursor) -// - Legacy: "server-function (mcp)" or "user-server-function (mcp)" (old Cursor data) -// -// Returns ("", "", false) for non-MCP tools. -func parseMCPToolName(raw string) (server, function string, ok bool) { - raw = strings.ToLower(strings.TrimSpace(raw)) - - // Canonical format: mcp__server__function - if strings.HasPrefix(raw, "mcp__") { - rest := raw[5:] - idx := strings.Index(rest, "__") - if idx < 0 { - return rest, "", true - } - return rest[:idx], rest[idx+2:], true - } - - // Copilot legacy wrapper format: "_mcp_server_". - if strings.Contains(raw, "_mcp_server_") { - parts := strings.SplitN(raw, "_mcp_server_", 2) - server = sanitizeMCPToolSegment(parts[0]) - function = sanitizeMCPToolSegment(parts[1]) - if server != "" && function != "" { - return server, function, true - } - } - - // Copilot legacy wrapper format variant: "-mcp-server-". - if strings.Contains(raw, "-mcp-server-") { - parts := strings.SplitN(raw, "-mcp-server-", 2) - server = sanitizeMCPToolSegment(parts[0]) - function = sanitizeMCPToolSegment(parts[1]) - if server != "" && function != "" { - return server, function, true - } - } - - // Legacy format: "something (mcp)" from old Cursor data. - if strings.HasSuffix(raw, " (mcp)") { - body := strings.TrimSuffix(raw, " (mcp)") - body = strings.TrimSpace(body) - if body == "" { - return "", "", false - } - - // Strip "user-" prefix if present. - body = strings.TrimPrefix(body, "user-") - - // Try to extract server from "server-function" format. - // e.g., "kubernetes-pods_log" → server=kubernetes, function=pods_log - // e.g., "gcp-gcloud-run_gcloud_command" → server=gcp-gcloud, function=run_gcloud_command - // Heuristic: the function part typically contains underscores, so split on the - // last hyphen that precedes an underscore-containing segment. 
- if idx := findServerFunctionSplit(body); idx > 0 { - return body[:idx], body[idx+1:], true - } - - // No clear server-function split — treat whole body as function with unknown server. - return "other", body, true - } - - return "", "", false -} - -func sanitizeMCPToolSegment(raw string) string { - raw = strings.ToLower(strings.TrimSpace(raw)) - if raw == "" { - return "" - } - var b strings.Builder - b.Grow(len(raw)) - lastUnderscore := false - for _, r := range raw { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - b.WriteRune(r) - lastUnderscore = false - continue - } - if !lastUnderscore { - b.WriteRune('_') - lastUnderscore = true - } - } - return strings.Trim(b.String(), "_") -} - -// findServerFunctionSplit finds the best hyphen position to split "server-function" -// in a Cursor MCP tool name. The function name typically contains underscores -// (e.g., "pods_list", "search_docs") while server names use hyphens. -// Strategy: find the last hyphen where the part AFTER it contains an underscore. -// This handles multi-segment server names like "gcp-gcloud" or "runai-docs". -func findServerFunctionSplit(s string) int { - bestIdx := -1 - for i := 0; i < len(s); i++ { - if s[i] == '-' { - rest := s[i+1:] - if strings.Contains(rest, "_") { - bestIdx = i - } - } - } - if bestIdx > 0 { - return bestIdx - } - - // No underscore-based split found. Fall back to first hyphen if no more hyphens after. 
- // e.g., "kubernetes-pods_log" or "smart-query" - if idx := strings.Index(s, "-"); idx > 0 { - return idx - } - return -1 -} - -func buildMCPAgg(tools []telemetryToolAgg) []telemetryMCPServerAgg { - type serverData struct { - calls float64 - calls1d float64 - funcs map[string]*telemetryMCPFunctionAgg - } - servers := make(map[string]*serverData) - - for _, tool := range tools { - server, function, ok := parseMCPToolName(tool.Tool) - if !ok || server == "" { - continue - } - sd, exists := servers[server] - if !exists { - sd = &serverData{funcs: make(map[string]*telemetryMCPFunctionAgg)} - servers[server] = sd - } - sd.calls += tool.Calls - sd.calls1d += tool.Calls1d - if function != "" { - if f, ok := sd.funcs[function]; ok { - f.Calls += tool.Calls - f.Calls1d += tool.Calls1d - } else { - sd.funcs[function] = &telemetryMCPFunctionAgg{ - Function: function, - Calls: tool.Calls, - Calls1d: tool.Calls1d, - } - } - } - } - - result := make([]telemetryMCPServerAgg, 0, len(servers)) - for name, sd := range servers { - var funcs []telemetryMCPFunctionAgg - for _, f := range sd.funcs { - funcs = append(funcs, *f) - } - sort.Slice(funcs, func(i, j int) bool { - if funcs[i].Calls != funcs[j].Calls { - return funcs[i].Calls > funcs[j].Calls - } - return funcs[i].Function < funcs[j].Function - }) - result = append(result, telemetryMCPServerAgg{ - Server: name, - Calls: sd.calls, - Calls1d: sd.calls1d, - Functions: funcs, - }) - } - sort.Slice(result, func(i, j int) bool { - if result[i].Calls != result[j].Calls { - return result[i].Calls > result[j].Calls - } - return result[i].Server < result[j].Server - }) - return result -} - -// deleteByPrefixes removes all entries from a string-keyed map whose key -// matches any of the given prefixes. Works with any map[string]V. 
-func deleteByPrefixes[V any](m map[string]V, prefixes []string) { - for key := range m { - for _, p := range prefixes { - if strings.HasPrefix(key, p) { - delete(m, key) - break - } - } - } -} - -func sanitizeMetricID(raw string) string { - raw = strings.TrimSpace(strings.ToLower(raw)) - if raw == "" { - return "unknown" - } - var b strings.Builder - b.Grow(len(raw)) - lastUnderscore := false - for _, r := range raw { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - b.WriteRune(r) - lastUnderscore = false - continue - } - if !lastUnderscore { - b.WriteRune('_') - lastUnderscore = true - } - } - out := strings.Trim(b.String(), "_") - if out == "" { - return "unknown" - } - return out -} diff --git a/internal/telemetry/usage_view_helpers.go b/internal/telemetry/usage_view_helpers.go new file mode 100644 index 0000000..bd2f0c8 --- /dev/null +++ b/internal/telemetry/usage_view_helpers.go @@ -0,0 +1,198 @@ +package telemetry + +import ( + "sort" + "strings" + "unicode" +) + +// Raw tool names use double underscores: mcp__server__function. +// Returns ("", "", false) for non-MCP tools. +// parseMCPToolName extracts server and function from an MCP tool name. 
+// Supports two formats: +// - Canonical: "mcp__server__function" (double underscores, from Claude Code and normalized Cursor) +// - Legacy: "server-function (mcp)" or "user-server-function (mcp)" (old Cursor data) +func parseMCPToolName(raw string) (server, function string, ok bool) { + raw = strings.ToLower(strings.TrimSpace(raw)) + + if strings.HasPrefix(raw, "mcp__") { + rest := raw[5:] + idx := strings.Index(rest, "__") + if idx < 0 { + return rest, "", true + } + return rest[:idx], rest[idx+2:], true + } + + if strings.Contains(raw, "_mcp_server_") { + parts := strings.SplitN(raw, "_mcp_server_", 2) + server = sanitizeMCPToolSegment(parts[0]) + function = sanitizeMCPToolSegment(parts[1]) + if server != "" && function != "" { + return server, function, true + } + } + + if strings.Contains(raw, "-mcp-server-") { + parts := strings.SplitN(raw, "-mcp-server-", 2) + server = sanitizeMCPToolSegment(parts[0]) + function = sanitizeMCPToolSegment(parts[1]) + if server != "" && function != "" { + return server, function, true + } + } + + if strings.HasSuffix(raw, " (mcp)") { + body := strings.TrimSpace(strings.TrimSuffix(raw, " (mcp)")) + if body == "" { + return "", "", false + } + body = strings.TrimPrefix(body, "user-") + if idx := findServerFunctionSplit(body); idx > 0 { + return body[:idx], body[idx+1:], true + } + return "other", body, true + } + + return "", "", false +} + +func sanitizeMCPToolSegment(raw string) string { + raw = strings.ToLower(strings.TrimSpace(raw)) + if raw == "" { + return "" + } + var b strings.Builder + b.Grow(len(raw)) + lastUnderscore := false + for _, r := range raw { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + b.WriteRune(r) + lastUnderscore = false + continue + } + if !lastUnderscore { + b.WriteRune('_') + lastUnderscore = true + } + } + return strings.Trim(b.String(), "_") +} + +func findServerFunctionSplit(s string) int { + bestIdx := -1 + for i := 0; i < len(s); i++ { + if s[i] == '-' { + rest := s[i+1:] + if 
strings.Contains(rest, "_") { + bestIdx = i + } + } + } + if bestIdx > 0 { + return bestIdx + } + if idx := strings.Index(s, "-"); idx > 0 { + return idx + } + return -1 +} + +func buildMCPAgg(tools []telemetryToolAgg) []telemetryMCPServerAgg { + type serverData struct { + calls float64 + calls1d float64 + funcs map[string]*telemetryMCPFunctionAgg + } + servers := make(map[string]*serverData) + + for _, tool := range tools { + server, function, ok := parseMCPToolName(tool.Tool) + if !ok || server == "" { + continue + } + sd, exists := servers[server] + if !exists { + sd = &serverData{funcs: make(map[string]*telemetryMCPFunctionAgg)} + servers[server] = sd + } + sd.calls += tool.Calls + sd.calls1d += tool.Calls1d + if function != "" { + if f, ok := sd.funcs[function]; ok { + f.Calls += tool.Calls + f.Calls1d += tool.Calls1d + } else { + sd.funcs[function] = &telemetryMCPFunctionAgg{ + Function: function, + Calls: tool.Calls, + Calls1d: tool.Calls1d, + } + } + } + } + + result := make([]telemetryMCPServerAgg, 0, len(servers)) + for name, sd := range servers { + var funcs []telemetryMCPFunctionAgg + for _, fn := range sd.funcs { + funcs = append(funcs, *fn) + } + sort.Slice(funcs, func(i, j int) bool { + if funcs[i].Calls != funcs[j].Calls { + return funcs[i].Calls > funcs[j].Calls + } + return funcs[i].Function < funcs[j].Function + }) + result = append(result, telemetryMCPServerAgg{ + Server: name, + Calls: sd.calls, + Calls1d: sd.calls1d, + Functions: funcs, + }) + } + sort.Slice(result, func(i, j int) bool { + if result[i].Calls != result[j].Calls { + return result[i].Calls > result[j].Calls + } + return result[i].Server < result[j].Server + }) + return result +} + +func deleteByPrefixes[V any](m map[string]V, prefixes []string) { + for key := range m { + for _, prefix := range prefixes { + if strings.HasPrefix(key, prefix) { + delete(m, key) + break + } + } + } +} + +func sanitizeMetricID(raw string) string { + raw = strings.TrimSpace(strings.ToLower(raw)) + if 
raw == "" { + return "unknown" + } + var b strings.Builder + b.Grow(len(raw)) + lastUnderscore := false + for _, r := range raw { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + b.WriteRune(r) + lastUnderscore = false + continue + } + if !lastUnderscore { + b.WriteRune('_') + lastUnderscore = true + } + } + out := strings.Trim(b.String(), "_") + if out == "" { + return "unknown" + } + return out +} diff --git a/internal/tui/tiles_composition.go b/internal/tui/tiles_composition.go index e57a7dd..0f97cec 100644 --- a/internal/tui/tiles_composition.go +++ b/internal/tui/tiles_composition.go @@ -1241,88 +1241,22 @@ func buildProviderProjectBreakdownLines(snap core.UsageSnapshot, innerW int, exp } func collectProviderProjectMix(snap core.UsageSnapshot) ([]projectMixEntry, map[string]bool) { - byProject := make(map[string]*projectMixEntry) - usedKeys := make(map[string]bool) - - ensure := func(name string) *projectMixEntry { - if _, ok := byProject[name]; !ok { - byProject[name] = &projectMixEntry{name: name} - } - return byProject[name] - } - - seriesByProject := make(map[string]map[string]float64) - - for key, met := range snap.Metrics { - if met.Used == nil { - continue - } - name, field, ok := parseProjectMetricKey(key) - if !ok { - continue - } - project := ensure(name) - switch field { - case "requests": - project.requests = *met.Used - case "requests_today": - project.requests1d = *met.Used - } - usedKeys[key] = true - } - - for key, points := range snap.DailySeries { - if !strings.HasPrefix(key, "usage_project_") { - continue - } - name := strings.TrimPrefix(key, "usage_project_") - if strings.TrimSpace(name) == "" || len(points) == 0 { - continue - } - mergeSeriesByDay(seriesByProject, name, points) - } - - for name, pointsByDay := range seriesByProject { - project := ensure(name) - project.series = sortedSeriesFromByDay(pointsByDay) - if project.requests <= 0 { - project.requests = sumSeriesValues(project.series) - } + projectUsage, usedKeys := 
core.ExtractProjectUsage(snap) + if len(projectUsage) == 0 { + return nil, usedKeys } - - projects := make([]projectMixEntry, 0, len(byProject)) - for _, project := range byProject { - if project.requests <= 0 && len(project.series) == 0 { - continue - } - projects = append(projects, *project) + projects := make([]projectMixEntry, 0, len(projectUsage)) + for _, project := range projectUsage { + projects = append(projects, projectMixEntry{ + name: project.Name, + requests: project.Requests, + requests1d: project.Requests1d, + series: project.Series, + }) } - - sort.Slice(projects, func(i, j int) bool { - if projects[i].requests == projects[j].requests { - return projects[i].name < projects[j].name - } - return projects[i].requests > projects[j].requests - }) - return projects, usedKeys } -func parseProjectMetricKey(key string) (name, field string, ok bool) { - const prefix = "project_" - if !strings.HasPrefix(key, prefix) { - return "", "", false - } - rest := strings.TrimPrefix(key, prefix) - if strings.HasSuffix(rest, "_requests_today") { - return strings.TrimSuffix(rest, "_requests_today"), "requests_today", true - } - if strings.HasSuffix(rest, "_requests") { - return strings.TrimSuffix(rest, "_requests"), "requests", true - } - return "", "", false -} - func limitProjectMix(projects []projectMixEntry, expanded bool, maxVisible int) ([]projectMixEntry, int) { if expanded || maxVisible <= 0 || len(projects) <= maxVisible { return projects, 0 From a3b9793a502602da0ea41f5fa1c45d5ef1e39817 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 12:50:31 +0100 Subject: [PATCH 07/32] refactor: share usage breakdown extractors --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 6 +- internal/core/usage_breakdowns.go | 759 ++++++++++++++++++ internal/core/usage_breakdowns_test.go | 110 +++ internal/providers/openrouter/openrouter.go | 155 ---- .../openrouter/provider_resolution.go | 153 ++++ internal/tui/tiles_composition.go | 688 ++-------------- 6 files 
changed, 1076 insertions(+), 795 deletions(-) create mode 100644 internal/providers/openrouter/provider_resolution.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index f30b7a6..cd1dd9b 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -37,6 +37,8 @@ This table captures every issue found in this pass. It is broad and high-signal, | R17 | Fixed | TUI project parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/tiles_composition.go` | Provider project mix extraction now uses one shared core extractor instead of duplicating project metric and daily-series parsing inside the TUI composition layer. | Continue the same extraction pattern for client / source / provider mix breakdowns. | | R18 | Fixed | Telemetry MCP/helper split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go` | MCP parsing, metric sanitizing, and generic map-prefix helper logic moved out of the main usage-view file into a dedicated helper unit. | Continue splitting query / aggregation / projection responsibilities. | | R19 | Fixed | Daemon loop decomposition | `internal/daemon/server.go`, `internal/daemon/server_loops.go` | Collection, spool, hook-spool, retention, and poll loops no longer live inline in the main daemon server file. | Continue splitting by loop family if the new file grows too large. | +| R20 | Fixed | TUI model/client/provider parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/tiles_composition.go` | Model, client, provider, upstream-provider, and interface-client aggregation/parsing now live in shared core extractors, leaving the TUI composition layer as a thin adapter over typed breakdown entries. | The remaining TUI parsing drift is now mostly in analytics/detail-specific sections rather than the main composition bars. 
| +| R21 | Fixed | OpenRouter provider-resolution split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go` | Hosting-provider resolution, BYOK cost inference, and provider-name heuristics moved out of the main OpenRouter provider file into a dedicated helper unit. | Continue splitting analytics/generation pagination/projection concerns. | ## Action Table @@ -44,8 +46,8 @@ This table captures every issue found in this pass. It is broad and high-signal, | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go:393-584`, `internal/dashboardapp/service.go` | The side effects are now injected, but `Model` still owns a very large amount of event-handling and state-transition logic. | Continue splitting update/action logic into smaller TUI units and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | -| A3 | P1 | UI metric-prefix parsing | `internal/tui/tiles_composition.go:257-1579`, `internal/tui/analytics.go:663-729`, `internal/core/usage_breakdowns.go` | MCP, language, and project breakdowns now use shared extractors, but client / source / provider / model composition logic still parses raw key conventions directly inside the TUI. 
| Continue promoting those mix extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | -| A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go:307-2188` | `openrouter.go` mixes auth probing, credits, keys, analytics parsing, generation pagination, provider resolution, metadata enrichment, and output projection in one 2800+ LOC file. | Split into subpackages/files: `api_client`, `analytics`, `generations`, `provider_resolution`, `projection`, `types`. | Easier maintenance, smaller diff surface, faster targeted testing. | +| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go:663-729`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go` | The main composition bars now consume shared extractors, but analytics/detail-specific sections still decode some raw metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | +| A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go` | `openrouter.go` is smaller after the provider-resolution split, but it still mixes auth probing, credits, keys, analytics parsing, generation pagination, metadata enrichment, and output projection in one large file. | Continue splitting into `api_client`, `analytics`, `generations`, `projection`, and `types` units. | Easier maintenance, smaller diff surface, faster targeted testing. 
| | A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. | Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. | | A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go` | `usage_view.go` is smaller now, but it is still simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | | A7 | P2 | Daemon service monolith | `internal/daemon/server.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_loops.go` | The daemon is materially less coupled after the logging/cache/http/loop split, but polling, collection, retention, and spool maintenance still share one large runtime helper unit. | Continue splitting the loop-heavy runtime into `polling`, `collection`, `retention`, and `spool` units. | Lower mental load and easier concurrency review. 
| diff --git a/internal/core/usage_breakdowns.go b/internal/core/usage_breakdowns.go index 6bb7565..0bc1ce3 100644 --- a/internal/core/usage_breakdowns.go +++ b/internal/core/usage_breakdowns.go @@ -2,6 +2,7 @@ package core import ( "sort" + "strconv" "strings" ) @@ -28,6 +29,37 @@ type ProjectUsageEntry struct { Series []TimePoint } +type ModelBreakdownEntry struct { + Name string + Cost float64 + Input float64 + Output float64 + Requests float64 + Requests1d float64 + Series []TimePoint +} + +type ProviderBreakdownEntry struct { + Name string + Cost float64 + Input float64 + Output float64 + Requests float64 +} + +type ClientBreakdownEntry struct { + Name string + Total float64 + Input float64 + Output float64 + Cached float64 + Reasoning float64 + Requests float64 + Sessions float64 + SeriesKind string + Series []TimePoint +} + func ExtractLanguageUsage(s UsageSnapshot) ([]LanguageUsageEntry, map[string]bool) { byLang := make(map[string]float64) usedKeys := make(map[string]bool) @@ -219,6 +251,594 @@ func ExtractProjectUsage(s UsageSnapshot) ([]ProjectUsageEntry, map[string]bool) return out, usedKeys } +func ExtractModelBreakdown(s UsageSnapshot) ([]ModelBreakdownEntry, map[string]bool) { + type agg struct { + cost float64 + input float64 + output float64 + requests float64 + requests1d float64 + series []TimePoint + } + byModel := make(map[string]*agg) + usedKeys := make(map[string]bool) + + ensure := func(name string) *agg { + if _, ok := byModel[name]; !ok { + byModel[name] = &agg{} + } + return byModel[name] + } + + recordInput := func(name string, value float64, key string) { + ensure(name).input += value + usedKeys[key] = true + } + recordOutput := func(name string, value float64, key string) { + ensure(name).output += value + usedKeys[key] = true + } + recordCost := func(name string, value float64, key string) { + ensure(name).cost += value + usedKeys[key] = true + } + recordRequests := func(name string, value float64, key string) { + 
ensure(name).requests += value + usedKeys[key] = true + } + recordRequests1d := func(name string, value float64, key string) { + ensure(name).requests1d += value + usedKeys[key] = true + } + + for key, metric := range s.Metrics { + if metric.Used == nil { + continue + } + switch { + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_requests_today"): + recordRequests1d(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_requests_today"), *metric.Used, key) + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_requests"): + recordRequests(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_requests"), *metric.Used, key) + default: + rawModel, kind, ok := parseModelMetricKey(key) + if !ok { + continue + } + switch kind { + case modelMetricInput: + recordInput(rawModel, *metric.Used, key) + case modelMetricOutput: + recordOutput(rawModel, *metric.Used, key) + case modelMetricCostUSD: + recordCost(rawModel, *metric.Used, key) + } + } + } + + for key, points := range s.DailySeries { + if !strings.HasPrefix(key, "usage_model_") || len(points) == 0 { + continue + } + name := strings.TrimSpace(strings.TrimPrefix(key, "usage_model_")) + if name == "" { + continue + } + entry := ensure(name) + entry.series = points + if entry.requests <= 0 { + entry.requests = sumBreakdownSeries(points) + } + } + + out := make([]ModelBreakdownEntry, 0, len(byModel)) + for name, entry := range byModel { + if entry.cost <= 0 && entry.input <= 0 && entry.output <= 0 && entry.requests <= 0 && len(entry.series) == 0 { + continue + } + out = append(out, ModelBreakdownEntry{ + Name: name, + Cost: entry.cost, + Input: entry.input, + Output: entry.output, + Requests: entry.requests, + Requests1d: entry.requests1d, + Series: entry.series, + }) + } + sort.Slice(out, func(i, j int) bool { + ti := out[i].Input + out[i].Output + tj := out[j].Input + out[j].Output + if ti != tj { + return ti > tj + } + if out[i].Cost != out[j].Cost { + return out[i].Cost > out[j].Cost + 
} + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func ExtractProviderBreakdown(s UsageSnapshot) ([]ProviderBreakdownEntry, map[string]bool) { + type agg struct { + cost float64 + input float64 + output float64 + requests float64 + } + type fieldState struct { + cost bool + input bool + output bool + requests bool + } + byProvider := make(map[string]*agg) + usedKeys := make(map[string]bool) + fieldsByProvider := make(map[string]*fieldState) + + ensure := func(name string) *agg { + if _, ok := byProvider[name]; !ok { + byProvider[name] = &agg{} + } + return byProvider[name] + } + ensureFields := func(name string) *fieldState { + if _, ok := fieldsByProvider[name]; !ok { + fieldsByProvider[name] = &fieldState{} + } + return fieldsByProvider[name] + } + recordCost := func(name string, value float64, key string) { + ensure(name).cost += value + ensureFields(name).cost = true + usedKeys[key] = true + } + recordInput := func(name string, value float64, key string) { + ensure(name).input += value + ensureFields(name).input = true + usedKeys[key] = true + } + recordOutput := func(name string, value float64, key string) { + ensure(name).output += value + ensureFields(name).output = true + usedKeys[key] = true + } + recordRequests := func(name string, value float64, key string) { + ensure(name).requests += value + ensureFields(name).requests = true + usedKeys[key] = true + } + + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "provider_") { + continue + } + switch { + case strings.HasSuffix(key, "_cost_usd"): + recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost_usd"), *metric.Used, key) + case strings.HasSuffix(key, "_cost") && !strings.HasSuffix(key, "_byok_cost"): + recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost"), *metric.Used, key) + case strings.HasSuffix(key, 
"_input_tokens"): + recordInput(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_input_tokens"), *metric.Used, key) + case strings.HasSuffix(key, "_output_tokens"): + recordOutput(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_output_tokens"), *metric.Used, key) + case strings.HasSuffix(key, "_requests"): + recordRequests(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_requests"), *metric.Used, key) + } + } + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "provider_") || !strings.HasSuffix(key, "_byok_cost") { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_byok_cost") + if base == "" || ensureFields(base).cost { + continue + } + recordCost(base, *metric.Used, key) + } + + meta := snapshotBreakdownMetaEntries(s) + for key, raw := range meta { + if usedKeys[key] || !strings.HasPrefix(key, "provider_") { + continue + } + switch { + case strings.HasSuffix(key, "_cost") && !strings.HasSuffix(key, "_byok_cost"): + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost") + if base == "" || ensureFields(base).cost { + continue + } + recordCost(base, value, key) + case strings.HasSuffix(key, "_input_tokens"), strings.HasSuffix(key, "_prompt_tokens"): + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_input_tokens") + base = strings.TrimSuffix(base, "_prompt_tokens") + if base == "" || ensureFields(base).input { + continue + } + recordInput(base, value, key) + case strings.HasSuffix(key, "_output_tokens"), strings.HasSuffix(key, "_completion_tokens"): + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_output_tokens") + base = strings.TrimSuffix(base, "_completion_tokens") + if base == "" || 
ensureFields(base).output { + continue + } + recordOutput(base, value, key) + case strings.HasSuffix(key, "_requests"): + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_requests") + if base == "" || ensureFields(base).requests { + continue + } + recordRequests(base, value, key) + } + } + for key, raw := range meta { + if usedKeys[key] || !strings.HasPrefix(key, "provider_") || !strings.HasSuffix(key, "_byok_cost") { + continue + } + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_byok_cost") + if base == "" || ensureFields(base).cost { + continue + } + recordCost(base, value, key) + } + + out := make([]ProviderBreakdownEntry, 0, len(byProvider)) + for name, entry := range byProvider { + if entry.cost <= 0 && entry.input <= 0 && entry.output <= 0 && entry.requests <= 0 { + continue + } + out = append(out, ProviderBreakdownEntry{ + Name: name, + Cost: entry.cost, + Input: entry.input, + Output: entry.output, + Requests: entry.requests, + }) + } + sort.Slice(out, func(i, j int) bool { + ti := out[i].Input + out[i].Output + tj := out[j].Input + out[j].Output + if ti != tj { + return ti > tj + } + if out[i].Cost != out[j].Cost { + return out[i].Cost > out[j].Cost + } + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func ExtractUpstreamProviderBreakdown(s UsageSnapshot) ([]ProviderBreakdownEntry, map[string]bool) { + type agg struct { + cost float64 + input float64 + output float64 + requests float64 + } + byProvider := make(map[string]*agg) + usedKeys := make(map[string]bool) + + ensure := func(name string) *agg { + if _, ok := byProvider[name]; !ok { + byProvider[name] = &agg{} + } + return byProvider[name] + } + + for key, metric := range s.Metrics { + if metric.Used == nil || 
!strings.HasPrefix(key, "upstream_") { + continue + } + switch { + case strings.HasSuffix(key, "_cost_usd"): + ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_cost_usd")).cost += *metric.Used + usedKeys[key] = true + case strings.HasSuffix(key, "_input_tokens"): + ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_input_tokens")).input += *metric.Used + usedKeys[key] = true + case strings.HasSuffix(key, "_output_tokens"): + ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_output_tokens")).output += *metric.Used + usedKeys[key] = true + case strings.HasSuffix(key, "_requests"): + ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_requests")).requests += *metric.Used + usedKeys[key] = true + } + } + + out := make([]ProviderBreakdownEntry, 0, len(byProvider)) + for name, entry := range byProvider { + out = append(out, ProviderBreakdownEntry{ + Name: name, + Cost: entry.cost, + Input: entry.input, + Output: entry.output, + Requests: entry.requests, + }) + } + sort.Slice(out, func(i, j int) bool { + ti := out[i].Input + out[i].Output + tj := out[j].Input + out[j].Output + if ti != tj { + return ti > tj + } + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + if len(out) == 0 { + return nil, nil + } + return out, usedKeys +} + +func ExtractClientBreakdown(s UsageSnapshot) ([]ClientBreakdownEntry, map[string]bool) { + byClient := make(map[string]*ClientBreakdownEntry) + usedKeys := make(map[string]bool) + tokenSeriesByClient := make(map[string]map[string]float64) + usageClientSeriesByClient := make(map[string]map[string]float64) + usageSourceSeriesByClient := make(map[string]map[string]float64) + hasAllTimeRequests := make(map[string]bool) + requestsTodayFallback := make(map[string]float64) + hasAnyClientMetrics := false + + ensure := func(name string) *ClientBreakdownEntry { + if _, ok := byClient[name]; !ok { + byClient[name] = 
&ClientBreakdownEntry{Name: name} + } + return byClient[name] + } + + for key, metric := range s.Metrics { + if metric.Used == nil { + continue + } + if strings.HasPrefix(key, "client_") { + name, field, ok := parseClientMetricKey(key) + if !ok { + continue + } + name = canonicalizeClientBucket(name) + hasAnyClientMetrics = true + client := ensure(name) + switch field { + case "total_tokens": + client.Total = *metric.Used + case "input_tokens": + client.Input = *metric.Used + case "output_tokens": + client.Output = *metric.Used + case "cached_tokens": + client.Cached = *metric.Used + case "reasoning_tokens": + client.Reasoning = *metric.Used + case "requests": + client.Requests = *metric.Used + hasAllTimeRequests[name] = true + case "sessions": + client.Sessions = *metric.Used + } + usedKeys[key] = true + continue + } + if strings.HasPrefix(key, "source_") { + sourceName, field, ok := parseSourceMetricKey(key) + if !ok { + continue + } + clientName := canonicalizeClientBucket(sourceName) + client := ensure(clientName) + switch field { + case "requests": + client.Requests += *metric.Used + hasAllTimeRequests[clientName] = true + case "requests_today": + requestsTodayFallback[clientName] += *metric.Used + } + usedKeys[key] = true + } + } + + for clientName, value := range requestsTodayFallback { + if hasAllTimeRequests[clientName] { + continue + } + client := ensure(clientName) + if client.Requests <= 0 { + client.Requests = value + } + } + + hasAnyClientSeries := false + for key := range s.DailySeries { + if strings.HasPrefix(key, "tokens_client_") || strings.HasPrefix(key, "usage_client_") { + hasAnyClientSeries = true + break + } + } + + for key, points := range s.DailySeries { + if len(points) == 0 { + continue + } + switch { + case strings.HasPrefix(key, "tokens_client_"): + name := canonicalizeClientBucket(strings.TrimPrefix(key, "tokens_client_")) + if name == "" { + continue + } + mergeBreakdownSeriesByDay(tokenSeriesByClient, name, points) + case 
strings.HasPrefix(key, "usage_client_"): + name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_client_")) + if name == "" { + continue + } + mergeBreakdownSeriesByDay(usageClientSeriesByClient, name, points) + case strings.HasPrefix(key, "usage_source_"): + if hasAnyClientMetrics || hasAnyClientSeries { + continue + } + name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_source_")) + if name == "" { + continue + } + mergeBreakdownSeriesByDay(usageSourceSeriesByClient, name, points) + } + } + + for name, pointsByDay := range tokenSeriesByClient { + client := ensure(name) + client.Series = breakdownSortedSeries(pointsByDay) + client.SeriesKind = "tokens" + if client.Total <= 0 { + client.Total = sumBreakdownSeries(client.Series) + } + } + for name, pointsByDay := range usageClientSeriesByClient { + client := ensure(name) + if client.SeriesKind == "tokens" { + continue + } + client.Series = breakdownSortedSeries(pointsByDay) + client.SeriesKind = "requests" + if client.Requests <= 0 { + client.Requests = sumBreakdownSeries(client.Series) + } + } + for name, pointsByDay := range usageSourceSeriesByClient { + client := ensure(name) + if client.SeriesKind != "" { + continue + } + client.Series = breakdownSortedSeries(pointsByDay) + client.SeriesKind = "requests" + if client.Requests <= 0 { + client.Requests = sumBreakdownSeries(client.Series) + } + } + + out := make([]ClientBreakdownEntry, 0, len(byClient)) + for _, client := range byClient { + if breakdownClientValue(*client) <= 0 && client.Sessions <= 0 && client.Requests <= 0 && len(client.Series) == 0 { + continue + } + out = append(out, *client) + } + sort.Slice(out, func(i, j int) bool { + vi := breakdownClientTokenValue(out[i]) + vj := breakdownClientTokenValue(out[j]) + if vi != vj { + return vi > vj + } + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + if out[i].Sessions != out[j].Sessions { + return out[i].Sessions > out[j].Sessions + } + return 
out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func ExtractInterfaceClientBreakdown(s UsageSnapshot) ([]ClientBreakdownEntry, map[string]bool) { + byName := make(map[string]*ClientBreakdownEntry) + usedKeys := make(map[string]bool) + usageSeriesByName := make(map[string]map[string]float64) + + ensure := func(name string) *ClientBreakdownEntry { + if _, ok := byName[name]; !ok { + byName[name] = &ClientBreakdownEntry{Name: name} + } + return byName[name] + } + + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "interface_") { + continue + } + name := canonicalizeClientBucket(strings.TrimPrefix(key, "interface_")) + if name == "" { + continue + } + ensure(name).Requests += *metric.Used + usedKeys[key] = true + } + + for key, points := range s.DailySeries { + if len(points) == 0 { + continue + } + switch { + case strings.HasPrefix(key, "usage_client_"): + name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_client_")) + if name != "" { + mergeBreakdownSeriesByDay(usageSeriesByName, name, points) + } + case strings.HasPrefix(key, "usage_source_"): + name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_source_")) + if name != "" { + mergeBreakdownSeriesByDay(usageSeriesByName, name, points) + } + } + } + + for name, pointsByDay := range usageSeriesByName { + entry := ensure(name) + entry.Series = breakdownSortedSeries(pointsByDay) + entry.SeriesKind = "requests" + if entry.Requests <= 0 { + entry.Requests = sumBreakdownSeries(entry.Series) + } + } + + out := make([]ClientBreakdownEntry, 0, len(byName)) + for _, entry := range byName { + if entry.Requests <= 0 && len(entry.Series) == 0 { + continue + } + out = append(out, *entry) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + if len(out) == 0 { + return nil, nil + } + return out, usedKeys +} + func 
parseProjectMetricKey(key string) (name, field string, ok bool) { const prefix = "project_" if !strings.HasPrefix(key, prefix) { @@ -276,3 +896,142 @@ func sumBreakdownSeries(points []TimePoint) float64 { } return total } + +func parseSourceMetricKey(key string) (name, field string, ok bool) { + const prefix = "source_" + if !strings.HasPrefix(key, prefix) { + return "", "", false + } + rest := strings.TrimPrefix(key, prefix) + for _, suffix := range []string{"_requests_today", "_requests"} { + if strings.HasSuffix(rest, suffix) { + return strings.TrimSuffix(rest, suffix), strings.TrimPrefix(suffix, "_"), true + } + } + return "", "", false +} + +func parseClientMetricKey(key string) (name, field string, ok bool) { + const prefix = "client_" + if !strings.HasPrefix(key, prefix) { + return "", "", false + } + rest := strings.TrimPrefix(key, prefix) + for _, suffix := range []string{ + "_total_tokens", "_input_tokens", "_output_tokens", + "_cached_tokens", "_reasoning_tokens", "_requests", "_sessions", + } { + if strings.HasSuffix(rest, suffix) { + return strings.TrimSuffix(rest, suffix), strings.TrimPrefix(suffix, "_"), true + } + } + return "", "", false +} + +func canonicalizeClientBucket(name string) string { + bucket := sourceAsClientBucket(name) + switch bucket { + case "codex", "openusage": + return "cli_agents" + } + return bucket +} + +func sourceAsClientBucket(source string) string { + s := strings.ToLower(strings.TrimSpace(source)) + s = strings.ReplaceAll(s, "-", "_") + s = strings.ReplaceAll(s, " ", "_") + if s == "" || s == "unknown" { + return "other" + } + + switch s { + case "composer", "tab", "human", "vscode", "ide", "editor", "cursor": + return "ide" + case "cloud", "cloud_agent", "cloud_agents", "web", "web_agent", "background_agent": + return "cloud_agents" + case "cli", "terminal", "agent", "agents", "cli_agents": + return "cli_agents" + case "desktop", "desktop_app": + return "desktop_app" + } + + if strings.Contains(s, "cloud") || 
strings.Contains(s, "web") { + return "cloud_agents" + } + if strings.Contains(s, "cli") || strings.Contains(s, "terminal") || strings.Contains(s, "agent") { + return "cli_agents" + } + if strings.Contains(s, "compose") || strings.Contains(s, "tab") || strings.Contains(s, "ide") || strings.Contains(s, "editor") { + return "ide" + } + return s +} + +func snapshotBreakdownMetaEntries(s UsageSnapshot) map[string]string { + if len(s.Raw) == 0 && len(s.Attributes) == 0 && len(s.Diagnostics) == 0 { + return nil + } + meta := make(map[string]string, len(s.Raw)+len(s.Attributes)+len(s.Diagnostics)) + for key, raw := range s.Attributes { + meta[key] = raw + } + for key, raw := range s.Diagnostics { + if _, ok := meta[key]; !ok { + meta[key] = raw + } + } + for key, raw := range s.Raw { + if _, ok := meta[key]; !ok { + meta[key] = raw + } + } + return meta +} + +func parseBreakdownNumeric(raw string) (float64, bool) { + s := strings.TrimSpace(strings.ReplaceAll(raw, ",", "")) + if s == "" { + return 0, false + } + s = strings.TrimPrefix(s, "$") + s = strings.TrimSuffix(s, "%") + if idx := strings.IndexByte(s, ' '); idx > 0 { + s = s[:idx] + } + if idx := strings.IndexByte(s, '/'); idx > 0 { + s = s[:idx] + } + s = strings.TrimSpace(s) + if s == "" { + return 0, false + } + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0, false + } + return v, true +} + +func breakdownClientTokenValue(client ClientBreakdownEntry) float64 { + if client.Total > 0 { + return client.Total + } + if client.Input > 0 || client.Output > 0 || client.Cached > 0 || client.Reasoning > 0 { + return client.Input + client.Output + client.Cached + client.Reasoning + } + return 0 +} + +func breakdownClientValue(client ClientBreakdownEntry) float64 { + if value := breakdownClientTokenValue(client); value > 0 { + return value + } + if client.Requests > 0 { + return client.Requests + } + if len(client.Series) > 0 { + return sumBreakdownSeries(client.Series) + } + return 0 +} diff --git 
a/internal/core/usage_breakdowns_test.go b/internal/core/usage_breakdowns_test.go index 6508e36..5933d00 100644 --- a/internal/core/usage_breakdowns_test.go +++ b/internal/core/usage_breakdowns_test.go @@ -99,3 +99,113 @@ func TestExtractProjectUsage(t *testing.T) { t.Fatalf("used keys missing project metrics: %#v", used) } } + +func TestExtractModelBreakdown(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "model_alpha_input_tokens": {Used: Float64Ptr(10)}, + "model_alpha_output_tokens": {Used: Float64Ptr(3)}, + "model_alpha_cost_usd": {Used: Float64Ptr(1.25)}, + "model_alpha_requests": {Used: Float64Ptr(4)}, + "model_alpha_requests_today": {Used: Float64Ptr(2)}, + "input_tokens_beta": {Used: Float64Ptr(7)}, + "output_tokens_beta": {Used: Float64Ptr(2)}, + }, + DailySeries: map[string][]TimePoint{ + "usage_model_beta": { + {Date: "2026-03-08", Value: 4}, + {Date: "2026-03-09", Value: 5}, + }, + }, + } + + got, used := ExtractModelBreakdown(snap) + if len(got) != 2 { + t.Fatalf("len(got) = %d, want 2", len(got)) + } + if got[0].Name != "alpha" || got[0].Input != 10 || got[0].Output != 3 || got[0].Cost != 1.25 || got[0].Requests1d != 2 { + t.Fatalf("got[0] = %#v", got[0]) + } + if got[1].Name != "beta" || got[1].Requests != 9 || len(got[1].Series) != 2 { + t.Fatalf("got[1] = %#v", got[1]) + } + if !used["model_alpha_cost_usd"] || !used["input_tokens_beta"] { + t.Fatalf("used keys missing expected model metrics: %#v", used) + } +} + +func TestExtractProviderBreakdown(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "provider_openai_byok_cost": {Used: Float64Ptr(0.8)}, + "provider_openai_requests": {Used: Float64Ptr(6)}, + }, + Raw: map[string]string{ + "provider_openai_prompt_tokens": "120", + "provider_openai_completion_tokens": "20", + }, + } + + got, used := ExtractProviderBreakdown(snap) + if len(got) != 1 { + t.Fatalf("len(got) = %d, want 1", len(got)) + } + if got[0].Name != "openai" || got[0].Cost != 0.8 || 
got[0].Input != 120 || got[0].Output != 20 || got[0].Requests != 6 { + t.Fatalf("got[0] = %#v", got[0]) + } + if !used["provider_openai_byok_cost"] || !used["provider_openai_requests"] { + t.Fatalf("used keys missing expected provider metrics: %#v", used) + } +} + +func TestExtractClientBreakdown(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "source_composer_requests": {Used: Float64Ptr(80)}, + "client_ide_sessions": {Used: Float64Ptr(3)}, + }, + DailySeries: map[string][]TimePoint{ + "usage_source_composer": { + {Date: "2026-02-20", Value: 10}, + {Date: "2026-02-21", Value: 70}, + }, + }, + } + + got, used := ExtractClientBreakdown(snap) + if len(got) != 1 { + t.Fatalf("len(got) = %d, want 1", len(got)) + } + if got[0].Name != "ide" || got[0].Requests != 80 || got[0].Sessions != 3 || len(got[0].Series) != 0 { + t.Fatalf("got[0] = %#v", got[0]) + } + if !used["source_composer_requests"] || !used["client_ide_sessions"] { + t.Fatalf("used keys missing expected client metrics: %#v", used) + } +} + +func TestExtractInterfaceClientBreakdown(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "interface_cli": {Used: Float64Ptr(5)}, + "interface_tab": {Used: Float64Ptr(4)}, + }, + DailySeries: map[string][]TimePoint{ + "usage_source_cli": { + {Date: "2026-03-08", Value: 2}, + {Date: "2026-03-09", Value: 3}, + }, + }, + } + + got, used := ExtractInterfaceClientBreakdown(snap) + if len(got) != 2 { + t.Fatalf("len(got) = %d, want 2", len(got)) + } + if got[0].Name != "cli_agents" || got[0].Requests != 5 { + t.Fatalf("got[0] = %#v", got[0]) + } + if !used["interface_cli"] || !used["interface_tab"] { + t.Fatalf("used keys missing expected interface metrics: %#v", used) + } +} diff --git a/internal/providers/openrouter/openrouter.go b/internal/providers/openrouter/openrouter.go index e3f1725..b648769 100644 --- a/internal/providers/openrouter/openrouter.go +++ b/internal/providers/openrouter/openrouter.go @@ -1991,161 +1991,6 @@ 
func (p *Provider) fetchAllGenerations(ctx context.Context, baseURL, apiKey stri return all, nil } -func generationByokCost(g generationEntry) float64 { - if !g.IsByok && g.UpstreamInferenceCost == nil { - return 0 - } - if g.UpstreamInferenceCost != nil && *g.UpstreamInferenceCost > 0 { - return *g.UpstreamInferenceCost - } - if g.TotalCost > 0 { - return g.TotalCost - } - return g.Usage -} - -func resolveGenerationHostingProvider(g generationEntry) string { - name, _ := resolveGenerationHostingProviderWithSource(g) - return name -} - -func resolveGenerationHostingProviderWithSource(g generationEntry) (string, providerResolutionSource) { - if name := providerNameFromResponses(g.ProviderResponses); name != "" { - return name, providerSourceResponses - } - if name := providerNameFromGenerationEntry(g); name != "" { - return name, providerSourceEntryField - } - if name := providerNameFromUpstreamID(g.UpstreamID); name != "" { - return name, providerSourceUpstreamID - } - if name := strings.TrimSpace(g.ProviderName); name != "" && !isLikelyRouterClientProviderName(name) { - return name, providerSourceProviderName - } - // Final fallback keeps provider splits stable when generation payloads - // omit upstream provider metadata but model IDs still include vendor prefix. - if name := providerNameFromModel(g.Model); name != "" { - return name, providerSourceModelPrefix - } - // Last-resort fallback: keep whatever the API returned. - return strings.TrimSpace(g.ProviderName), providerSourceFallbackLabel -} - -func providerNameFromResponses(responses []generationProviderResponse) string { - if len(responses) == 0 { - return "" - } - // Prefer the last successful upstream response (final selected hoster). 
- for i := len(responses) - 1; i >= 0; i-- { - name := generationProviderResponseName(responses[i]) - if name == "" { - continue - } - if responses[i].Status != nil && *responses[i].Status >= 200 && *responses[i].Status < 300 { - return name - } - } - // Fall back to the last named upstream response when statuses are absent. - for i := len(responses) - 1; i >= 0; i-- { - name := generationProviderResponseName(responses[i]) - if name != "" { - return name - } - } - return "" -} - -func generationProviderResponseName(resp generationProviderResponse) string { - for _, candidate := range []string{ - resp.ProviderName, - resp.Provider, - resp.ProviderID, - } { - name := strings.TrimSpace(candidate) - if name != "" && !isLikelyRouterClientProviderName(name) { - return name - } - } - return "" -} - -func providerNameFromGenerationEntry(g generationEntry) string { - for _, candidate := range []string{ - g.UpstreamProviderName, - g.UpstreamProvider, - g.ProviderSlug, - g.ProviderID, - g.Provider, - } { - name := strings.TrimSpace(candidate) - if name != "" && !isLikelyRouterClientProviderName(name) { - return name - } - } - return "" -} - -func providerNameFromModel(model string) string { - norm := normalizeModelName(model) - if norm == "" { - return "" - } - slash := strings.IndexByte(norm, '/') - if slash <= 0 { - for _, prefix := range knownModelVendorPrefixes { - if norm == prefix || strings.HasPrefix(norm, prefix+"-") || strings.HasPrefix(norm, prefix+"_") { - return prefix - } - } - return "" - } - return norm[:slash] -} - -func providerNameFromUpstreamID(upstreamID string) string { - id := strings.TrimSpace(upstreamID) - if id == "" { - return "" - } - for _, sep := range []string{"/", ":", "|"} { - if idx := strings.Index(id, sep); idx > 0 { - candidate := strings.TrimSpace(id[:idx]) - if isLikelyProviderSlug(candidate) { - return candidate - } - } - } - return "" -} - -func isLikelyProviderSlug(candidate string) bool { - if candidate == "" { - return false - } - 
slug := strings.ToLower(sanitizeName(candidate)) - if slug == "" || slug == "unknown" { - return false - } - switch slug { - case "chatcmpl", "msg", "resp", "response", "gen", "cmpl", "request", "req", "run", "completion": - return false - } - return true -} - -func isLikelyRouterClientProviderName(name string) bool { - n := strings.ToLower(strings.TrimSpace(name)) - if n == "" { - return true - } - clean := strings.NewReplacer(" ", "", "-", "", "_", "", ".", "").Replace(n) - switch clean { - case "unknown", "openrouter", "openrouterauto", "openusage": - return true - } - return strings.Contains(clean, "openrouter") || strings.Contains(clean, "openusage") -} - func (p *Provider) enrichGenerationProviderMetadata(ctx context.Context, baseURL, apiKey string, rows []generationEntry) (int, int) { attempts := 0 hits := 0 diff --git a/internal/providers/openrouter/provider_resolution.go b/internal/providers/openrouter/provider_resolution.go new file mode 100644 index 0000000..b7acb3f --- /dev/null +++ b/internal/providers/openrouter/provider_resolution.go @@ -0,0 +1,153 @@ +package openrouter + +import "strings" + +func generationByokCost(g generationEntry) float64 { + if !g.IsByok && g.UpstreamInferenceCost == nil { + return 0 + } + if g.UpstreamInferenceCost != nil && *g.UpstreamInferenceCost > 0 { + return *g.UpstreamInferenceCost + } + if g.TotalCost > 0 { + return g.TotalCost + } + return g.Usage +} + +func resolveGenerationHostingProvider(g generationEntry) string { + name, _ := resolveGenerationHostingProviderWithSource(g) + return name +} + +func resolveGenerationHostingProviderWithSource(g generationEntry) (string, providerResolutionSource) { + if name := providerNameFromResponses(g.ProviderResponses); name != "" { + return name, providerSourceResponses + } + if name := providerNameFromGenerationEntry(g); name != "" { + return name, providerSourceEntryField + } + if name := providerNameFromUpstreamID(g.UpstreamID); name != "" { + return name, 
providerSourceUpstreamID + } + if name := strings.TrimSpace(g.ProviderName); name != "" && !isLikelyRouterClientProviderName(name) { + return name, providerSourceProviderName + } + if name := providerNameFromModel(g.Model); name != "" { + return name, providerSourceModelPrefix + } + return strings.TrimSpace(g.ProviderName), providerSourceFallbackLabel +} + +func providerNameFromResponses(responses []generationProviderResponse) string { + if len(responses) == 0 { + return "" + } + for i := len(responses) - 1; i >= 0; i-- { + name := generationProviderResponseName(responses[i]) + if name == "" { + continue + } + if responses[i].Status != nil && *responses[i].Status >= 200 && *responses[i].Status < 300 { + return name + } + } + for i := len(responses) - 1; i >= 0; i-- { + name := generationProviderResponseName(responses[i]) + if name != "" { + return name + } + } + return "" +} + +func generationProviderResponseName(resp generationProviderResponse) string { + for _, candidate := range []string{ + resp.ProviderName, + resp.Provider, + resp.ProviderID, + } { + name := strings.TrimSpace(candidate) + if name != "" && !isLikelyRouterClientProviderName(name) { + return name + } + } + return "" +} + +func providerNameFromGenerationEntry(g generationEntry) string { + for _, candidate := range []string{ + g.UpstreamProviderName, + g.UpstreamProvider, + g.ProviderSlug, + g.ProviderID, + g.Provider, + } { + name := strings.TrimSpace(candidate) + if name != "" && !isLikelyRouterClientProviderName(name) { + return name + } + } + return "" +} + +func providerNameFromModel(model string) string { + norm := normalizeModelName(model) + if norm == "" { + return "" + } + slash := strings.IndexByte(norm, '/') + if slash <= 0 { + for _, prefix := range knownModelVendorPrefixes { + if norm == prefix || strings.HasPrefix(norm, prefix+"-") || strings.HasPrefix(norm, prefix+"_") { + return prefix + } + } + return "" + } + return norm[:slash] +} + +func providerNameFromUpstreamID(upstreamID 
string) string { + id := strings.TrimSpace(upstreamID) + if id == "" { + return "" + } + for _, sep := range []string{"/", ":", "|"} { + if idx := strings.Index(id, sep); idx > 0 { + candidate := strings.TrimSpace(id[:idx]) + if isLikelyProviderSlug(candidate) { + return candidate + } + } + } + return "" +} + +func isLikelyProviderSlug(candidate string) bool { + if candidate == "" { + return false + } + slug := strings.ToLower(sanitizeName(candidate)) + if slug == "" || slug == "unknown" { + return false + } + switch slug { + case "chatcmpl", "msg", "resp", "response", "gen", "cmpl", "request", "req", "run", "completion": + return false + } + return true +} + +func isLikelyRouterClientProviderName(name string) bool { + n := strings.ToLower(strings.TrimSpace(name)) + if n == "" { + return true + } + clean := strings.NewReplacer(" ", "", "-", "", "_", "", ".", "").Replace(n) + switch clean { + case "unknown", "openrouter", "openrouterauto", "openusage": + return true + } + return strings.Contains(clean, "openrouter") || strings.Contains(clean, "openusage") +} diff --git a/internal/tui/tiles_composition.go b/internal/tui/tiles_composition.go index 0f97cec..92907db 100644 --- a/internal/tui/tiles_composition.go +++ b/internal/tui/tiles_composition.go @@ -255,115 +255,19 @@ func selectBurnMode(totalTokens, totalCost, totalRequests float64) (mode string, } func collectProviderModelMix(snap core.UsageSnapshot) ([]modelMixEntry, map[string]bool) { - type agg struct { - cost float64 - input float64 - output float64 - requests float64 - requests1d float64 - series []core.TimePoint - } - byModel := make(map[string]*agg) - usedKeys := make(map[string]bool) - - ensure := func(name string) *agg { - if _, ok := byModel[name]; !ok { - byModel[name] = &agg{} - } - return byModel[name] - } - - recordCost := func(name string, v float64, key string) { - ensure(name).cost += v - usedKeys[key] = true - } - recordInput := func(name string, v float64, key string) { - ensure(name).input += 
v - usedKeys[key] = true - } - recordOutput := func(name string, v float64, key string) { - ensure(name).output += v - usedKeys[key] = true - } - recordRequests := func(name string, v float64, key string) { - ensure(name).requests += v - usedKeys[key] = true - } - recordRequests1d := func(name string, v float64, key string) { - ensure(name).requests1d += v - usedKeys[key] = true - } - - for key, met := range snap.Metrics { - if met.Used == nil { - continue - } - switch { - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cost_usd"): - recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cost_usd"), *met.Used, key) - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cost"): - recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cost"), *met.Used, key) - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_input_tokens"): - recordInput(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_input_tokens"), *met.Used, key) - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_output_tokens"): - recordOutput(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_output_tokens"), *met.Used, key) - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_requests_today"): - recordRequests1d(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_requests_today"), *met.Used, key) - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_requests"): - recordRequests(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_requests"), *met.Used, key) - case strings.HasPrefix(key, "input_tokens_"): - recordInput(strings.TrimPrefix(key, "input_tokens_"), *met.Used, key) - case strings.HasPrefix(key, "output_tokens_"): - recordOutput(strings.TrimPrefix(key, "output_tokens_"), *met.Used, key) - } - } - - for key, points := range snap.DailySeries { - const prefix = "usage_model_" - if !strings.HasPrefix(key, prefix) || len(points) == 0 { - continue - } - name := 
strings.TrimPrefix(key, prefix) - if name == "" { - continue - } - m := ensure(name) - m.series = points - if m.requests <= 0 { - m.requests = sumSeriesValues(points) - } - } - - models := make([]modelMixEntry, 0, len(byModel)) - for name, v := range byModel { - if v.cost <= 0 && v.input <= 0 && v.output <= 0 && v.requests <= 0 && len(v.series) == 0 { - continue - } + entries, usedKeys := core.ExtractModelBreakdown(snap) + models := make([]modelMixEntry, 0, len(entries)) + for _, entry := range entries { models = append(models, modelMixEntry{ - name: name, - cost: v.cost, - input: v.input, - output: v.output, - requests: v.requests, - requests1d: v.requests1d, - series: v.series, + name: entry.Name, + cost: entry.Cost, + input: entry.Input, + output: entry.Output, + requests: entry.Requests, + requests1d: entry.Requests1d, + series: entry.Series, }) } - - sort.Slice(models, func(i, j int) bool { - ti := models[i].input + models[i].output - tj := models[j].input + models[j].output - if ti != tj { - return ti > tj - } - if models[i].cost != models[j].cost { - return models[i].cost > models[j].cost - } - if models[i].requests != models[j].requests { - return models[i].requests > models[j].requests - } - return models[i].name < models[j].name - }) return models, usedKeys } @@ -474,168 +378,17 @@ func buildProviderVendorCompositionLines(snap core.UsageSnapshot, innerW int, ex } func collectProviderVendorMix(snap core.UsageSnapshot) ([]providerMixEntry, map[string]bool) { - type agg struct { - cost float64 - input float64 - output float64 - requests float64 - } - type providerFieldState struct { - cost bool - input bool - output bool - requests bool - } - byProvider := make(map[string]*agg) - usedKeys := make(map[string]bool) - fieldState := make(map[string]*providerFieldState) - - ensure := func(name string) *agg { - if _, ok := byProvider[name]; !ok { - byProvider[name] = &agg{} - } - return byProvider[name] - } - ensureFieldState := func(name string) 
*providerFieldState { - if _, ok := fieldState[name]; !ok { - fieldState[name] = &providerFieldState{} - } - return fieldState[name] - } - - recordCost := func(name string, v float64, key string) { - ensure(name).cost += v - ensureFieldState(name).cost = true - usedKeys[key] = true - } - recordInput := func(name string, v float64, key string) { - ensure(name).input += v - ensureFieldState(name).input = true - usedKeys[key] = true - } - recordOutput := func(name string, v float64, key string) { - ensure(name).output += v - ensureFieldState(name).output = true - usedKeys[key] = true - } - recordRequests := func(name string, v float64, key string) { - ensure(name).requests += v - ensureFieldState(name).requests = true - usedKeys[key] = true - } - - // Pass 1: primary metrics (including non-BYOK cost) so BYOK fallback logic is order-independent. - for key, met := range snap.Metrics { - if met.Used == nil || !strings.HasPrefix(key, "provider_") { - continue - } - switch { - case strings.HasSuffix(key, "_cost_usd"): - recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost_usd"), *met.Used, key) - case strings.HasSuffix(key, "_cost") && !strings.HasSuffix(key, "_byok_cost"): - recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost"), *met.Used, key) - case strings.HasSuffix(key, "_input_tokens"): - recordInput(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_input_tokens"), *met.Used, key) - case strings.HasSuffix(key, "_output_tokens"): - recordOutput(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_output_tokens"), *met.Used, key) - case strings.HasSuffix(key, "_requests"): - recordRequests(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_requests"), *met.Used, key) - } - } - // Pass 2: BYOK cost only when primary provider cost is absent. 
- for key, met := range snap.Metrics { - if met.Used == nil || !strings.HasPrefix(key, "provider_") || !strings.HasSuffix(key, "_byok_cost") { - continue - } - base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_byok_cost") - if base == "" || ensureFieldState(base).cost { - continue - } - recordCost(base, *met.Used, key) - } - - meta := snapshotMetaEntries(snap) - // Pass 3: raw fallback for primary cost fields (excluding BYOK), tokens, requests. - for key, raw := range meta { - if usedKeys[key] || !strings.HasPrefix(key, "provider_") { - continue - } - switch { - case strings.HasSuffix(key, "_cost") && !strings.HasSuffix(key, "_byok_cost"): - if v, ok := parseTileNumeric(raw); ok { - baseKey := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost") - if baseKey == "" || ensureFieldState(baseKey).cost { - continue - } - recordCost(baseKey, v, key) - } - case strings.HasSuffix(key, "_input_tokens"), strings.HasSuffix(key, "_prompt_tokens"): - if v, ok := parseTileNumeric(raw); ok { - baseKey := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_input_tokens") - baseKey = strings.TrimSuffix(baseKey, "_prompt_tokens") - if baseKey == "" || ensureFieldState(baseKey).input { - continue - } - recordInput(baseKey, v, key) - } - case strings.HasSuffix(key, "_output_tokens"), strings.HasSuffix(key, "_completion_tokens"): - if v, ok := parseTileNumeric(raw); ok { - baseKey := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_output_tokens") - baseKey = strings.TrimSuffix(baseKey, "_completion_tokens") - if baseKey == "" || ensureFieldState(baseKey).output { - continue - } - recordOutput(baseKey, v, key) - } - case strings.HasSuffix(key, "_requests"): - if v, ok := parseTileNumeric(raw); ok { - baseKey := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_requests") - if baseKey == "" || ensureFieldState(baseKey).requests { - continue - } - recordRequests(baseKey, v, key) - } - } - } - // Pass 4: raw fallback for BYOK cost 
only when no primary cost exists. - for key, raw := range meta { - if usedKeys[key] || !strings.HasPrefix(key, "provider_") || !strings.HasSuffix(key, "_byok_cost") { - continue - } - if v, ok := parseTileNumeric(raw); ok { - baseKey := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_byok_cost") - if baseKey == "" || ensureFieldState(baseKey).cost { - continue - } - recordCost(baseKey, v, key) - } - } - - providers := make([]providerMixEntry, 0, len(byProvider)) - for name, v := range byProvider { - if v.cost <= 0 && v.input <= 0 && v.output <= 0 && v.requests <= 0 { - continue - } + entries, usedKeys := core.ExtractProviderBreakdown(snap) + providers := make([]providerMixEntry, 0, len(entries)) + for _, entry := range entries { providers = append(providers, providerMixEntry{ - name: name, - cost: v.cost, - input: v.input, - output: v.output, - requests: v.requests, + name: entry.Name, + cost: entry.Cost, + input: entry.Input, + output: entry.Output, + requests: entry.Requests, }) } - - sort.Slice(providers, func(i, j int) bool { - ti := providers[i].input + providers[i].output - tj := providers[j].input + providers[j].output - if ti != tj { - return ti > tj - } - if providers[i].cost != providers[j].cost { - return providers[i].cost > providers[j].cost - } - return providers[i].requests > providers[j].requests - }) return providers, usedKeys } @@ -746,69 +499,17 @@ func buildUpstreamProviderCompositionLines(snap core.UsageSnapshot, innerW int, } func collectUpstreamProviderMix(snap core.UsageSnapshot) ([]providerMixEntry, map[string]bool) { - type agg struct { - cost float64 - input float64 - output float64 - requests float64 - } - byProvider := make(map[string]*agg) - usedKeys := make(map[string]bool) - - ensure := func(name string) *agg { - if _, ok := byProvider[name]; !ok { - byProvider[name] = &agg{} - } - return byProvider[name] - } - - for key, met := range snap.Metrics { - if met.Used == nil || !strings.HasPrefix(key, "upstream_") { - continue - 
} - switch { - case strings.HasSuffix(key, "_cost_usd"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_cost_usd") - ensure(name).cost += *met.Used - usedKeys[key] = true - case strings.HasSuffix(key, "_input_tokens"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_input_tokens") - ensure(name).input += *met.Used - usedKeys[key] = true - case strings.HasSuffix(key, "_output_tokens"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_output_tokens") - ensure(name).output += *met.Used - usedKeys[key] = true - case strings.HasSuffix(key, "_requests"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_requests") - ensure(name).requests += *met.Used - usedKeys[key] = true - } - } - - if len(byProvider) == 0 { - return nil, nil - } - - result := make([]providerMixEntry, 0, len(byProvider)) - for name, a := range byProvider { + entries, usedKeys := core.ExtractUpstreamProviderBreakdown(snap) + result := make([]providerMixEntry, 0, len(entries)) + for _, entry := range entries { result = append(result, providerMixEntry{ - name: name, - cost: a.cost, - input: a.input, - output: a.output, - requests: a.requests, + name: entry.Name, + cost: entry.Cost, + input: entry.Input, + output: entry.Output, + requests: entry.Requests, }) } - sort.Slice(result, func(i, j int) bool { - ti := result[i].input + result[i].output - tj := result[j].input + result[j].output - if ti != tj { - return ti > tj - } - return result[i].requests > result[j].requests - }) - return result, usedKeys } @@ -909,140 +610,20 @@ func tailSeriesValues(points []core.TimePoint, max int) []float64 { return values } -func parseSourceMetricKey(key string) (name, field string, ok bool) { - const prefix = "source_" - if !strings.HasPrefix(key, prefix) { - return "", "", false - } - rest := strings.TrimPrefix(key, prefix) - for _, suffix := range []string{ - "_requests_today", - "_requests", - } { - if strings.HasSuffix(rest, suffix) { - 
return strings.TrimSuffix(rest, suffix), strings.TrimPrefix(suffix, "_"), true - } - } - return "", "", false -} - -func sourceAsClientBucket(source string) string { - s := strings.ToLower(strings.TrimSpace(source)) - s = strings.ReplaceAll(s, "-", "_") - s = strings.ReplaceAll(s, " ", "_") - if s == "" || s == "unknown" { - return "other" - } - - switch s { - case "composer", "tab", "human", "vscode", "ide", "editor", "cursor": - return "ide" - case "cloud", "cloud_agent", "cloud_agents", "web", "web_agent", "background_agent": - return "cloud_agents" - case "cli", "terminal", "agent", "agents", "cli_agents": - return "cli_agents" - case "desktop", "desktop_app": - return "desktop_app" - } - - if strings.Contains(s, "cloud") || strings.Contains(s, "web") { - return "cloud_agents" - } - if strings.Contains(s, "cli") || strings.Contains(s, "terminal") || strings.Contains(s, "agent") { - return "cli_agents" - } - if strings.Contains(s, "compose") || strings.Contains(s, "tab") || strings.Contains(s, "ide") || strings.Contains(s, "editor") { - return "ide" - } - return s -} - -func canonicalClientBucket(name string) string { - bucket := sourceAsClientBucket(name) - switch bucket { - case "codex", "openusage": - return "cli_agents" - } - return bucket -} - // collectInterfaceAsClients builds clientMixEntry items from interface_ metrics // so the interface breakdown (composer, cli, human, tab) can be shown directly // in the client composition section instead of a separate panel. 
func collectInterfaceAsClients(snap core.UsageSnapshot) ([]clientMixEntry, map[string]bool) { - byName := make(map[string]*clientMixEntry) - ensure := func(name string) *clientMixEntry { - if _, ok := byName[name]; !ok { - byName[name] = &clientMixEntry{name: name} - } - return byName[name] - } - usedKeys := make(map[string]bool) - usageSeriesByName := make(map[string]map[string]float64) - - for key, met := range snap.Metrics { - if met.Used == nil { - continue - } - if !strings.HasPrefix(key, "interface_") { - continue - } - name := canonicalClientBucket(strings.TrimPrefix(key, "interface_")) - if name == "" { - continue - } - entry := ensure(name) - entry.requests += *met.Used - usedKeys[key] = true - } - - for key, points := range snap.DailySeries { - if len(points) == 0 { - continue - } - switch { - case strings.HasPrefix(key, "usage_client_"): - name := canonicalClientBucket(strings.TrimPrefix(key, "usage_client_")) - if name == "" { - continue - } - mergeSeriesByDay(usageSeriesByName, name, points) - case strings.HasPrefix(key, "usage_source_"): - source := strings.TrimPrefix(key, "usage_source_") - if source == "" { - continue - } - name := canonicalClientBucket(source) - mergeSeriesByDay(usageSeriesByName, name, points) - } - } - - for name, pointsByDay := range usageSeriesByName { - entry := ensure(name) - entry.series = sortedSeriesFromByDay(pointsByDay) - entry.seriesKind = "requests" - if entry.requests <= 0 { - entry.requests = sumSeriesValues(entry.series) - } - } - - if len(byName) == 0 { - return nil, nil - } - - clients := make([]clientMixEntry, 0, len(byName)) - for _, entry := range byName { - if entry.requests <= 0 && len(entry.series) == 0 { - continue - } - clients = append(clients, *entry) + entries, usedKeys := core.ExtractInterfaceClientBreakdown(snap) + clients := make([]clientMixEntry, 0, len(entries)) + for _, entry := range entries { + clients = append(clients, clientMixEntry{ + name: entry.Name, + requests: entry.Requests, + 
seriesKind: entry.SeriesKind, + series: entry.Series, + }) } - sort.Slice(clients, func(i, j int) bool { - if clients[i].requests != clients[j].requests { - return clients[i].requests > clients[j].requests - } - return clients[i].name < clients[j].name - }) return clients, usedKeys } @@ -1285,194 +866,25 @@ func colorForProject(colors map[string]lipgloss.Color, name string) lipgloss.Col } func collectProviderClientMix(snap core.UsageSnapshot) ([]clientMixEntry, map[string]bool) { - byClient := make(map[string]*clientMixEntry) - usedKeys := make(map[string]bool) - - ensure := func(name string) *clientMixEntry { - if _, ok := byClient[name]; !ok { - byClient[name] = &clientMixEntry{name: name} - } - return byClient[name] - } - tokenSeriesByClient := make(map[string]map[string]float64) - usageClientSeriesByClient := make(map[string]map[string]float64) - usageSourceSeriesByClient := make(map[string]map[string]float64) - hasAllTimeRequests := make(map[string]bool) - requestsTodayFallback := make(map[string]float64) - hasAnyClientMetrics := false - - for key, met := range snap.Metrics { - if met.Used == nil { - continue - } - if strings.HasPrefix(key, "client_") { - name, field, ok := parseClientMetricKey(key) - if !ok { - continue - } - name = canonicalClientBucket(name) - hasAnyClientMetrics = true - client := ensure(name) - switch field { - case "total_tokens": - client.total = *met.Used - case "input_tokens": - client.input = *met.Used - case "output_tokens": - client.output = *met.Used - case "cached_tokens": - client.cached = *met.Used - case "reasoning_tokens": - client.reasoning = *met.Used - case "requests": - client.requests = *met.Used - hasAllTimeRequests[name] = true - case "sessions": - client.sessions = *met.Used - } - usedKeys[key] = true - continue - } - if strings.HasPrefix(key, "source_") { - sourceName, field, ok := parseSourceMetricKey(key) - if !ok { - continue - } - clientName := canonicalClientBucket(sourceName) - client := ensure(clientName) - 
switch field { - case "requests": - client.requests += *met.Used - hasAllTimeRequests[clientName] = true - case "requests_today": - requestsTodayFallback[clientName] += *met.Used - } - usedKeys[key] = true - } - } - for clientName, value := range requestsTodayFallback { - if hasAllTimeRequests[clientName] { - continue - } - client := ensure(clientName) - if client.requests <= 0 { - client.requests = value - } - } - hasAnyClientSeries := false - for key := range snap.DailySeries { - if strings.HasPrefix(key, "tokens_client_") || strings.HasPrefix(key, "usage_client_") { - hasAnyClientSeries = true - break - } - } - - for key, points := range snap.DailySeries { - if len(points) == 0 { - continue - } - - switch { - case strings.HasPrefix(key, "tokens_client_"): - name := canonicalClientBucket(strings.TrimPrefix(key, "tokens_client_")) - if name == "" { - continue - } - mergeSeriesByDay(tokenSeriesByClient, name, points) - case strings.HasPrefix(key, "usage_client_"): - name := canonicalClientBucket(strings.TrimPrefix(key, "usage_client_")) - if name == "" { - continue - } - mergeSeriesByDay(usageClientSeriesByClient, name, points) - case strings.HasPrefix(key, "usage_source_"): - if hasAnyClientMetrics || hasAnyClientSeries { - continue - } - name := canonicalClientBucket(strings.TrimPrefix(key, "usage_source_")) - if name == "" { - continue - } - mergeSeriesByDay(usageSourceSeriesByClient, name, points) - default: - continue - } - } - - for name, pointsByDay := range tokenSeriesByClient { - client := ensure(name) - client.series = sortedSeriesFromByDay(pointsByDay) - client.seriesKind = "tokens" - if client.total <= 0 { - client.total = sumSeriesValues(client.series) - } - } - for name, pointsByDay := range usageClientSeriesByClient { - client := ensure(name) - if client.seriesKind == "tokens" { - continue - } - client.series = sortedSeriesFromByDay(pointsByDay) - client.seriesKind = "requests" - if client.requests <= 0 { - client.requests = 
sumSeriesValues(client.series) - } - } - for name, pointsByDay := range usageSourceSeriesByClient { - client := ensure(name) - if client.seriesKind != "" { - continue - } - client.series = sortedSeriesFromByDay(pointsByDay) - client.seriesKind = "requests" - if client.requests <= 0 { - client.requests = sumSeriesValues(client.series) - } - } - - clients := make([]clientMixEntry, 0, len(byClient)) - for _, client := range byClient { - if clientMixValue(*client) <= 0 && client.sessions <= 0 && client.requests <= 0 && len(client.series) == 0 { - continue - } - clients = append(clients, *client) + entries, usedKeys := core.ExtractClientBreakdown(snap) + clients := make([]clientMixEntry, 0, len(entries)) + for _, entry := range entries { + clients = append(clients, clientMixEntry{ + name: entry.Name, + total: entry.Total, + input: entry.Input, + output: entry.Output, + cached: entry.Cached, + reasoning: entry.Reasoning, + requests: entry.Requests, + sessions: entry.Sessions, + seriesKind: entry.SeriesKind, + series: entry.Series, + }) } - - sort.Slice(clients, func(i, j int) bool { - vi := clientTokenValue(clients[i]) - vj := clientTokenValue(clients[j]) - if vi == vj { - if clients[i].requests == clients[j].requests { - if clients[i].sessions == clients[j].sessions { - return clients[i].name < clients[j].name - } - return clients[i].sessions > clients[j].sessions - } - return clients[i].requests > clients[j].requests - } - return vi > vj - }) - return clients, usedKeys } -func parseClientMetricKey(key string) (name, field string, ok bool) { - const prefix = "client_" - if !strings.HasPrefix(key, prefix) { - return "", "", false - } - rest := strings.TrimPrefix(key, prefix) - for _, suffix := range []string{ - "_total_tokens", "_input_tokens", "_output_tokens", - "_cached_tokens", "_reasoning_tokens", "_requests", "_sessions", - } { - if strings.HasSuffix(rest, suffix) { - return strings.TrimSuffix(rest, suffix), strings.TrimPrefix(suffix, "_"), true - } - } - return 
"", "", false -} - func clientTokenValue(client clientMixEntry) float64 { if client.total > 0 { return client.total From dbf3520be82872543e2db9b9574b0b6c9a8d194d Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 12:55:24 +0100 Subject: [PATCH 08/32] refactor: split telemetry snapshot projection --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 3 +- internal/telemetry/usage_view.go | 395 ------------------ internal/telemetry/usage_view_projection.go | 351 ++++++++++++++++ 3 files changed, 353 insertions(+), 396 deletions(-) create mode 100644 internal/telemetry/usage_view_projection.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index cd1dd9b..ab5cdb9 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -39,6 +39,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | R19 | Fixed | Daemon loop decomposition | `internal/daemon/server.go`, `internal/daemon/server_loops.go` | Collection, spool, hook-spool, retention, and poll loops no longer live inline in the main daemon server file. | Continue splitting by loop family if the new file grows too large. | | R20 | Fixed | TUI model/client/provider parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/tiles_composition.go` | Model, client, provider, upstream-provider, and interface-client aggregation/parsing now live in shared core extractors, leaving the TUI composition layer as a thin adapter over typed breakdown entries. | The remaining TUI parsing drift is now mostly in analytics/detail-specific sections rather than the main composition bars. 
| | R21 | Fixed | OpenRouter provider-resolution split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go` | Hosting-provider resolution, BYOK cost inference, and provider-name heuristics moved out of the main OpenRouter provider file into a dedicated helper unit. | Continue splitting analytics/generation pagination/projection concerns. | +| R22 | Fixed | Telemetry snapshot projection split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_projection.go` | Snapshot projection, stale-metric cleanup, daily-series projection, and windowed metric emission moved out of the main usage-view file into a dedicated projection unit. | Continue with the same split for the SQL/query layer. | ## Action Table @@ -49,7 +50,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go:663-729`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go` | The main composition bars now consume shared extractors, but analytics/detail-specific sections still decode some raw metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | | A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go` | `openrouter.go` is smaller after the provider-resolution split, but it still mixes auth probing, credits, keys, analytics parsing, generation pagination, metadata enrichment, and output projection in one large file. | Continue splitting into `api_client`, `analytics`, `generations`, `projection`, and `types` units. | Easier maintenance, smaller diff surface, faster targeted testing. 
| | A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. | Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. | -| A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go` | `usage_view.go` is smaller now, but it is still simultaneously query planner, SQL execution layer, aggregation engine, naming normalizer, and snapshot projection layer. | Split into `query_*`, `aggregate_*`, `projection_*`, and `mcp_*` units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | +| A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go` | `usage_view.go` is materially smaller after the helper/projection split, but it still mixes query planning, SQL execution, aggregation orchestration, and query-specific shaping logic. | Continue splitting into `query_*`, `aggregate_*`, and remaining orchestration units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | | A7 | P2 | Daemon service monolith | `internal/daemon/server.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_loops.go` | The daemon is materially less coupled after the logging/cache/http/loop split, but polling, collection, retention, and spool maintenance still share one large runtime helper unit. 
| Continue splitting the loop-heavy runtime into `polling`, `collection`, `retention`, and `spool` units. | Lower mental load and easier concurrency review. | | A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Cursor’s main time-sensitive path now uses an injectable clock, but several other providers and analytics helpers still read `time.Now()` directly, often mixing local time and UTC. | Extend the clock abstraction to the remaining provider and analytics subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | diff --git a/internal/telemetry/usage_view.go b/internal/telemetry/usage_view.go index 93532a6..6661508 100644 --- a/internal/telemetry/usage_view.go +++ b/internal/telemetry/usage_view.go @@ -237,302 +237,6 @@ func applyCanonicalUsageViewWithDB( return out, nil } -func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, timeWindow core.TimeWindow) { - if snap == nil || agg == nil { - return - } - authoritativeCost := usageAuthoritativeCost(*snap) - windowLabel := string(timeWindow) - snap.EnsureMaps() - if snap.DailySeries == nil { - snap.DailySeries = make(map[string][]core.TimePoint) - } - - // Save API-sourced model cost metrics (billing-cycle) before cleanup. 
- // These will be restored if telemetry events lack sufficient cost attribution. - savedAPIModelCosts := make(map[string]core.Metric) - for key, m := range snap.Metrics { - if strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cost") && m.Window == "billing-cycle" { - savedAPIModelCosts[key] = m - } - } - - metricsBefore := len(snap.Metrics) - _, hadFiveHourBefore := snap.Metrics["usage_five_hour"] - stripAllTime := timeWindow != "" && timeWindow != "all" - deletedCount := 0 - for key, m := range snap.Metrics { - if strings.HasPrefix(key, "source_") || - strings.HasPrefix(key, "client_") || - strings.HasPrefix(key, "tool_") || - strings.HasPrefix(key, "model_") || - strings.HasPrefix(key, "project_") || - strings.HasPrefix(key, "provider_") || - strings.HasPrefix(key, "lang_") || - strings.HasPrefix(key, "interface_") || - isStaleActivityMetric(key) { - delete(snap.Metrics, key) - deletedCount++ - } else if stripAllTime && m.Window == "all-time" && !isCurrentStateMetric(key) { - // Strip cumulative "all-time" metrics from the limit_snapshot so the - // TUI doesn't show misleading all-time counts under a windowed badge. - // Current-state metrics (plan quotas, billing, team) are preserved. 
- delete(snap.Metrics, key) - deletedCount++ - } - } - _, hasFiveHourAfter := snap.Metrics["usage_five_hour"] - core.Tracef("[usage_view] %s: cleanup deleted %d/%d metrics, usage_five_hour before=%v after=%v", - snap.ProviderID, deletedCount, metricsBefore, hadFiveHourBefore, hasFiveHourAfter) - telemetryPrefixes := []string{"source_", "client_", "tool_", "model_", "project_", "provider_", "usage_", "analytics_"} - extendedPrefixes := append(telemetryPrefixes, "lang_", "jsonl_") - deleteByPrefixes(snap.Raw, extendedPrefixes) - deleteByPrefixes(snap.Attributes, telemetryPrefixes) - deleteByPrefixes(snap.Diagnostics, telemetryPrefixes) - for key := range snap.DailySeries { - if strings.HasPrefix(key, "usage_model_") || - strings.HasPrefix(key, "usage_source_") || - strings.HasPrefix(key, "usage_project_") || - strings.HasPrefix(key, "usage_client_") || - strings.HasPrefix(key, "tokens_client_") || - key == "analytics_cost" || - key == "analytics_requests" || - key == "analytics_tokens" { - delete(snap.DailySeries, key) - } - } - - // Replace stale template ModelUsage with time-windowed records from - // telemetry. The template's ModelUsage represents the full billing cycle - // and would be misleading for shorter time windows. 
- snap.ModelUsage = nil - modelCostTotal := 0.0 - for _, model := range agg.Models { - mk := sanitizeMetricID(model.Model) - snap.Metrics["model_"+mk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(model.InputTokens), Unit: "tokens", Window: windowLabel} - snap.Metrics["model_"+mk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(model.OutputTokens), Unit: "tokens", Window: windowLabel} - snap.Metrics["model_"+mk+"_cached_tokens"] = core.Metric{Used: core.Float64Ptr(model.CachedTokens), Unit: "tokens", Window: windowLabel} - snap.Metrics["model_"+mk+"_reasoning_tokens"] = core.Metric{Used: core.Float64Ptr(model.Reasoning), Unit: "tokens", Window: windowLabel} - snap.Metrics["model_"+mk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(model.CostUSD), Unit: "USD", Window: windowLabel} - snap.Metrics["model_"+mk+"_requests"] = core.Metric{Used: core.Float64Ptr(model.Requests), Unit: "requests", Window: windowLabel} - snap.Metrics["model_"+mk+"_requests_today"] = core.Metric{Used: core.Float64Ptr(model.Requests1d), Unit: "requests", Window: "1d"} - modelCostTotal += model.CostUSD - snap.ModelUsage = append(snap.ModelUsage, core.ModelUsageRecord{ - RawModelID: model.Model, - RawSource: "telemetry", - Window: windowLabel, - InputTokens: core.Float64Ptr(model.InputTokens), - OutputTokens: core.Float64Ptr(model.OutputTokens), - CachedTokens: core.Float64Ptr(model.CachedTokens), - ReasoningTokens: core.Float64Ptr(model.Reasoning), - TotalTokens: core.Float64Ptr(model.TotalTokens), - CostUSD: core.Float64Ptr(model.CostUSD), - Requests: core.Float64Ptr(model.Requests), - }) - } - // When telemetry events lack cost attribution but the provider's API - // supplied per-model cost data (e.g. Cursor's GetAggregatedUsageEvents), - // restore the API model costs so the Model Burn section shows the real - // per-model breakdown instead of a single "unattributed" entry. 
- telemetryCostInsufficient := authoritativeCost > 0 && modelCostTotal < authoritativeCost*0.1 - if telemetryCostInsufficient && len(savedAPIModelCosts) > 0 { - for key, m := range savedAPIModelCosts { - snap.Metrics[key] = m - } - core.Tracef("[usage_view] %s: restored %d API model cost metrics (telemetry cost %.2f << authoritative %.2f)", - snap.ProviderID, len(savedAPIModelCosts), modelCostTotal, authoritativeCost) - } else if len(agg.Models) > 0 { - // Only compute unattributed model cost when telemetry has meaningful - // cost data. When agg.Models is empty (no events in window), the - // authoritativeCost represents the full billing cycle — attributing - // it as "unattributed" would be misleading for the selected time range. - if delta := authoritativeCost - modelCostTotal; authoritativeCost > 0 && delta > 0.000001 { - uk := "model_unattributed" - snap.Metrics[uk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(delta), Unit: "USD", Window: windowLabel} - snap.SetDiagnostic("telemetry_unattributed_model_cost_usd", fmt.Sprintf("%.6f", delta)) - } - } - - if !strings.EqualFold(strings.TrimSpace(snap.ProviderID), "codex") { - providerCostTotal := 0.0 - for _, provider := range agg.Providers { - pk := sanitizeMetricID(provider.Provider) - snap.Metrics["provider_"+pk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(provider.CostUSD), Unit: "USD", Window: windowLabel} - snap.Metrics["provider_"+pk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(provider.Input), Unit: "tokens", Window: windowLabel} - snap.Metrics["provider_"+pk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(provider.Output), Unit: "tokens", Window: windowLabel} - snap.Metrics["provider_"+pk+"_requests"] = core.Metric{Used: core.Float64Ptr(provider.Requests), Unit: "requests", Window: windowLabel} - providerCostTotal += provider.CostUSD - } - if delta := authoritativeCost - providerCostTotal; authoritativeCost > 0 && delta > 0.000001 { - uk := "provider_unattributed" - 
snap.Metrics[uk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(delta), Unit: "USD", Window: windowLabel} - snap.SetDiagnostic("telemetry_unattributed_provider_cost_usd", fmt.Sprintf("%.6f", delta)) - } - } - - for _, source := range agg.Sources { - sk := sanitizeMetricID(source.Source) - // Only emit source_*_requests_today (used by TUI's today-fallback path). - // source_*_requests is intentionally omitted: client_*_requests covers the - // same data, and emitting both causes the TUI to double-count requests due - // to Go's random map iteration order. - snap.Metrics["source_"+sk+"_requests_today"] = core.Metric{Used: core.Float64Ptr(source.Requests1d), Unit: "requests", Window: "1d"} - - snap.Metrics["client_"+sk+"_total_tokens"] = core.Metric{Used: core.Float64Ptr(source.Tokens), Unit: "tokens", Window: windowLabel} - snap.Metrics["client_"+sk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(source.Input), Unit: "tokens", Window: windowLabel} - snap.Metrics["client_"+sk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(source.Output), Unit: "tokens", Window: windowLabel} - snap.Metrics["client_"+sk+"_cached_tokens"] = core.Metric{Used: core.Float64Ptr(source.Cached), Unit: "tokens", Window: windowLabel} - snap.Metrics["client_"+sk+"_reasoning_tokens"] = core.Metric{Used: core.Float64Ptr(source.Reasoning), Unit: "tokens", Window: windowLabel} - snap.Metrics["client_"+sk+"_requests"] = core.Metric{Used: core.Float64Ptr(source.Requests), Unit: "requests", Window: windowLabel} - snap.Metrics["client_"+sk+"_sessions"] = core.Metric{Used: core.Float64Ptr(source.Sessions), Unit: "sessions", Window: windowLabel} - } - for _, project := range agg.Projects { - pk := sanitizeMetricID(project.Project) - if pk == "" { - continue - } - snap.Metrics["project_"+pk+"_requests"] = core.Metric{Used: core.Float64Ptr(project.Requests), Unit: "requests", Window: windowLabel} - snap.Metrics["project_"+pk+"_requests_today"] = core.Metric{Used: 
core.Float64Ptr(project.Requests1d), Unit: "requests", Window: "1d"} - } - - var totalToolCalls float64 - var totalToolCallsOK float64 - var totalToolCallsError float64 - var totalToolCallsAborted float64 - for _, tool := range agg.Tools { - tk := sanitizeMetricID(tool.Tool) - snap.Metrics["tool_"+tk] = core.Metric{Used: core.Float64Ptr(tool.Calls), Unit: "calls", Window: windowLabel} - snap.Metrics["tool_"+tk+"_today"] = core.Metric{Used: core.Float64Ptr(tool.Calls1d), Unit: "calls", Window: "1d"} - totalToolCalls += tool.Calls - totalToolCallsOK += tool.CallsOK - totalToolCallsError += tool.CallsError - totalToolCallsAborted += tool.CallsAborted - } - if totalToolCalls > 0 { - snap.Metrics["tool_calls_total"] = core.Metric{Used: core.Float64Ptr(totalToolCalls), Unit: "calls", Window: windowLabel} - snap.Metrics["tool_completed"] = core.Metric{Used: core.Float64Ptr(totalToolCallsOK), Unit: "calls", Window: windowLabel} - snap.Metrics["tool_errored"] = core.Metric{Used: core.Float64Ptr(totalToolCallsError), Unit: "calls", Window: windowLabel} - snap.Metrics["tool_cancelled"] = core.Metric{Used: core.Float64Ptr(totalToolCallsAborted), Unit: "calls", Window: windowLabel} - successRate := 0.0 - if totalToolCalls > 0 { - successRate = (totalToolCallsOK / totalToolCalls) * 100 - } - snap.Metrics["tool_success_rate"] = core.Metric{Used: core.Float64Ptr(successRate), Unit: "%", Window: windowLabel} - } - - // MCP server metrics. 
- var mcpTotalCalls, mcpTotalCalls1d float64 - for _, srv := range agg.MCPServers { - sk := sanitizeMetricID(srv.Server) - snap.Metrics["mcp_"+sk+"_total"] = core.Metric{Used: core.Float64Ptr(srv.Calls), Unit: "calls", Window: windowLabel} - snap.Metrics["mcp_"+sk+"_total_today"] = core.Metric{Used: core.Float64Ptr(srv.Calls1d), Unit: "calls", Window: "1d"} - mcpTotalCalls += srv.Calls - mcpTotalCalls1d += srv.Calls1d - for _, fn := range srv.Functions { - fk := sanitizeMetricID(fn.Function) - snap.Metrics["mcp_"+sk+"_"+fk] = core.Metric{Used: core.Float64Ptr(fn.Calls), Unit: "calls", Window: windowLabel} - } - } - if mcpTotalCalls > 0 { - snap.Metrics["mcp_calls_total"] = core.Metric{Used: core.Float64Ptr(mcpTotalCalls), Unit: "calls", Window: windowLabel} - snap.Metrics["mcp_calls_total_today"] = core.Metric{Used: core.Float64Ptr(mcpTotalCalls1d), Unit: "calls", Window: "1d"} - snap.Metrics["mcp_servers_active"] = core.Metric{Used: core.Float64Ptr(float64(len(agg.MCPServers))), Unit: "servers", Window: windowLabel} - } - - for _, lang := range agg.Languages { - lk := sanitizeMetricID(lang.Language) - snap.Metrics["lang_"+lk] = core.Metric{Used: core.Float64Ptr(lang.Requests), Unit: "requests", Window: windowLabel} - } - - // Emit windowed activity metrics. 
- act := agg.Activity - if act.Messages > 0 { - snap.Metrics["messages_today"] = core.Metric{Used: core.Float64Ptr(act.Messages), Unit: "messages", Window: windowLabel} - } - if act.Sessions > 0 { - snap.Metrics["sessions_today"] = core.Metric{Used: core.Float64Ptr(act.Sessions), Unit: "sessions", Window: windowLabel} - } - if act.ToolCalls > 0 { - snap.Metrics["tool_calls_today"] = core.Metric{Used: core.Float64Ptr(act.ToolCalls), Unit: "calls", Window: windowLabel} - snap.Metrics["7d_tool_calls"] = core.Metric{Used: core.Float64Ptr(act.ToolCalls), Unit: "calls", Window: windowLabel} - } - if act.InputTokens > 0 { - snap.Metrics["today_input_tokens"] = core.Metric{Used: core.Float64Ptr(act.InputTokens), Unit: "tokens", Window: windowLabel} - } - if act.OutputTokens > 0 { - snap.Metrics["today_output_tokens"] = core.Metric{Used: core.Float64Ptr(act.OutputTokens), Unit: "tokens", Window: windowLabel} - } - if act.TotalCost > 0 { - snap.Metrics["today_api_cost"] = core.Metric{Used: core.Float64Ptr(act.TotalCost), Unit: "USD", Window: windowLabel} - } - - // Emit windowed code stats. - cs := agg.CodeStats - if cs.FilesChanged > 0 { - snap.Metrics["composer_files_changed"] = core.Metric{Used: core.Float64Ptr(cs.FilesChanged), Unit: "files", Window: windowLabel} - } - if cs.LinesAdded > 0 { - snap.Metrics["composer_lines_added"] = core.Metric{Used: core.Float64Ptr(cs.LinesAdded), Unit: "lines", Window: windowLabel} - } - if cs.LinesRemoved > 0 { - snap.Metrics["composer_lines_removed"] = core.Metric{Used: core.Float64Ptr(cs.LinesRemoved), Unit: "lines", Window: windowLabel} - } - - // Emit window-level aggregate metrics for the TUI header/tile display. 
- var windowRequests, windowCost, windowTokens float64 - for _, model := range agg.Models { - windowRequests += model.Requests - windowCost += model.CostUSD - windowTokens += model.TotalTokens - } - if windowRequests > 0 { - snap.Metrics["window_requests"] = core.Metric{Used: core.Float64Ptr(windowRequests), Unit: "requests", Window: windowLabel} - } - if windowCost > 0 { - snap.Metrics["window_cost"] = core.Metric{Used: core.Float64Ptr(windowCost), Unit: "USD", Window: windowLabel} - } - if windowTokens > 0 { - snap.Metrics["window_tokens"] = core.Metric{Used: core.Float64Ptr(windowTokens), Unit: "tokens", Window: windowLabel} - } - - snap.DailySeries["analytics_cost"] = pointsFromDaily(agg.Daily, func(v telemetryDayPoint) float64 { return v.CostUSD }) - snap.DailySeries["analytics_requests"] = pointsFromDaily(agg.Daily, func(v telemetryDayPoint) float64 { return v.Requests }) - snap.DailySeries["analytics_tokens"] = pointsFromDaily(agg.Daily, func(v telemetryDayPoint) float64 { return v.Tokens }) - // Fixed-window cost metrics (7d_api_cost, 5h_block_cost, all_time_api_cost, - // usage_daily, usage_weekly) are preserved from the provider template — they - // come from the provider's Fetch() with real API data. We do NOT re-emit - // them here because agg.Daily is already filtered to the selected time - // window, so usageCostWindowsUTC would produce incorrect values (e.g. - // "7d cost" would equal "3d cost" when the user picks a 3-day window). 
- - for model, series := range agg.ModelDaily { - snap.DailySeries["usage_model_"+sanitizeMetricID(model)] = series - } - for source, series := range agg.SourceDaily { - snap.DailySeries["usage_source_"+sanitizeMetricID(source)] = series - } - for project, series := range agg.ProjectDaily { - snap.DailySeries["usage_project_"+sanitizeMetricID(project)] = series - } - for client, series := range agg.ClientDaily { - snap.DailySeries["usage_client_"+sanitizeMetricID(client)] = series - } - for client, series := range agg.ClientTokens { - snap.DailySeries["tokens_client_"+sanitizeMetricID(client)] = series - } - - snap.SetAttribute("telemetry_view", "canonical") - snap.SetAttribute("telemetry_source_of_truth", "canonical_usage_events") - snap.SetAttribute("telemetry_last_event_at", agg.LastOccurred) - if strings.TrimSpace(agg.Scope) != "" { - snap.SetAttribute("telemetry_scope", agg.Scope) - } - if strings.TrimSpace(agg.AccountID) != "" { - snap.SetAttribute("telemetry_scope_account_id", agg.AccountID) - } - snap.SetDiagnostic("telemetry_event_count", fmt.Sprintf("%d", agg.EventCount)) -} - // queryTelemetryActiveProviders returns the set of provider IDs that have at least // one telemetry event in the database, regardless of time window. This is used to // distinguish providers that have a telemetry adapter (but may have no events in the @@ -1536,103 +1240,4 @@ func normalizeProviderIDs(in []string) []string { return result } -func pointsFromDaily(in []telemetryDayPoint, pick func(telemetryDayPoint) float64) []core.TimePoint { - return lo.Map(in, func(row telemetryDayPoint, _ int) core.TimePoint { - return core.TimePoint{Date: row.Day, Value: pick(row)} - }) -} - -// isStaleActivityMetric returns true for metrics that are computed by the provider -// with hardcoded time windows (today/7d/all-time) and should be replaced by -// telemetry-windowed equivalents. -func isStaleActivityMetric(key string) bool { - // Activity counters with hardcoded time windows. 
- switch key { - case "messages_today", "sessions_today", "tool_calls_today", - "7d_tool_calls", "all_time_tool_calls", "tool_calls_total", - "tool_completed", "tool_errored", "tool_cancelled", "tool_success_rate", - "today_input_tokens", "today_output_tokens", - "7d_input_tokens", "7d_output_tokens", - "all_time_input_tokens", "all_time_output_tokens", - "all_time_cache_read_tokens", "all_time_cache_create_tokens", - "all_time_cache_create_5m_tokens", "all_time_cache_create_1h_tokens", - "all_time_reasoning_tokens", - "today_api_cost", - "burn_rate", - "composer_lines_added", "composer_lines_removed", - "composer_files_changed": - return true - } - // Fixed-window cost metrics from provider Fetch() are preserved — - // the telemetry view does NOT re-emit them (it only has windowed data). - switch key { - case "7d_api_cost", "all_time_api_cost", "5h_block_cost": - return false - } - // Prefixed tokens/cost metrics from providers. - if strings.HasPrefix(key, "tokens_today_") || - strings.HasPrefix(key, "input_tokens_") || - strings.HasPrefix(key, "output_tokens_") || - strings.HasPrefix(key, "today_") || - strings.HasPrefix(key, "7d_") || - strings.HasPrefix(key, "all_time_") || - strings.HasPrefix(key, "5h_block_") || - strings.HasPrefix(key, "project_") || - strings.HasPrefix(key, "agent_") { - return true - } - return false -} - -// isCurrentStateMetric returns true for metrics that represent the current state -// of a plan, billing cycle, or team — values that are always "latest" regardless -// of time window. These are preserved even when stripping all-time cumulative -// metrics for windowed views. 
-func isCurrentStateMetric(key string) bool { - if strings.HasPrefix(key, "plan_") || - strings.HasPrefix(key, "billing_") || - strings.HasPrefix(key, "team_") || - strings.HasPrefix(key, "spend_") || - strings.HasPrefix(key, "individual_") { - return true - } - switch key { - case "today_cost", - "7d_api_cost", "all_time_api_cost", "5h_block_cost", - "usage_daily", "usage_weekly", "usage_five_hour": - return true - } - return false -} - -func usageAuthoritativeCost(snap core.UsageSnapshot) float64 { - if m, ok := snap.Metrics["credit_balance"]; ok && m.Used != nil && *m.Used > 0 { - return *m.Used - } - if m, ok := snap.Metrics["spend_limit"]; ok && m.Used != nil && *m.Used > 0 { - return *m.Used - } - if m, ok := snap.Metrics["plan_total_spend_usd"]; ok && m.Used != nil && *m.Used > 0 { - return *m.Used - } - if m, ok := snap.Metrics["credits"]; ok && m.Used != nil && *m.Used > 0 { - return *m.Used - } - return 0 -} - -func sortedSeriesFromByDay(byDay map[string]float64) []core.TimePoint { - days := lo.Keys(byDay) - sort.Strings(days) - - out := make([]core.TimePoint, 0, len(days)) - for _, day := range days { - out = append(out, core.TimePoint{ - Date: day, - Value: byDay[day], - }) - } - return out -} - // parseMCPToolName extracts server and function from an MCP tool name. 
diff --git a/internal/telemetry/usage_view_projection.go b/internal/telemetry/usage_view_projection.go new file mode 100644 index 0000000..d73871b --- /dev/null +++ b/internal/telemetry/usage_view_projection.go @@ -0,0 +1,351 @@ +package telemetry + +import ( + "fmt" + "sort" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/samber/lo" +) + +func applyUsageViewToSnapshot(snap *core.UsageSnapshot, agg *telemetryUsageAgg, timeWindow core.TimeWindow) { + if snap == nil || agg == nil { + return + } + authoritativeCost := usageAuthoritativeCost(*snap) + windowLabel := string(timeWindow) + snap.EnsureMaps() + if snap.DailySeries == nil { + snap.DailySeries = make(map[string][]core.TimePoint) + } + + savedAPIModelCosts := make(map[string]core.Metric) + for key, metric := range snap.Metrics { + if strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cost") && metric.Window == "billing-cycle" { + savedAPIModelCosts[key] = metric + } + } + + metricsBefore := len(snap.Metrics) + _, hadFiveHourBefore := snap.Metrics["usage_five_hour"] + stripAllTime := timeWindow != "" && timeWindow != "all" + deletedCount := 0 + for key, metric := range snap.Metrics { + if strings.HasPrefix(key, "source_") || + strings.HasPrefix(key, "client_") || + strings.HasPrefix(key, "tool_") || + strings.HasPrefix(key, "model_") || + strings.HasPrefix(key, "project_") || + strings.HasPrefix(key, "provider_") || + strings.HasPrefix(key, "lang_") || + strings.HasPrefix(key, "interface_") || + isStaleActivityMetric(key) { + delete(snap.Metrics, key) + deletedCount++ + } else if stripAllTime && metric.Window == "all-time" && !isCurrentStateMetric(key) { + delete(snap.Metrics, key) + deletedCount++ + } + } + _, hasFiveHourAfter := snap.Metrics["usage_five_hour"] + core.Tracef("[usage_view] %s: cleanup deleted %d/%d metrics, usage_five_hour before=%v after=%v", + snap.ProviderID, deletedCount, metricsBefore, hadFiveHourBefore, hasFiveHourAfter) + telemetryPrefixes := 
[]string{"source_", "client_", "tool_", "model_", "project_", "provider_", "usage_", "analytics_"} + extendedPrefixes := append(telemetryPrefixes, "lang_", "jsonl_") + deleteByPrefixes(snap.Raw, extendedPrefixes) + deleteByPrefixes(snap.Attributes, telemetryPrefixes) + deleteByPrefixes(snap.Diagnostics, telemetryPrefixes) + for key := range snap.DailySeries { + if strings.HasPrefix(key, "usage_model_") || + strings.HasPrefix(key, "usage_source_") || + strings.HasPrefix(key, "usage_project_") || + strings.HasPrefix(key, "usage_client_") || + strings.HasPrefix(key, "tokens_client_") || + key == "analytics_cost" || + key == "analytics_requests" || + key == "analytics_tokens" { + delete(snap.DailySeries, key) + } + } + + snap.ModelUsage = nil + modelCostTotal := 0.0 + for _, model := range agg.Models { + mk := sanitizeMetricID(model.Model) + snap.Metrics["model_"+mk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(model.InputTokens), Unit: "tokens", Window: windowLabel} + snap.Metrics["model_"+mk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(model.OutputTokens), Unit: "tokens", Window: windowLabel} + snap.Metrics["model_"+mk+"_cached_tokens"] = core.Metric{Used: core.Float64Ptr(model.CachedTokens), Unit: "tokens", Window: windowLabel} + snap.Metrics["model_"+mk+"_reasoning_tokens"] = core.Metric{Used: core.Float64Ptr(model.Reasoning), Unit: "tokens", Window: windowLabel} + snap.Metrics["model_"+mk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(model.CostUSD), Unit: "USD", Window: windowLabel} + snap.Metrics["model_"+mk+"_requests"] = core.Metric{Used: core.Float64Ptr(model.Requests), Unit: "requests", Window: windowLabel} + snap.Metrics["model_"+mk+"_requests_today"] = core.Metric{Used: core.Float64Ptr(model.Requests1d), Unit: "requests", Window: "1d"} + modelCostTotal += model.CostUSD + snap.ModelUsage = append(snap.ModelUsage, core.ModelUsageRecord{ + RawModelID: model.Model, + RawSource: "telemetry", + Window: windowLabel, + InputTokens: 
core.Float64Ptr(model.InputTokens), + OutputTokens: core.Float64Ptr(model.OutputTokens), + CachedTokens: core.Float64Ptr(model.CachedTokens), + ReasoningTokens: core.Float64Ptr(model.Reasoning), + TotalTokens: core.Float64Ptr(model.TotalTokens), + CostUSD: core.Float64Ptr(model.CostUSD), + Requests: core.Float64Ptr(model.Requests), + }) + } + telemetryCostInsufficient := authoritativeCost > 0 && modelCostTotal < authoritativeCost*0.1 + if telemetryCostInsufficient && len(savedAPIModelCosts) > 0 { + for key, metric := range savedAPIModelCosts { + snap.Metrics[key] = metric + } + core.Tracef("[usage_view] %s: restored %d API model cost metrics (telemetry cost %.2f << authoritative %.2f)", + snap.ProviderID, len(savedAPIModelCosts), modelCostTotal, authoritativeCost) + } else if len(agg.Models) > 0 { + if delta := authoritativeCost - modelCostTotal; authoritativeCost > 0 && delta > 0.000001 { + snap.Metrics["model_unattributed_cost_usd"] = core.Metric{Used: core.Float64Ptr(delta), Unit: "USD", Window: windowLabel} + snap.SetDiagnostic("telemetry_unattributed_model_cost_usd", fmt.Sprintf("%.6f", delta)) + } + } + + if !strings.EqualFold(strings.TrimSpace(snap.ProviderID), "codex") { + providerCostTotal := 0.0 + for _, provider := range agg.Providers { + pk := sanitizeMetricID(provider.Provider) + snap.Metrics["provider_"+pk+"_cost_usd"] = core.Metric{Used: core.Float64Ptr(provider.CostUSD), Unit: "USD", Window: windowLabel} + snap.Metrics["provider_"+pk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(provider.Input), Unit: "tokens", Window: windowLabel} + snap.Metrics["provider_"+pk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(provider.Output), Unit: "tokens", Window: windowLabel} + snap.Metrics["provider_"+pk+"_requests"] = core.Metric{Used: core.Float64Ptr(provider.Requests), Unit: "requests", Window: windowLabel} + providerCostTotal += provider.CostUSD + } + if delta := authoritativeCost - providerCostTotal; authoritativeCost > 0 && delta > 0.000001 { 
+ snap.Metrics["provider_unattributed_cost_usd"] = core.Metric{Used: core.Float64Ptr(delta), Unit: "USD", Window: windowLabel} + snap.SetDiagnostic("telemetry_unattributed_provider_cost_usd", fmt.Sprintf("%.6f", delta)) + } + } + + for _, source := range agg.Sources { + sk := sanitizeMetricID(source.Source) + snap.Metrics["source_"+sk+"_requests_today"] = core.Metric{Used: core.Float64Ptr(source.Requests1d), Unit: "requests", Window: "1d"} + snap.Metrics["client_"+sk+"_total_tokens"] = core.Metric{Used: core.Float64Ptr(source.Tokens), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_input_tokens"] = core.Metric{Used: core.Float64Ptr(source.Input), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_output_tokens"] = core.Metric{Used: core.Float64Ptr(source.Output), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_cached_tokens"] = core.Metric{Used: core.Float64Ptr(source.Cached), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_reasoning_tokens"] = core.Metric{Used: core.Float64Ptr(source.Reasoning), Unit: "tokens", Window: windowLabel} + snap.Metrics["client_"+sk+"_requests"] = core.Metric{Used: core.Float64Ptr(source.Requests), Unit: "requests", Window: windowLabel} + snap.Metrics["client_"+sk+"_sessions"] = core.Metric{Used: core.Float64Ptr(source.Sessions), Unit: "sessions", Window: windowLabel} + } + for _, project := range agg.Projects { + pk := sanitizeMetricID(project.Project) + if pk == "" { + continue + } + snap.Metrics["project_"+pk+"_requests"] = core.Metric{Used: core.Float64Ptr(project.Requests), Unit: "requests", Window: windowLabel} + snap.Metrics["project_"+pk+"_requests_today"] = core.Metric{Used: core.Float64Ptr(project.Requests1d), Unit: "requests", Window: "1d"} + } + + var totalToolCalls, totalToolCallsOK, totalToolCallsError, totalToolCallsAborted float64 + for _, tool := range agg.Tools { + tk := sanitizeMetricID(tool.Tool) + snap.Metrics["tool_"+tk] = core.Metric{Used: 
core.Float64Ptr(tool.Calls), Unit: "calls", Window: windowLabel} + snap.Metrics["tool_"+tk+"_today"] = core.Metric{Used: core.Float64Ptr(tool.Calls1d), Unit: "calls", Window: "1d"} + totalToolCalls += tool.Calls + totalToolCallsOK += tool.CallsOK + totalToolCallsError += tool.CallsError + totalToolCallsAborted += tool.CallsAborted + } + if totalToolCalls > 0 { + snap.Metrics["tool_calls_total"] = core.Metric{Used: core.Float64Ptr(totalToolCalls), Unit: "calls", Window: windowLabel} + snap.Metrics["tool_completed"] = core.Metric{Used: core.Float64Ptr(totalToolCallsOK), Unit: "calls", Window: windowLabel} + snap.Metrics["tool_errored"] = core.Metric{Used: core.Float64Ptr(totalToolCallsError), Unit: "calls", Window: windowLabel} + snap.Metrics["tool_cancelled"] = core.Metric{Used: core.Float64Ptr(totalToolCallsAborted), Unit: "calls", Window: windowLabel} + successRate := (totalToolCallsOK / totalToolCalls) * 100 + snap.Metrics["tool_success_rate"] = core.Metric{Used: core.Float64Ptr(successRate), Unit: "%", Window: windowLabel} + } + + var mcpTotalCalls, mcpTotalCalls1d float64 + for _, server := range agg.MCPServers { + sk := sanitizeMetricID(server.Server) + snap.Metrics["mcp_"+sk+"_total"] = core.Metric{Used: core.Float64Ptr(server.Calls), Unit: "calls", Window: windowLabel} + snap.Metrics["mcp_"+sk+"_total_today"] = core.Metric{Used: core.Float64Ptr(server.Calls1d), Unit: "calls", Window: "1d"} + mcpTotalCalls += server.Calls + mcpTotalCalls1d += server.Calls1d + for _, function := range server.Functions { + fk := sanitizeMetricID(function.Function) + snap.Metrics["mcp_"+sk+"_"+fk] = core.Metric{Used: core.Float64Ptr(function.Calls), Unit: "calls", Window: windowLabel} + } + } + if mcpTotalCalls > 0 { + snap.Metrics["mcp_calls_total"] = core.Metric{Used: core.Float64Ptr(mcpTotalCalls), Unit: "calls", Window: windowLabel} + snap.Metrics["mcp_calls_total_today"] = core.Metric{Used: core.Float64Ptr(mcpTotalCalls1d), Unit: "calls", Window: "1d"} + 
snap.Metrics["mcp_servers_active"] = core.Metric{Used: core.Float64Ptr(float64(len(agg.MCPServers))), Unit: "servers", Window: windowLabel} + } + + for _, language := range agg.Languages { + lk := sanitizeMetricID(language.Language) + snap.Metrics["lang_"+lk] = core.Metric{Used: core.Float64Ptr(language.Requests), Unit: "requests", Window: windowLabel} + } + + act := agg.Activity + if act.Messages > 0 { + snap.Metrics["messages_today"] = core.Metric{Used: core.Float64Ptr(act.Messages), Unit: "messages", Window: windowLabel} + } + if act.Sessions > 0 { + snap.Metrics["sessions_today"] = core.Metric{Used: core.Float64Ptr(act.Sessions), Unit: "sessions", Window: windowLabel} + } + if act.ToolCalls > 0 { + snap.Metrics["tool_calls_today"] = core.Metric{Used: core.Float64Ptr(act.ToolCalls), Unit: "calls", Window: windowLabel} + snap.Metrics["7d_tool_calls"] = core.Metric{Used: core.Float64Ptr(act.ToolCalls), Unit: "calls", Window: windowLabel} + } + if act.InputTokens > 0 { + snap.Metrics["today_input_tokens"] = core.Metric{Used: core.Float64Ptr(act.InputTokens), Unit: "tokens", Window: windowLabel} + } + if act.OutputTokens > 0 { + snap.Metrics["today_output_tokens"] = core.Metric{Used: core.Float64Ptr(act.OutputTokens), Unit: "tokens", Window: windowLabel} + } + if act.TotalCost > 0 { + snap.Metrics["today_api_cost"] = core.Metric{Used: core.Float64Ptr(act.TotalCost), Unit: "USD", Window: windowLabel} + } + + codeStats := agg.CodeStats + if codeStats.FilesChanged > 0 { + snap.Metrics["composer_files_changed"] = core.Metric{Used: core.Float64Ptr(codeStats.FilesChanged), Unit: "files", Window: windowLabel} + } + if codeStats.LinesAdded > 0 { + snap.Metrics["composer_lines_added"] = core.Metric{Used: core.Float64Ptr(codeStats.LinesAdded), Unit: "lines", Window: windowLabel} + } + if codeStats.LinesRemoved > 0 { + snap.Metrics["composer_lines_removed"] = core.Metric{Used: core.Float64Ptr(codeStats.LinesRemoved), Unit: "lines", Window: windowLabel} + } + + var 
windowRequests, windowCost, windowTokens float64 + for _, model := range agg.Models { + windowRequests += model.Requests + windowCost += model.CostUSD + windowTokens += model.TotalTokens + } + if windowRequests > 0 { + snap.Metrics["window_requests"] = core.Metric{Used: core.Float64Ptr(windowRequests), Unit: "requests", Window: windowLabel} + } + if windowCost > 0 { + snap.Metrics["window_cost"] = core.Metric{Used: core.Float64Ptr(windowCost), Unit: "USD", Window: windowLabel} + } + if windowTokens > 0 { + snap.Metrics["window_tokens"] = core.Metric{Used: core.Float64Ptr(windowTokens), Unit: "tokens", Window: windowLabel} + } + + snap.DailySeries["analytics_cost"] = pointsFromDaily(agg.Daily, func(point telemetryDayPoint) float64 { return point.CostUSD }) + snap.DailySeries["analytics_requests"] = pointsFromDaily(agg.Daily, func(point telemetryDayPoint) float64 { return point.Requests }) + snap.DailySeries["analytics_tokens"] = pointsFromDaily(agg.Daily, func(point telemetryDayPoint) float64 { return point.Tokens }) + + for model, series := range agg.ModelDaily { + snap.DailySeries["usage_model_"+sanitizeMetricID(model)] = series + } + for source, series := range agg.SourceDaily { + snap.DailySeries["usage_source_"+sanitizeMetricID(source)] = series + } + for project, series := range agg.ProjectDaily { + snap.DailySeries["usage_project_"+sanitizeMetricID(project)] = series + } + for client, series := range agg.ClientDaily { + snap.DailySeries["usage_client_"+sanitizeMetricID(client)] = series + } + for client, series := range agg.ClientTokens { + snap.DailySeries["tokens_client_"+sanitizeMetricID(client)] = series + } + + snap.SetAttribute("telemetry_view", "canonical") + snap.SetAttribute("telemetry_source_of_truth", "canonical_usage_events") + snap.SetAttribute("telemetry_last_event_at", agg.LastOccurred) + if strings.TrimSpace(agg.Scope) != "" { + snap.SetAttribute("telemetry_scope", agg.Scope) + } + if strings.TrimSpace(agg.AccountID) != "" { + 
snap.SetAttribute("telemetry_scope_account_id", agg.AccountID) + } + snap.SetDiagnostic("telemetry_event_count", fmt.Sprintf("%d", agg.EventCount)) +} + +func pointsFromDaily(in []telemetryDayPoint, pick func(telemetryDayPoint) float64) []core.TimePoint { + return lo.Map(in, func(row telemetryDayPoint, _ int) core.TimePoint { + return core.TimePoint{Date: row.Day, Value: pick(row)} + }) +} + +func isStaleActivityMetric(key string) bool { + switch key { + case "messages_today", "sessions_today", "tool_calls_today", + "7d_tool_calls", "all_time_tool_calls", "tool_calls_total", + "tool_completed", "tool_errored", "tool_cancelled", "tool_success_rate", + "today_input_tokens", "today_output_tokens", + "7d_input_tokens", "7d_output_tokens", + "all_time_input_tokens", "all_time_output_tokens", + "all_time_cache_read_tokens", "all_time_cache_create_tokens", + "all_time_cache_create_5m_tokens", "all_time_cache_create_1h_tokens", + "all_time_reasoning_tokens", + "today_api_cost", + "burn_rate", + "composer_lines_added", "composer_lines_removed", + "composer_files_changed": + return true + case "7d_api_cost", "all_time_api_cost", "5h_block_cost": + return false + } + if strings.HasPrefix(key, "tokens_today_") || + strings.HasPrefix(key, "input_tokens_") || + strings.HasPrefix(key, "output_tokens_") || + strings.HasPrefix(key, "today_") || + strings.HasPrefix(key, "7d_") || + strings.HasPrefix(key, "all_time_") || + strings.HasPrefix(key, "5h_block_") || + strings.HasPrefix(key, "project_") || + strings.HasPrefix(key, "agent_") { + return true + } + return false +} + +func isCurrentStateMetric(key string) bool { + if strings.HasPrefix(key, "plan_") || + strings.HasPrefix(key, "billing_") || + strings.HasPrefix(key, "team_") || + strings.HasPrefix(key, "spend_") || + strings.HasPrefix(key, "individual_") { + return true + } + switch key { + case "today_cost", "7d_api_cost", "all_time_api_cost", "5h_block_cost", "usage_daily", "usage_weekly", "usage_five_hour": + return true + } 
+ return false +} + +func usageAuthoritativeCost(snap core.UsageSnapshot) float64 { + if metric, ok := snap.Metrics["credit_balance"]; ok && metric.Used != nil && *metric.Used > 0 { + return *metric.Used + } + if metric, ok := snap.Metrics["spend_limit"]; ok && metric.Used != nil && *metric.Used > 0 { + return *metric.Used + } + if metric, ok := snap.Metrics["plan_total_spend_usd"]; ok && metric.Used != nil && *metric.Used > 0 { + return *metric.Used + } + if metric, ok := snap.Metrics["credits"]; ok && metric.Used != nil && *metric.Used > 0 { + return *metric.Used + } + return 0 +} + +func sortedSeriesFromByDay(byDay map[string]float64) []core.TimePoint { + days := lo.Keys(byDay) + sort.Strings(days) + + out := make([]core.TimePoint, 0, len(days)) + for _, day := range days { + out = append(out, core.TimePoint{ + Date: day, + Value: byDay[day], + }) + } + return out +} From 2579f4ac8a02ada2615ea8375752345b208d2c8c Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 13:08:35 +0100 Subject: [PATCH 09/32] refactor: split telemetry queries and openrouter generations --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 9 +- internal/providers/openrouter/generations.go | 659 +++++++++++++++ internal/providers/openrouter/openrouter.go | 678 +-------------- internal/telemetry/usage_view.go | 795 +----------------- internal/telemetry/usage_view_languages.go | 101 +++ internal/telemetry/usage_view_queries.go | 676 +++++++++++++++ 6 files changed, 1455 insertions(+), 1463 deletions(-) create mode 100644 internal/providers/openrouter/generations.go create mode 100644 internal/telemetry/usage_view_languages.go create mode 100644 internal/telemetry/usage_view_queries.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index ab5cdb9..7577aba 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -40,6 +40,9 @@ This table captures every issue 
found in this pass. It is broad and high-signal, | R20 | Fixed | TUI model/client/provider parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/tiles_composition.go` | Model, client, provider, upstream-provider, and interface-client aggregation/parsing now live in shared core extractors, leaving the TUI composition layer as a thin adapter over typed breakdown entries. | The remaining TUI parsing drift is now mostly in analytics/detail-specific sections rather than the main composition bars. | | R21 | Fixed | OpenRouter provider-resolution split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go` | Hosting-provider resolution, BYOK cost inference, and provider-name heuristics moved out of the main OpenRouter provider file into a dedicated helper unit. | Continue splitting analytics/generation pagination/projection concerns. | | R22 | Fixed | Telemetry snapshot projection split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_projection.go` | Snapshot projection, stale-metric cleanup, daily-series projection, and windowed metric emission moved out of the main usage-view file into a dedicated projection unit. | Continue with the same split for the SQL/query layer. | +| R23 | Fixed | Telemetry query-layer split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_languages.go` | The SQL aggregation/query helpers and language-inference helpers moved out of the main usage-view file into dedicated query/language units, leaving the main file focused on orchestration and shared aggregate types. | Continue shrinking orchestration/materialization into smaller units if needed. 
| +| R24 | Fixed | OpenRouter generation-path split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/generations.go` | Generation payload types and generation-fetch/enrichment/aggregation logic moved out of the main OpenRouter provider file into a dedicated generation unit. | Continue with analytics/client/API helper splits. | +| R25 | Fixed | OpenRouter clock injection | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/generations.go` | OpenRouter’s time-sensitive analytics and generation flows now use an injectable clock instead of reading `time.Now()` directly in the provider hot path. | Extend the same pattern to remaining providers and analytics helpers. | ## Action Table @@ -48,11 +51,11 @@ This table captures every issue found in this pass. It is broad and high-signal, | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go:393-584`, `internal/dashboardapp/service.go` | The side effects are now injected, but `Model` still owns a very large amount of event-handling and state-transition logic. | Continue splitting update/action logic into smaller TUI units and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. 
| | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go:663-729`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go` | The main composition bars now consume shared extractors, but analytics/detail-specific sections still decode some raw metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | -| A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go` | `openrouter.go` is smaller after the provider-resolution split, but it still mixes auth probing, credits, keys, analytics parsing, generation pagination, metadata enrichment, and output projection in one large file. | Continue splitting into `api_client`, `analytics`, `generations`, `projection`, and `types` units. | Easier maintenance, smaller diff surface, faster targeted testing. | +| A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go`, `internal/providers/openrouter/generations.go` | `openrouter.go` is materially smaller after the provider-resolution and generation-path splits, but it still mixes auth probing, credits, keys, analytics parsing, and some output/projection helpers. | Continue splitting into `api_client`, `analytics`, and remaining projection/helper units. | Easier maintenance, smaller diff surface, faster targeted testing. | | A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. 
| Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. | -| A6 | P1 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go` | `usage_view.go` is materially smaller after the helper/projection split, but it still mixes query planning, SQL execution, aggregation orchestration, and query-specific shaping logic. | Continue splitting into `query_*`, `aggregate_*`, and remaining orchestration units. Add a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | +| A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go` | The usage-view code is materially smaller after the helper/projection/query splits, but the orchestration/materialization path still owns temp-table lifecycle, query fanout, and aggregate assembly in one place. | Continue splitting remaining orchestration/materialization concerns and consider a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | | A7 | P2 | Daemon service monolith | `internal/daemon/server.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_loops.go` | The daemon is materially less coupled after the logging/cache/http/loop split, but polling, collection, retention, and spool maintenance still share one large runtime helper unit. | Continue splitting the loop-heavy runtime into `polling`, `collection`, `retention`, and `spool` units. | Lower mental load and easier concurrency review. 
| -| A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/openrouter/openrouter.go:728`, `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Cursor’s main time-sensitive path now uses an injectable clock, but several other providers and analytics helpers still read `time.Now()` directly, often mixing local time and UTC. | Extend the clock abstraction to the remaining provider and analytics subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | +| A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Cursor and OpenRouter now use injectable clocks in their main time-sensitive paths, but other providers and analytics helpers still read `time.Now()` directly, often mixing local time and UTC. | Extend the clock abstraction to the remaining provider and analytics subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. 
| Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | | A15 | P3 | Performance optimization opportunity in render path | `internal/tui/model.go:441-450`, `internal/tui/tiles_composition.go:302-322`, `internal/tui/detail.go:752-1046`, `internal/tui/analytics.go:663-729` | The UI recomputes display/composition structures from raw metric maps repeatedly during rendering. It is correct, but the work is duplicated across views and frames. | Cache derived display/composition sections per snapshot update instead of rebuilding them in each view path. | Lower render cost and less duplicated parsing logic. | diff --git a/internal/providers/openrouter/generations.go b/internal/providers/openrouter/generations.go new file mode 100644 index 0000000..420542e --- /dev/null +++ b/internal/providers/openrouter/generations.go @@ -0,0 +1,659 @@ +package openrouter + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +type generationEntry struct { + ID string `json:"id"` + Model string `json:"model"` + TotalCost float64 `json:"total_cost"` + Usage float64 `json:"usage"` + IsByok bool `json:"is_byok"` + UpstreamInferenceCost *float64 `json:"upstream_inference_cost"` + Cancelled bool `json:"cancelled"` + PromptTokens int `json:"tokens_prompt"` + CompletionTokens int `json:"tokens_completion"` + NativePromptTokens *int `json:"native_tokens_prompt"` + NativeCompletionTokens *int `json:"native_tokens_completion"` + NativeReasoningTokens *int `json:"native_tokens_reasoning"` + NativeCachedTokens *int `json:"native_tokens_cached"` + NativeImageTokens *int `json:"native_tokens_completion_images"` + CreatedAt string `json:"created_at"` + Streamed bool `json:"streamed"` + GenerationTime *int 
`json:"generation_time"` + Latency *int `json:"latency"` + ProviderName string `json:"provider_name"` + Provider string `json:"provider"` + ProviderID string `json:"provider_id"` + ProviderSlug string `json:"provider_slug"` + UpstreamProvider string `json:"upstream_provider"` + UpstreamProviderName string `json:"upstream_provider_name"` + CacheDiscount *float64 `json:"cache_discount"` + Origin string `json:"origin"` + AppID *int `json:"app_id"` + NumMediaPrompt *int `json:"num_media_prompt"` + NumMediaCompletion *int `json:"num_media_completion"` + NumInputAudioPrompt *int `json:"num_input_audio_prompt"` + NumSearchResults *int `json:"num_search_results"` + Finish string `json:"finish_reason"` + NativeFinish string `json:"native_finish_reason"` + UpstreamID string `json:"upstream_id"` + ModerationLatency *int `json:"moderation_latency"` + ExternalUser string `json:"external_user"` + APIType string `json:"api_type"` + Router string `json:"router"` + ProviderResponses []generationProviderResponse `json:"provider_responses"` +} + +type generationProviderResponse struct { + ProviderName string `json:"provider_name"` + Provider string `json:"provider"` + ProviderID string `json:"provider_id"` + Status *int `json:"status"` +} + +type generationStatsResponse struct { + Data []generationEntry `json:"data"` +} + +type generationDetailResponse struct { + Data generationEntry `json:"data"` +} + +func (p *Provider) fetchGenerationStats(ctx context.Context, baseURL, apiKey string, snap *core.UsageSnapshot) error { + allGenerations, err := p.fetchAllGenerations(ctx, baseURL, apiKey) + if err != nil { + if errors.Is(err, errGenerationListUnsupported) { + snap.Raw["generation_note"] = "generation list endpoint unavailable without IDs" + snap.Raw["generations_fetched"] = "0" + return nil + } + return err + } + + if len(allGenerations) == 0 { + snap.Raw["generations_fetched"] = "0" + return nil + } + + detailLookups, detailHits := p.enrichGenerationProviderMetadata(ctx, baseURL, 
apiKey, allGenerations) + if detailLookups > 0 { + snap.Raw["generation_provider_detail_lookups"] = fmt.Sprintf("%d", detailLookups) + snap.Raw["generation_provider_detail_hits"] = fmt.Sprintf("%d", detailHits) + } + + snap.Raw["generations_fetched"] = fmt.Sprintf("%d", len(allGenerations)) + + now := p.now().UTC() + todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC) + sevenDaysAgo := now.AddDate(0, 0, -7) + burnCutoff := now.Add(-60 * time.Minute) + + modelStatsMap := make(map[string]*modelStats) + providerStatsMap := make(map[string]*providerStats) + + var todayPrompt, todayCompletion, todayRequests int + var todayNativePrompt, todayNativeCompletion int + var todayReasoning, todayCached, todayImageTokens int + var todayMediaPrompt, todayMediaCompletion, todayAudioInputs, todaySearchResults, todayCancelled int + var todayStreamed int + var todayCost float64 + var todayLatencyMs, todayLatencyCount int + var todayGenerationMs, todayGenerationCount int + var todayModerationMs, todayModerationCount int + var totalRequests int + totalCancelled := 0 + apiTypeCountsToday := make(map[string]int) + finishReasonCounts := make(map[string]int) + originCounts := make(map[string]int) + routerCounts := make(map[string]int) + + var cost7d, cost30d, burnCost float64 + var todayByokCost, cost7dByok, cost30dByok float64 + + dailyCost := make(map[string]float64) + dailyRequests := make(map[string]float64) + dailyProviderTokens := make(map[string]map[string]float64) + dailyProviderRequests := make(map[string]map[string]float64) + dailyModelTokens := make(map[string]map[string]float64) + providerResolutionCounts := make(map[providerResolutionSource]int) + + for _, generation := range allGenerations { + totalRequests++ + generationCost := generation.TotalCost + if generationCost == 0 && generation.Usage > 0 { + generationCost = generation.Usage + } + + if generation.Cancelled { + totalCancelled++ + } + + ts, err := time.Parse(time.RFC3339, 
generation.CreatedAt) + if err != nil { + ts, err = time.Parse(time.RFC3339Nano, generation.CreatedAt) + if err != nil { + continue + } + } + + cost30d += generationCost + if ts.After(sevenDaysAgo) { + cost7d += generationCost + } + byokCost := generationByokCost(generation) + cost30dByok += byokCost + if ts.After(sevenDaysAgo) { + cost7dByok += byokCost + } + if ts.After(burnCutoff) { + burnCost += generationCost + } + + dateKey := ts.UTC().Format("2006-01-02") + dailyCost[dateKey] += generationCost + dailyRequests[dateKey]++ + + modelKey := normalizeModelName(generation.Model) + if modelKey == "" { + modelKey = "unknown" + } + if _, ok := dailyModelTokens[modelKey]; !ok { + dailyModelTokens[modelKey] = make(map[string]float64) + } + dailyModelTokens[modelKey][dateKey] += float64(generation.PromptTokens + generation.CompletionTokens) + + ms, ok := modelStatsMap[modelKey] + if !ok { + ms = &modelStats{Providers: make(map[string]int)} + modelStatsMap[modelKey] = ms + } + ms.Requests++ + ms.PromptTokens += generation.PromptTokens + ms.CompletionTokens += generation.CompletionTokens + if generation.NativePromptTokens != nil { + ms.NativePrompt += *generation.NativePromptTokens + } + if generation.NativeCompletionTokens != nil { + ms.NativeCompletion += *generation.NativeCompletionTokens + } + if generation.NativeReasoningTokens != nil { + ms.ReasoningTokens += *generation.NativeReasoningTokens + } + if generation.NativeCachedTokens != nil { + ms.CachedTokens += *generation.NativeCachedTokens + } + if generation.NativeImageTokens != nil { + ms.ImageTokens += *generation.NativeImageTokens + } + ms.TotalCost += generationCost + if generation.Latency != nil && *generation.Latency > 0 { + ms.TotalLatencyMs += *generation.Latency + ms.LatencyCount++ + } + if generation.GenerationTime != nil && *generation.GenerationTime > 0 { + ms.TotalGenMs += *generation.GenerationTime + ms.GenerationCount++ + } + if generation.ModerationLatency != nil && *generation.ModerationLatency > 0 
{ + ms.TotalModeration += *generation.ModerationLatency + ms.ModerationCount++ + } + if generation.CacheDiscount != nil && *generation.CacheDiscount > 0 { + ms.CacheDiscountUSD += *generation.CacheDiscount + } + hostingProvider, source := resolveGenerationHostingProviderWithSource(generation) + providerResolutionCounts[source]++ + if hostingProvider != "" { + ms.Providers[hostingProvider]++ + } + + providerKey := hostingProvider + if providerKey == "" { + providerKey = "unknown" + } + providerClientKey := sanitizeName(strings.ToLower(providerKey)) + if dailyProviderTokens[providerClientKey] == nil { + dailyProviderTokens[providerClientKey] = make(map[string]float64) + } + requestTokens := float64(generation.PromptTokens + generation.CompletionTokens) + if generation.NativeReasoningTokens != nil { + requestTokens += float64(*generation.NativeReasoningTokens) + } + dailyProviderTokens[providerClientKey][dateKey] += requestTokens + if dailyProviderRequests[providerClientKey] == nil { + dailyProviderRequests[providerClientKey] = make(map[string]float64) + } + dailyProviderRequests[providerClientKey][dateKey]++ + + ps, ok := providerStatsMap[providerKey] + if !ok { + ps = &providerStats{Models: make(map[string]int)} + providerStatsMap[providerKey] = ps + } + ps.Requests++ + ps.PromptTokens += generation.PromptTokens + ps.CompletionTokens += generation.CompletionTokens + if generation.NativeReasoningTokens != nil { + ps.ReasoningTokens += *generation.NativeReasoningTokens + } + ps.ByokCost += byokCost + ps.TotalCost += generationCost + ps.Models[modelKey]++ + + if !ts.After(todayStart) { + continue + } + + todayRequests++ + todayPrompt += generation.PromptTokens + todayCompletion += generation.CompletionTokens + if generation.NativePromptTokens != nil { + todayNativePrompt += *generation.NativePromptTokens + } + if generation.NativeCompletionTokens != nil { + todayNativeCompletion += *generation.NativeCompletionTokens + } + todayCost += generationCost + todayByokCost += 
byokCost + if generation.Cancelled { + todayCancelled++ + } + if generation.Streamed { + todayStreamed++ + } + if generation.NativeReasoningTokens != nil { + todayReasoning += *generation.NativeReasoningTokens + } + if generation.NativeCachedTokens != nil { + todayCached += *generation.NativeCachedTokens + } + if generation.NativeImageTokens != nil { + todayImageTokens += *generation.NativeImageTokens + } + if generation.NumMediaPrompt != nil { + todayMediaPrompt += *generation.NumMediaPrompt + } + if generation.NumMediaCompletion != nil { + todayMediaCompletion += *generation.NumMediaCompletion + } + if generation.NumInputAudioPrompt != nil { + todayAudioInputs += *generation.NumInputAudioPrompt + } + if generation.NumSearchResults != nil { + todaySearchResults += *generation.NumSearchResults + } + if generation.Latency != nil && *generation.Latency > 0 { + todayLatencyMs += *generation.Latency + todayLatencyCount++ + } + if generation.GenerationTime != nil && *generation.GenerationTime > 0 { + todayGenerationMs += *generation.GenerationTime + todayGenerationCount++ + } + if generation.ModerationLatency != nil && *generation.ModerationLatency > 0 { + todayModerationMs += *generation.ModerationLatency + todayModerationCount++ + } + if generation.APIType != "" { + apiTypeCountsToday[generation.APIType]++ + } + if generation.Finish != "" { + finishReasonCounts[generation.Finish]++ + } + if generation.Origin != "" { + originCounts[generation.Origin]++ + } + if generation.Router != "" { + routerCounts[generation.Router]++ + } + } + + if todayRequests > 0 { + reqs := float64(todayRequests) + snap.Metrics["today_requests"] = core.Metric{Used: &reqs, Unit: "requests", Window: "today"} + inp := float64(todayPrompt) + snap.Metrics["today_input_tokens"] = core.Metric{Used: &inp, Unit: "tokens", Window: "today"} + out := float64(todayCompletion) + snap.Metrics["today_output_tokens"] = core.Metric{Used: &out, Unit: "tokens", Window: "today"} + if todayNativePrompt > 0 { + v := 
float64(todayNativePrompt) + snap.Metrics["today_native_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} + } + if todayNativeCompletion > 0 { + v := float64(todayNativeCompletion) + snap.Metrics["today_native_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} + } + snap.Metrics["today_cost"] = core.Metric{Used: &todayCost, Unit: "USD", Window: "today"} + if todayByokCost > 0 { + snap.Metrics["today_byok_cost"] = core.Metric{Used: &todayByokCost, Unit: "USD", Window: "today"} + snap.Raw["byok_in_use"] = "true" + } + if todayReasoning > 0 { + v := float64(todayReasoning) + snap.Metrics["today_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} + } + if todayCached > 0 { + v := float64(todayCached) + snap.Metrics["today_cached_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} + } + if todayImageTokens > 0 { + v := float64(todayImageTokens) + snap.Metrics["today_image_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} + } + if todayMediaPrompt > 0 { + v := float64(todayMediaPrompt) + snap.Metrics["today_media_prompts"] = core.Metric{Used: &v, Unit: "count", Window: "today"} + } + if todayMediaCompletion > 0 { + v := float64(todayMediaCompletion) + snap.Metrics["today_media_completions"] = core.Metric{Used: &v, Unit: "count", Window: "today"} + } + if todayAudioInputs > 0 { + v := float64(todayAudioInputs) + snap.Metrics["today_audio_inputs"] = core.Metric{Used: &v, Unit: "count", Window: "today"} + } + if todaySearchResults > 0 { + v := float64(todaySearchResults) + snap.Metrics["today_search_results"] = core.Metric{Used: &v, Unit: "count", Window: "today"} + } + if todayCancelled > 0 { + v := float64(todayCancelled) + snap.Metrics["today_cancelled"] = core.Metric{Used: &v, Unit: "count", Window: "today"} + } + if todayStreamed > 0 { + v := float64(todayStreamed) + snap.Metrics["today_streamed_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "today"} + pct := 
v / reqs * 100 + snap.Metrics["today_streamed_percent"] = core.Metric{Used: &pct, Unit: "%", Window: "today"} + } + if todayLatencyCount > 0 { + avgLatency := float64(todayLatencyMs) / float64(todayLatencyCount) / 1000.0 + snap.Metrics["today_avg_latency"] = core.Metric{Used: &avgLatency, Unit: "seconds", Window: "today"} + } + if todayGenerationCount > 0 { + avgGeneration := float64(todayGenerationMs) / float64(todayGenerationCount) / 1000.0 + snap.Metrics["today_avg_generation_time"] = core.Metric{Used: &avgGeneration, Unit: "seconds", Window: "today"} + } + if todayModerationCount > 0 { + avgModeration := float64(todayModerationMs) / float64(todayModerationCount) / 1000.0 + snap.Metrics["today_avg_moderation_latency"] = core.Metric{Used: &avgModeration, Unit: "seconds", Window: "today"} + } + } + + for apiType, count := range apiTypeCountsToday { + if count <= 0 { + continue + } + v := float64(count) + snap.Metrics["today_"+sanitizeName(apiType)+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "today"} + } + if len(finishReasonCounts) > 0 { + snap.Raw["today_finish_reasons"] = summarizeTopCounts(finishReasonCounts, 4) + } + if len(originCounts) > 0 { + snap.Raw["today_origins"] = summarizeTopCounts(originCounts, 3) + } + if len(routerCounts) > 0 { + snap.Raw["today_routers"] = summarizeTopCounts(routerCounts, 3) + } + + reqs := float64(totalRequests) + snap.Metrics["recent_requests"] = core.Metric{Used: &reqs, Unit: "requests", Window: "recent"} + snap.Metrics["7d_api_cost"] = core.Metric{Used: &cost7d, Unit: "USD", Window: "7d"} + snap.Metrics["30d_api_cost"] = core.Metric{Used: &cost30d, Unit: "USD", Window: "30d"} + if cost7dByok > 0 { + snap.Metrics["7d_byok_cost"] = core.Metric{Used: &cost7dByok, Unit: "USD", Window: "7d"} + snap.Raw["byok_in_use"] = "true" + } + if cost30dByok > 0 { + snap.Metrics["30d_byok_cost"] = core.Metric{Used: &cost30dByok, Unit: "USD", Window: "30d"} + snap.Raw["byok_in_use"] = "true" + } + if burnCost > 0 { + 
burnRate := burnCost + dailyProjected := burnRate * 24 + snap.Metrics["burn_rate"] = core.Metric{Used: &burnRate, Unit: "USD/hour", Window: "1h"} + snap.Metrics["daily_projected"] = core.Metric{Used: &dailyProjected, Unit: "USD", Window: "24h"} + } + + snap.DailySeries["cost"] = mapToSortedTimePoints(dailyCost) + snap.DailySeries["requests"] = mapToSortedTimePoints(dailyRequests) + emitClientDailySeries(snap, dailyProviderTokens, dailyProviderRequests) + + type modelTokenTotal struct { + model string + total float64 + byDate map[string]float64 + } + var modelTotals []modelTokenTotal + for model, dateMap := range dailyModelTokens { + var total float64 + for _, value := range dateMap { + total += value + } + modelTotals = append(modelTotals, modelTokenTotal{model: model, total: total, byDate: dateMap}) + } + sort.Slice(modelTotals, func(i, j int) bool { + return modelTotals[i].total > modelTotals[j].total + }) + topN := 5 + if len(modelTotals) < topN { + topN = len(modelTotals) + } + for _, modelTotal := range modelTotals[:topN] { + snap.DailySeries["tokens_"+sanitizeName(modelTotal.model)] = mapToSortedTimePoints(modelTotal.byDate) + } + + hasAnalyticsModelRows := strings.TrimSpace(snap.Raw["activity_rows"]) != "" && strings.TrimSpace(snap.Raw["activity_rows"]) != "0" + if hasAnalyticsModelRows { + if analyticsRowsStale(snap, p.now().UTC()) { + snap.Raw["activity_rows_stale"] = "true" + } else { + snap.Raw["activity_rows_stale"] = "false" + } + } + emitPerModelMetrics(modelStatsMap, snap) + emitPerProviderMetrics(providerStatsMap, snap) + snap.Raw["model_mix_source"] = "generation_live" + if len(providerResolutionCounts) > 0 { + summary := make(map[string]int, len(providerResolutionCounts)) + for key, value := range providerResolutionCounts { + if value <= 0 { + continue + } + summary[string(key)] = value + } + if txt := summarizeTopCounts(summary, 8); txt != "" { + snap.Raw["provider_resolution"] = txt + } + } + modelRequests := make(map[string]float64, 
len(modelStatsMap)) + for model, stats := range modelStatsMap { + if stats == nil || stats.Requests <= 0 { + continue + } + modelRequests[model] = float64(stats.Requests) + } + emitModelDerivedToolUsageMetrics(snap, modelRequests, "30d inferred", "inferred_from_model_requests") + emitToolOutcomeMetrics(snap, totalRequests, totalCancelled, "30d") + + return nil +} + +func analyticsRowsStale(snap *core.UsageSnapshot, now time.Time) bool { + cachedAtRaw := strings.TrimSpace(snap.Raw["activity_cached_at"]) + if cachedAtRaw != "" { + if t, err := time.Parse(time.RFC3339, cachedAtRaw); err == nil { + return now.UTC().Sub(t.UTC()) > 10*time.Minute + } + } + maxDateRaw := strings.TrimSpace(snap.Raw["activity_max_date"]) + if maxDateRaw == "" { + if dateRange := strings.TrimSpace(snap.Raw["activity_date_range"]); dateRange != "" { + if idx := strings.LastIndex(dateRange, ".."); idx >= 0 { + maxDateRaw = strings.TrimSpace(dateRange[idx+2:]) + } + } + } + if maxDateRaw == "" { + return false + } + day, err := time.Parse("2006-01-02", maxDateRaw) + if err != nil { + return false + } + todayUTC := time.Date(now.UTC().Year(), now.UTC().Month(), now.UTC().Day(), 0, 0, 0, 0, time.UTC) + return day.UTC().Before(todayUTC) +} + +func (p *Provider) fetchAllGenerations(ctx context.Context, baseURL, apiKey string) ([]generationEntry, error) { + var all []generationEntry + offset := 0 + cutoff := p.now().UTC().Add(-generationMaxAge) + + for offset < maxGenerationsToFetch { + remaining := maxGenerationsToFetch - offset + limit := generationPageSize + if remaining < limit { + limit = remaining + } + + endpoint := fmt.Sprintf("%s/generation?limit=%d&offset=%d", baseURL, limit, offset) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) + if err != nil { + return all, err + } + req.Header.Set("Authorization", "Bearer "+apiKey) + + resp, err := p.Client().Do(req) + if err != nil { + return all, err + } + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err 
!= nil { + return all, err + } + if resp.StatusCode != http.StatusOK { + if resp.StatusCode == http.StatusBadRequest { + lowerBody := strings.ToLower(string(body)) + lowerMsg := strings.ToLower(parseAPIErrorMessage(body)) + if strings.Contains(lowerMsg, "expected string") && strings.Contains(lowerMsg, "id") { + return all, errGenerationListUnsupported + } + hasID := strings.Contains(lowerBody, "\"id\"") || strings.Contains(lowerBody, "\\\"id\\\"") || strings.Contains(lowerBody, "for id") + if strings.Contains(lowerBody, "expected string") && hasID { + return all, errGenerationListUnsupported + } + } + return all, fmt.Errorf("HTTP %d", resp.StatusCode) + } + + var generationStats generationStatsResponse + if err := json.Unmarshal(body, &generationStats); err != nil { + return all, err + } + + hitCutoff := false + for _, entry := range generationStats.Data { + ts, err := time.Parse(time.RFC3339, entry.CreatedAt) + if err != nil { + ts, _ = time.Parse(time.RFC3339Nano, entry.CreatedAt) + } + if !ts.IsZero() && ts.Before(cutoff) { + hitCutoff = true + break + } + all = append(all, entry) + } + + if hitCutoff || len(generationStats.Data) < limit { + break + } + offset += len(generationStats.Data) + } + return all, nil +} + +func (p *Provider) enrichGenerationProviderMetadata(ctx context.Context, baseURL, apiKey string, rows []generationEntry) (int, int) { + attempts := 0 + hits := 0 + for i := range rows { + if attempts >= maxGenerationProviderDetailLookups { + break + } + if rows[i].ID == "" { + continue + } + if providerNameFromResponses(rows[i].ProviderResponses) != "" { + continue + } + if !isLikelyRouterClientProviderName(rows[i].ProviderName) && strings.TrimSpace(rows[i].ProviderName) != "" { + continue + } + + attempts++ + detail, err := p.fetchGenerationDetail(ctx, baseURL, apiKey, rows[i].ID) + if err != nil { + continue + } + resolvedBefore := resolveGenerationHostingProvider(rows[i]) + if len(detail.ProviderResponses) > 0 { + rows[i].ProviderResponses = 
detail.ProviderResponses + } + if providerName := strings.TrimSpace(detail.ProviderName); providerName != "" { + rows[i].ProviderName = providerName + } + if upstream := strings.TrimSpace(detail.UpstreamID); upstream != "" { + rows[i].UpstreamID = upstream + } + if resolvedAfter := resolveGenerationHostingProvider(rows[i]); resolvedAfter != "" && resolvedAfter != resolvedBefore { + hits++ + } + } + return attempts, hits +} + +func (p *Provider) fetchGenerationDetail(ctx context.Context, baseURL, apiKey, generationID string) (generationEntry, error) { + if strings.TrimSpace(generationID) == "" { + return generationEntry{}, fmt.Errorf("missing generation id") + } + endpoint := fmt.Sprintf("%s/generation?id=%s", baseURL, url.QueryEscape(generationID)) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) + if err != nil { + return generationEntry{}, err + } + req.Header.Set("Authorization", "Bearer "+apiKey) + + resp, err := p.Client().Do(req) + if err != nil { + return generationEntry{}, err + } + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return generationEntry{}, err + } + if resp.StatusCode != http.StatusOK { + return generationEntry{}, fmt.Errorf("HTTP %d", resp.StatusCode) + } + + var detail generationDetailResponse + if err := json.Unmarshal(body, &detail); err != nil { + return generationEntry{}, err + } + return detail.Data, nil +} diff --git a/internal/providers/openrouter/openrouter.go b/internal/providers/openrouter/openrouter.go index b648769..adc80ac 100644 --- a/internal/providers/openrouter/openrouter.go +++ b/internal/providers/openrouter/openrouter.go @@ -8,7 +8,6 @@ import ( "io" "math" "net/http" - "net/url" "sort" "strings" "time" @@ -99,63 +98,6 @@ type keyListEntry struct { ExpiresAt *string `json:"expires_at"` } -type generationEntry struct { - ID string `json:"id"` - Model string `json:"model"` - TotalCost float64 `json:"total_cost"` - Usage float64 `json:"usage"` - IsByok bool 
`json:"is_byok"` - UpstreamInferenceCost *float64 `json:"upstream_inference_cost"` - Cancelled bool `json:"cancelled"` - PromptTokens int `json:"tokens_prompt"` - CompletionTokens int `json:"tokens_completion"` - NativePromptTokens *int `json:"native_tokens_prompt"` - NativeCompletionTokens *int `json:"native_tokens_completion"` - NativeReasoningTokens *int `json:"native_tokens_reasoning"` - NativeCachedTokens *int `json:"native_tokens_cached"` - NativeImageTokens *int `json:"native_tokens_completion_images"` - CreatedAt string `json:"created_at"` - Streamed bool `json:"streamed"` - GenerationTime *int `json:"generation_time"` - Latency *int `json:"latency"` - ProviderName string `json:"provider_name"` - Provider string `json:"provider"` - ProviderID string `json:"provider_id"` - ProviderSlug string `json:"provider_slug"` - UpstreamProvider string `json:"upstream_provider"` - UpstreamProviderName string `json:"upstream_provider_name"` - CacheDiscount *float64 `json:"cache_discount"` - Origin string `json:"origin"` - AppID *int `json:"app_id"` - NumMediaPrompt *int `json:"num_media_prompt"` - NumMediaCompletion *int `json:"num_media_completion"` - NumInputAudioPrompt *int `json:"num_input_audio_prompt"` - NumSearchResults *int `json:"num_search_results"` - Finish string `json:"finish_reason"` - NativeFinish string `json:"native_finish_reason"` - UpstreamID string `json:"upstream_id"` - ModerationLatency *int `json:"moderation_latency"` - ExternalUser string `json:"external_user"` - APIType string `json:"api_type"` - Router string `json:"router"` - ProviderResponses []generationProviderResponse `json:"provider_responses"` -} - -type generationProviderResponse struct { - ProviderName string `json:"provider_name"` - Provider string `json:"provider"` - ProviderID string `json:"provider_id"` - Status *int `json:"status"` -} - -type generationStatsResponse struct { - Data []generationEntry `json:"data"` -} - -type generationDetailResponse struct { - Data generationEntry 
`json:"data"` -} - type providerResolutionSource string const ( @@ -266,6 +208,7 @@ type endpointStats struct { type Provider struct { providerbase.Base + clock core.Clock } func New() *Provider { @@ -287,9 +230,17 @@ func New() *Provider { }, Dashboard: dashboardWidget(), }), + clock: core.SystemClock{}, } } +func (p *Provider) now() time.Time { + if p == nil || p.clock == nil { + return time.Now() + } + return p.clock.Now() +} + func (p *Provider) DetailWidget() core.DetailWidget { return core.DetailWidget{ Sections: []core.DetailSection{ @@ -725,7 +676,7 @@ func (p *Provider) fetchAnalytics(ctx context.Context, baseURL, apiKey string, s var activityEndpoint string var activityCachedAt string forbiddenMsg := "" - yesterdayUTC := time.Now().UTC().AddDate(0, 0, -1).Format("2006-01-02") + yesterdayUTC := p.now().UTC().AddDate(0, 0, -1).Format("2006-01-02") for _, endpoint := range []string{ "/activity", @@ -824,7 +775,7 @@ func (p *Provider) fetchAnalytics(ctx context.Context, baseURL, apiKey string, s endpoints := make(map[string]struct{}) activeDays := make(map[string]struct{}) - now := time.Now().UTC() + now := p.now().UTC() todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC) sevenDaysAgo := now.AddDate(0, 0, -7) thirtyDaysAgo := now.AddDate(0, 0, -30) @@ -1454,613 +1405,6 @@ func mapToSortedTimePoints(m map[string]float64) []core.TimePoint { return points } -func (p *Provider) fetchGenerationStats(ctx context.Context, baseURL, apiKey string, snap *core.UsageSnapshot) error { - allGenerations, err := p.fetchAllGenerations(ctx, baseURL, apiKey) - if err != nil { - if errors.Is(err, errGenerationListUnsupported) { - snap.Raw["generation_note"] = "generation list endpoint unavailable without IDs" - snap.Raw["generations_fetched"] = "0" - return nil - } - return err - } - - if len(allGenerations) == 0 { - snap.Raw["generations_fetched"] = "0" - return nil - } - - detailLookups, detailHits := p.enrichGenerationProviderMetadata(ctx, 
baseURL, apiKey, allGenerations) - if detailLookups > 0 { - snap.Raw["generation_provider_detail_lookups"] = fmt.Sprintf("%d", detailLookups) - snap.Raw["generation_provider_detail_hits"] = fmt.Sprintf("%d", detailHits) - } - - snap.Raw["generations_fetched"] = fmt.Sprintf("%d", len(allGenerations)) - - now := time.Now().UTC() - todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC) - sevenDaysAgo := now.AddDate(0, 0, -7) - burnCutoff := now.Add(-60 * time.Minute) - - modelStatsMap := make(map[string]*modelStats) - providerStatsMap := make(map[string]*providerStats) - - var todayPrompt, todayCompletion, todayRequests int - var todayNativePrompt, todayNativeCompletion int - var todayReasoning, todayCached, todayImageTokens int - var todayMediaPrompt, todayMediaCompletion, todayAudioInputs, todaySearchResults, todayCancelled int - var todayStreamed int - var todayCost float64 - var todayLatencyMs, todayLatencyCount int - var todayGenerationMs, todayGenerationCount int - var todayModerationMs, todayModerationCount int - var totalRequests int - totalCancelled := 0 - apiTypeCountsToday := make(map[string]int) - finishReasonCounts := make(map[string]int) - originCounts := make(map[string]int) - routerCounts := make(map[string]int) - - var cost7d, cost30d, burnCost float64 - var todayByokCost, cost7dByok, cost30dByok float64 - - dailyCost := make(map[string]float64) - dailyRequests := make(map[string]float64) - dailyProviderTokens := make(map[string]map[string]float64) - dailyProviderRequests := make(map[string]map[string]float64) - dailyModelTokens := make(map[string]map[string]float64) // model -> date -> tokens - providerResolutionCounts := make(map[providerResolutionSource]int) - - for _, g := range allGenerations { - totalRequests++ - generationCost := g.TotalCost - if generationCost == 0 && g.Usage > 0 { - generationCost = g.Usage - } - - if g.Cancelled { - totalCancelled++ - } - - ts, err := time.Parse(time.RFC3339, g.CreatedAt) - if err 
!= nil { - ts, err = time.Parse(time.RFC3339Nano, g.CreatedAt) - if err != nil { - continue - } - } - - // Period cost aggregation (all fetched generations, up to 30 days) - cost30d += generationCost - if ts.After(sevenDaysAgo) { - cost7d += generationCost - } - byokCost := generationByokCost(g) - cost30dByok += byokCost - if ts.After(sevenDaysAgo) { - cost7dByok += byokCost - } - - // Burn rate: last 60 minutes - if ts.After(burnCutoff) { - burnCost += generationCost - } - - // Daily aggregation - dateKey := ts.UTC().Format("2006-01-02") - dailyCost[dateKey] += generationCost - dailyRequests[dateKey]++ - - modelKey := normalizeModelName(g.Model) - if modelKey == "" { - modelKey = "unknown" - } - if _, ok := dailyModelTokens[modelKey]; !ok { - dailyModelTokens[modelKey] = make(map[string]float64) - } - dailyModelTokens[modelKey][dateKey] += float64(g.PromptTokens + g.CompletionTokens) - - ms, ok := modelStatsMap[modelKey] - if !ok { - ms = &modelStats{Providers: make(map[string]int)} - modelStatsMap[modelKey] = ms - } - ms.Requests++ - ms.PromptTokens += g.PromptTokens - ms.CompletionTokens += g.CompletionTokens - if g.NativePromptTokens != nil { - ms.NativePrompt += *g.NativePromptTokens - } - if g.NativeCompletionTokens != nil { - ms.NativeCompletion += *g.NativeCompletionTokens - } - if g.NativeReasoningTokens != nil { - ms.ReasoningTokens += *g.NativeReasoningTokens - } - if g.NativeCachedTokens != nil { - ms.CachedTokens += *g.NativeCachedTokens - } - if g.NativeImageTokens != nil { - ms.ImageTokens += *g.NativeImageTokens - } - ms.TotalCost += generationCost - if g.Latency != nil && *g.Latency > 0 { - ms.TotalLatencyMs += *g.Latency - ms.LatencyCount++ - } - if g.GenerationTime != nil && *g.GenerationTime > 0 { - ms.TotalGenMs += *g.GenerationTime - ms.GenerationCount++ - } - if g.ModerationLatency != nil && *g.ModerationLatency > 0 { - ms.TotalModeration += *g.ModerationLatency - ms.ModerationCount++ - } - if g.CacheDiscount != nil && *g.CacheDiscount > 0 { 
- ms.CacheDiscountUSD += *g.CacheDiscount - } - hostingProvider, source := resolveGenerationHostingProviderWithSource(g) - providerResolutionCounts[source]++ - if hostingProvider != "" { - ms.Providers[hostingProvider]++ - } - - provKey := hostingProvider - if provKey == "" { - provKey = "unknown" - } - providerClientKey := sanitizeName(strings.ToLower(provKey)) - if dailyProviderTokens[providerClientKey] == nil { - dailyProviderTokens[providerClientKey] = make(map[string]float64) - } - requestTokens := float64(g.PromptTokens + g.CompletionTokens) - if g.NativeReasoningTokens != nil { - requestTokens += float64(*g.NativeReasoningTokens) - } - dailyProviderTokens[providerClientKey][dateKey] += requestTokens - if dailyProviderRequests[providerClientKey] == nil { - dailyProviderRequests[providerClientKey] = make(map[string]float64) - } - dailyProviderRequests[providerClientKey][dateKey]++ - - ps, ok := providerStatsMap[provKey] - if !ok { - ps = &providerStats{Models: make(map[string]int)} - providerStatsMap[provKey] = ps - } - ps.Requests++ - ps.PromptTokens += g.PromptTokens - ps.CompletionTokens += g.CompletionTokens - if g.NativeReasoningTokens != nil { - ps.ReasoningTokens += *g.NativeReasoningTokens - } - ps.ByokCost += byokCost - ps.TotalCost += generationCost - ps.Models[modelKey]++ - - if !ts.After(todayStart) { - continue - } - - todayRequests++ - todayPrompt += g.PromptTokens - todayCompletion += g.CompletionTokens - if g.NativePromptTokens != nil { - todayNativePrompt += *g.NativePromptTokens - } - if g.NativeCompletionTokens != nil { - todayNativeCompletion += *g.NativeCompletionTokens - } - todayCost += generationCost - todayByokCost += byokCost - if g.Cancelled { - todayCancelled++ - } - if g.Streamed { - todayStreamed++ - } - if g.NativeReasoningTokens != nil { - todayReasoning += *g.NativeReasoningTokens - } - if g.NativeCachedTokens != nil { - todayCached += *g.NativeCachedTokens - } - if g.NativeImageTokens != nil { - todayImageTokens += 
*g.NativeImageTokens - } - if g.NumMediaPrompt != nil { - todayMediaPrompt += *g.NumMediaPrompt - } - if g.NumMediaCompletion != nil { - todayMediaCompletion += *g.NumMediaCompletion - } - if g.NumInputAudioPrompt != nil { - todayAudioInputs += *g.NumInputAudioPrompt - } - if g.NumSearchResults != nil { - todaySearchResults += *g.NumSearchResults - } - - if g.Latency != nil && *g.Latency > 0 { - todayLatencyMs += *g.Latency - todayLatencyCount++ - } - if g.GenerationTime != nil && *g.GenerationTime > 0 { - todayGenerationMs += *g.GenerationTime - todayGenerationCount++ - } - if g.ModerationLatency != nil && *g.ModerationLatency > 0 { - todayModerationMs += *g.ModerationLatency - todayModerationCount++ - } - if g.APIType != "" { - apiTypeCountsToday[g.APIType]++ - } - if g.Finish != "" { - finishReasonCounts[g.Finish]++ - } - if g.Origin != "" { - originCounts[g.Origin]++ - } - if g.Router != "" { - routerCounts[g.Router]++ - } - } - - if todayRequests > 0 { - reqs := float64(todayRequests) - snap.Metrics["today_requests"] = core.Metric{Used: &reqs, Unit: "requests", Window: "today"} - - inp := float64(todayPrompt) - snap.Metrics["today_input_tokens"] = core.Metric{Used: &inp, Unit: "tokens", Window: "today"} - - out := float64(todayCompletion) - snap.Metrics["today_output_tokens"] = core.Metric{Used: &out, Unit: "tokens", Window: "today"} - if todayNativePrompt > 0 { - v := float64(todayNativePrompt) - snap.Metrics["today_native_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} - } - if todayNativeCompletion > 0 { - v := float64(todayNativeCompletion) - snap.Metrics["today_native_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} - } - - snap.Metrics["today_cost"] = core.Metric{Used: &todayCost, Unit: "USD", Window: "today"} - if todayByokCost > 0 { - snap.Metrics["today_byok_cost"] = core.Metric{Used: &todayByokCost, Unit: "USD", Window: "today"} - snap.Raw["byok_in_use"] = "true" - } - if todayReasoning > 0 { - v := 
float64(todayReasoning) - snap.Metrics["today_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} - } - if todayCached > 0 { - v := float64(todayCached) - snap.Metrics["today_cached_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} - } - if todayImageTokens > 0 { - v := float64(todayImageTokens) - snap.Metrics["today_image_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "today"} - } - if todayMediaPrompt > 0 { - v := float64(todayMediaPrompt) - snap.Metrics["today_media_prompts"] = core.Metric{Used: &v, Unit: "count", Window: "today"} - } - if todayMediaCompletion > 0 { - v := float64(todayMediaCompletion) - snap.Metrics["today_media_completions"] = core.Metric{Used: &v, Unit: "count", Window: "today"} - } - if todayAudioInputs > 0 { - v := float64(todayAudioInputs) - snap.Metrics["today_audio_inputs"] = core.Metric{Used: &v, Unit: "count", Window: "today"} - } - if todaySearchResults > 0 { - v := float64(todaySearchResults) - snap.Metrics["today_search_results"] = core.Metric{Used: &v, Unit: "count", Window: "today"} - } - if todayCancelled > 0 { - v := float64(todayCancelled) - snap.Metrics["today_cancelled"] = core.Metric{Used: &v, Unit: "count", Window: "today"} - } - if todayStreamed > 0 { - v := float64(todayStreamed) - snap.Metrics["today_streamed_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "today"} - pct := v / reqs * 100 - snap.Metrics["today_streamed_percent"] = core.Metric{Used: &pct, Unit: "%", Window: "today"} - } - - if todayLatencyCount > 0 { - avgLatency := float64(todayLatencyMs) / float64(todayLatencyCount) / 1000.0 - snap.Metrics["today_avg_latency"] = core.Metric{Used: &avgLatency, Unit: "seconds", Window: "today"} - } - if todayGenerationCount > 0 { - avgGeneration := float64(todayGenerationMs) / float64(todayGenerationCount) / 1000.0 - snap.Metrics["today_avg_generation_time"] = core.Metric{Used: &avgGeneration, Unit: "seconds", Window: "today"} - } - if todayModerationCount > 0 
{ - avgModeration := float64(todayModerationMs) / float64(todayModerationCount) / 1000.0 - snap.Metrics["today_avg_moderation_latency"] = core.Metric{Used: &avgModeration, Unit: "seconds", Window: "today"} - } - } - - for apiType, count := range apiTypeCountsToday { - if count <= 0 { - continue - } - v := float64(count) - snap.Metrics["today_"+sanitizeName(apiType)+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "today"} - } - if len(finishReasonCounts) > 0 { - snap.Raw["today_finish_reasons"] = summarizeTopCounts(finishReasonCounts, 4) - } - if len(originCounts) > 0 { - snap.Raw["today_origins"] = summarizeTopCounts(originCounts, 3) - } - if len(routerCounts) > 0 { - snap.Raw["today_routers"] = summarizeTopCounts(routerCounts, 3) - } - - reqs := float64(totalRequests) - snap.Metrics["recent_requests"] = core.Metric{Used: &reqs, Unit: "requests", Window: "recent"} - - // Period cost metrics - snap.Metrics["7d_api_cost"] = core.Metric{Used: &cost7d, Unit: "USD", Window: "7d"} - snap.Metrics["30d_api_cost"] = core.Metric{Used: &cost30d, Unit: "USD", Window: "30d"} - if cost7dByok > 0 { - snap.Metrics["7d_byok_cost"] = core.Metric{Used: &cost7dByok, Unit: "USD", Window: "7d"} - snap.Raw["byok_in_use"] = "true" - } - if cost30dByok > 0 { - snap.Metrics["30d_byok_cost"] = core.Metric{Used: &cost30dByok, Unit: "USD", Window: "30d"} - snap.Raw["byok_in_use"] = "true" - } - - // Burn rate - if burnCost > 0 { - burnRate := burnCost // cost in the last 60 minutes ≈ cost/hour - dailyProjected := burnRate * 24 - snap.Metrics["burn_rate"] = core.Metric{Used: &burnRate, Unit: "USD/hour", Window: "1h"} - snap.Metrics["daily_projected"] = core.Metric{Used: &dailyProjected, Unit: "USD", Window: "24h"} - } - - // DailySeries: cost, requests, and per-model tokens - snap.DailySeries["cost"] = mapToSortedTimePoints(dailyCost) - snap.DailySeries["requests"] = mapToSortedTimePoints(dailyRequests) - emitClientDailySeries(snap, dailyProviderTokens, dailyProviderRequests) - - 
// Per-model token series (top 5 models by total tokens) - type modelTokenTotal struct { - model string - total float64 - byDate map[string]float64 - } - var modelTotals []modelTokenTotal - for model, dateMap := range dailyModelTokens { - var total float64 - for _, v := range dateMap { - total += v - } - modelTotals = append(modelTotals, modelTokenTotal{model, total, dateMap}) - } - sort.Slice(modelTotals, func(i, j int) bool { - return modelTotals[i].total > modelTotals[j].total - }) - topN := 5 - if len(modelTotals) < topN { - topN = len(modelTotals) - } - for _, mt := range modelTotals[:topN] { - key := "tokens_" + sanitizeName(mt.model) - snap.DailySeries[key] = mapToSortedTimePoints(mt.byDate) - } - - hasAnalyticsModelRows := strings.TrimSpace(snap.Raw["activity_rows"]) != "" && strings.TrimSpace(snap.Raw["activity_rows"]) != "0" - if hasAnalyticsModelRows { - if analyticsRowsStale(snap, time.Now().UTC()) { - snap.Raw["activity_rows_stale"] = "true" - } else { - snap.Raw["activity_rows_stale"] = "false" - } - } - // Always compute model/provider burn from live generation feed. - // Analytics endpoints are cached by OpenRouter and can lag model mix updates. 
- emitPerModelMetrics(modelStatsMap, snap) - emitPerProviderMetrics(providerStatsMap, snap) - snap.Raw["model_mix_source"] = "generation_live" - if len(providerResolutionCounts) > 0 { - summary := make(map[string]int, len(providerResolutionCounts)) - for k, v := range providerResolutionCounts { - if v <= 0 { - continue - } - summary[string(k)] = v - } - if txt := summarizeTopCounts(summary, 8); txt != "" { - snap.Raw["provider_resolution"] = txt - } - } - modelRequests := make(map[string]float64, len(modelStatsMap)) - for model, stats := range modelStatsMap { - if stats == nil || stats.Requests <= 0 { - continue - } - modelRequests[model] = float64(stats.Requests) - } - emitModelDerivedToolUsageMetrics(snap, modelRequests, "30d inferred", "inferred_from_model_requests") - emitToolOutcomeMetrics(snap, totalRequests, totalCancelled, "30d") - - return nil -} - -func analyticsRowsStale(snap *core.UsageSnapshot, now time.Time) bool { - cachedAtRaw := strings.TrimSpace(snap.Raw["activity_cached_at"]) - if cachedAtRaw != "" { - if t, err := time.Parse(time.RFC3339, cachedAtRaw); err == nil { - // Activity cache older than 10 minutes is considered stale for model mix. 
- return now.UTC().Sub(t.UTC()) > 10*time.Minute - } - } - - maxDateRaw := strings.TrimSpace(snap.Raw["activity_max_date"]) - if maxDateRaw == "" { - if dateRange := strings.TrimSpace(snap.Raw["activity_date_range"]); dateRange != "" { - if idx := strings.LastIndex(dateRange, ".."); idx >= 0 { - maxDateRaw = strings.TrimSpace(dateRange[idx+2:]) - } - } - } - if maxDateRaw == "" { - return false - } - day, err := time.Parse("2006-01-02", maxDateRaw) - if err != nil { - return false - } - todayUTC := time.Date(now.UTC().Year(), now.UTC().Month(), now.UTC().Day(), 0, 0, 0, 0, time.UTC) - return day.UTC().Before(todayUTC) -} - -func (p *Provider) fetchAllGenerations(ctx context.Context, baseURL, apiKey string) ([]generationEntry, error) { - var all []generationEntry - offset := 0 - cutoff := time.Now().UTC().Add(-generationMaxAge) - - for offset < maxGenerationsToFetch { - remaining := maxGenerationsToFetch - offset - limit := generationPageSize - if remaining < limit { - limit = remaining - } - - url := fmt.Sprintf("%s/generation?limit=%d&offset=%d", baseURL, limit, offset) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return all, err - } - req.Header.Set("Authorization", "Bearer "+apiKey) - - resp, err := p.Client().Do(req) - if err != nil { - return all, err - } - body, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return all, err - } - if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusBadRequest { - lowerBody := strings.ToLower(string(body)) - lowerMsg := strings.ToLower(parseAPIErrorMessage(body)) - if strings.Contains(lowerMsg, "expected string") && strings.Contains(lowerMsg, "id") { - return all, errGenerationListUnsupported - } - hasID := strings.Contains(lowerBody, "\"id\"") || strings.Contains(lowerBody, "\\\"id\\\"") || strings.Contains(lowerBody, "for id") - if strings.Contains(lowerBody, "expected string") && hasID { - return all, errGenerationListUnsupported - } - 
} - return all, fmt.Errorf("HTTP %d", resp.StatusCode) - } - - var gen generationStatsResponse - if err := json.Unmarshal(body, &gen); err != nil { - return all, err - } - - hitCutoff := false - for _, entry := range gen.Data { - ts, err := time.Parse(time.RFC3339, entry.CreatedAt) - if err != nil { - ts, _ = time.Parse(time.RFC3339Nano, entry.CreatedAt) - } - if !ts.IsZero() && ts.Before(cutoff) { - hitCutoff = true - break - } - all = append(all, entry) - } - - if hitCutoff || len(gen.Data) < limit { - break - } - offset += len(gen.Data) - } - - return all, nil -} - -func (p *Provider) enrichGenerationProviderMetadata(ctx context.Context, baseURL, apiKey string, rows []generationEntry) (int, int) { - attempts := 0 - hits := 0 - for i := range rows { - if attempts >= maxGenerationProviderDetailLookups { - break - } - if rows[i].ID == "" { - continue - } - if providerNameFromResponses(rows[i].ProviderResponses) != "" { - continue - } - if !isLikelyRouterClientProviderName(rows[i].ProviderName) && strings.TrimSpace(rows[i].ProviderName) != "" { - continue - } - - attempts++ - detail, err := p.fetchGenerationDetail(ctx, baseURL, apiKey, rows[i].ID) - if err != nil { - continue - } - resolvedBefore := resolveGenerationHostingProvider(rows[i]) - if len(detail.ProviderResponses) > 0 { - rows[i].ProviderResponses = detail.ProviderResponses - } - if providerName := strings.TrimSpace(detail.ProviderName); providerName != "" { - rows[i].ProviderName = providerName - } - if upstream := strings.TrimSpace(detail.UpstreamID); upstream != "" { - rows[i].UpstreamID = upstream - } - if resolvedAfter := resolveGenerationHostingProvider(rows[i]); resolvedAfter != "" && resolvedAfter != resolvedBefore { - hits++ - } - } - return attempts, hits -} - -func (p *Provider) fetchGenerationDetail(ctx context.Context, baseURL, apiKey, generationID string) (generationEntry, error) { - if strings.TrimSpace(generationID) == "" { - return generationEntry{}, fmt.Errorf("missing generation id") - 
} - endpoint := fmt.Sprintf("%s/generation?id=%s", baseURL, url.QueryEscape(generationID)) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) - if err != nil { - return generationEntry{}, err - } - req.Header.Set("Authorization", "Bearer "+apiKey) - - resp, err := p.Client().Do(req) - if err != nil { - return generationEntry{}, err - } - body, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return generationEntry{}, err - } - if resp.StatusCode != http.StatusOK { - return generationEntry{}, fmt.Errorf("HTTP %d", resp.StatusCode) - } - - var detail generationDetailResponse - if err := json.Unmarshal(body, &detail); err != nil { - return generationEntry{}, err - } - return detail.Data, nil -} - func parseAPIErrorMessage(body []byte) string { var apiErr apiErrorResponse if err := json.Unmarshal(body, &apiErr); err != nil { diff --git a/internal/telemetry/usage_view.go b/internal/telemetry/usage_view.go index 6661508..4a3ae0c 100644 --- a/internal/telemetry/usage_view.go +++ b/internal/telemetry/usage_view.go @@ -4,12 +4,10 @@ import ( "context" "database/sql" "fmt" - "sort" "strings" "time" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" _ "github.com/mattn/go-sqlite3" ) @@ -132,28 +130,8 @@ type telemetryUsageAgg struct { type usageFilter struct { ProviderIDs []string AccountID string - TimeWindowHours int // 0 = no filter - materializedTbl string // if set, queries read from this temp table instead of rebuilding the CTE -} - -func clientDimensionExpr() string { - return `COALESCE( - NULLIF(TRIM( - COALESCE( - json_extract(source_payload, '$.client'), - json_extract(source_payload, '$.payload.client'), - json_extract(source_payload, '$._normalized.client'), - json_extract(source_payload, '$.cursor_source'), - json_extract(source_payload, '$.source.client'), - '' - ) - ), ''), - CASE - WHEN LOWER(TRIM(source_system)) = 'codex' THEN 'CLI' - ELSE NULL - END, - 
COALESCE(NULLIF(TRIM(source_system), ''), NULLIF(TRIM(workspace_id), ''), 'unknown') - )` + TimeWindowHours int + materializedTbl string } func applyCanonicalUsageViewWithDB( @@ -471,773 +449,4 @@ func loadUsageViewForFilter(ctx context.Context, db *sql.DB, filter usageFilter) return agg, nil } -func queryModelAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryModelAgg, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - query := usageCTE + ` - SELECT - COALESCE(NULLIF(TRIM(COALESCE(model_canonical, model_raw)), ''), 'unknown') AS model_key, - SUM(COALESCE(input_tokens, 0)) AS input_tokens, - SUM(COALESCE(output_tokens, 0)) AS output_tokens, - SUM(COALESCE(cache_read_tokens, 0) + COALESCE(cache_write_tokens, 0)) AS cached_tokens, - SUM(COALESCE(reasoning_tokens, 0)) AS reasoning_tokens, - SUM(COALESCE(total_tokens, - COALESCE(input_tokens, 0) + - COALESCE(output_tokens, 0) + - COALESCE(reasoning_tokens, 0) + - COALESCE(cache_read_tokens, 0) + - COALESCE(cache_write_tokens, 0))) AS total_tokens, - SUM(COALESCE(cost_usd, 0)) AS cost_usd, - SUM(COALESCE(requests, 1)) AS requests, - SUM(CASE WHEN date(occurred_at) = date('now') THEN COALESCE(requests, 1) ELSE 0 END) AS requests_today - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error' - GROUP BY model_key - ORDER BY total_tokens DESC, requests DESC - LIMIT 500 - ` - rows, err := db.QueryContext(ctx, query, whereArgs...) 
- if err != nil { - return nil, fmt.Errorf("canonical usage model query: %w", err) - } - defer rows.Close() - - var out []telemetryModelAgg - for rows.Next() { - var row telemetryModelAgg - if err := rows.Scan( - &row.Model, - &row.InputTokens, - &row.OutputTokens, - &row.CachedTokens, - &row.Reasoning, - &row.TotalTokens, - &row.CostUSD, - &row.Requests, - &row.Requests1d, - ); err != nil { - continue - } - out = append(out, row) - } - return out, nil -} - -func querySourceAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetrySourceAgg, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - query := usageCTE + ` - SELECT - ` + clientDimensionExpr() + ` AS source_name, - SUM(COALESCE(requests, 1)) AS requests, - SUM(CASE WHEN date(occurred_at) = date('now') THEN COALESCE(requests, 1) ELSE 0 END) AS requests_today, - SUM(COALESCE(total_tokens, - COALESCE(input_tokens, 0) + - COALESCE(output_tokens, 0) + - COALESCE(reasoning_tokens, 0) + - COALESCE(cache_read_tokens, 0) + - COALESCE(cache_write_tokens, 0))) AS total_tokens, - SUM(COALESCE(input_tokens, 0)) AS input_tokens, - SUM(COALESCE(output_tokens, 0)) AS output_tokens, - SUM(COALESCE(cache_read_tokens, 0) + COALESCE(cache_write_tokens, 0)) AS cached_tokens, - SUM(COALESCE(reasoning_tokens, 0)) AS reasoning_tokens, - COUNT(DISTINCT COALESCE(NULLIF(TRIM(session_id), ''), 'unknown')) AS sessions - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error' - GROUP BY source_name - ORDER BY requests DESC - LIMIT 500 - ` - rows, err := db.QueryContext(ctx, query, whereArgs...) 
- if err != nil { - return nil, fmt.Errorf("canonical usage source query: %w", err) - } - defer rows.Close() - - var out []telemetrySourceAgg - for rows.Next() { - var row telemetrySourceAgg - if err := rows.Scan( - &row.Source, - &row.Requests, - &row.Requests1d, - &row.Tokens, - &row.Input, - &row.Output, - &row.Cached, - &row.Reasoning, - &row.Sessions, - ); err != nil { - continue - } - out = append(out, row) - } - return out, nil -} - -func queryProjectAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryProjectAgg, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - query := usageCTE + ` - SELECT - COALESCE(NULLIF(TRIM(workspace_id), ''), '') AS project_name, - SUM(COALESCE(requests, 1)) AS requests, - SUM(CASE WHEN date(occurred_at) = date('now') THEN COALESCE(requests, 1) ELSE 0 END) AS requests_today - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error' - AND NULLIF(TRIM(workspace_id), '') IS NOT NULL - GROUP BY project_name - ORDER BY requests DESC - LIMIT 500 - ` - rows, err := db.QueryContext(ctx, query, whereArgs...) 
- if err != nil { - return nil, fmt.Errorf("canonical usage project query: %w", err) - } - defer rows.Close() - - var out []telemetryProjectAgg - for rows.Next() { - var row telemetryProjectAgg - if err := rows.Scan(&row.Project, &row.Requests, &row.Requests1d); err != nil { - continue - } - out = append(out, row) - } - return out, nil -} - -func queryToolAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryToolAgg, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - query := usageCTE + ` - SELECT - COALESCE(NULLIF(TRIM(LOWER(tool_name)), ''), 'unknown') AS tool_name, - SUM(COALESCE(requests, 1)) AS calls, - SUM(CASE WHEN date(occurred_at) = date('now') THEN COALESCE(requests, 1) ELSE 0 END) AS calls_today, - SUM(CASE WHEN status = 'ok' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_ok, - SUM(CASE WHEN date(occurred_at) = date('now') AND status = 'ok' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_ok_today, - SUM(CASE WHEN status = 'error' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_error, - SUM(CASE WHEN date(occurred_at) = date('now') AND status = 'error' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_error_today, - SUM(CASE WHEN status = 'aborted' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_aborted, - SUM(CASE WHEN date(occurred_at) = date('now') AND status = 'aborted' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_aborted_today - FROM deduped_usage - WHERE 1=1 - AND event_type = 'tool_usage' - GROUP BY tool_name - ORDER BY calls DESC - LIMIT 500 - ` - rows, err := db.QueryContext(ctx, query, whereArgs...) 
- if err != nil { - return nil, fmt.Errorf("canonical usage tool query: %w", err) - } - defer rows.Close() - - var out []telemetryToolAgg - for rows.Next() { - var row telemetryToolAgg - if err := rows.Scan( - &row.Tool, - &row.Calls, - &row.Calls1d, - &row.CallsOK, - &row.CallsOK1d, - &row.CallsError, - &row.CallsError1d, - &row.CallsAborted, - &row.CallsAborted1d, - ); err != nil { - continue - } - out = append(out, row) - } - return out, nil -} - -func queryLanguageAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryLanguageAgg, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - // Query file paths from usage events. Language is inferred in Go - // from the file extension since SQLite lacks convenient path functions. - // - // File paths live in different locations depending on the source: - // - JSONL collector: $.file or $.payload.file - // - Hook events: $.tool_input.file_path (Read/Edit/Write) - // $.tool_input.path (Grep/Glob) - // - Hook response: $.tool_response.file.filePath (Read response) - // - Cursor tracking: $.file or $.file_extension (message_usage events) - query := usageCTE + ` - SELECT - COALESCE( - NULLIF(TRIM(json_extract(source_payload, '$.file')), ''), - NULLIF(TRIM(json_extract(source_payload, '$.payload.file')), ''), - NULLIF(TRIM(json_extract(source_payload, '$.tool_input.file_path')), ''), - NULLIF(TRIM(json_extract(source_payload, '$.tool_input.path')), ''), - NULLIF(TRIM(json_extract(source_payload, '$.tool_response.file.filePath')), ''), - NULLIF(TRIM(json_extract(source_payload, '$.file_extension')), ''), - '' - ) AS file_path, - COALESCE(requests, 1) AS requests - FROM deduped_usage - WHERE event_type IN ('tool_usage', 'message_usage') - AND status != 'error' - ` - rows, err := db.QueryContext(ctx, query, whereArgs...) 
- if err != nil { - return nil, fmt.Errorf("canonical usage language query: %w", err) - } - defer rows.Close() - - langCounts := make(map[string]float64) - for rows.Next() { - var filePath string - var requests float64 - if err := rows.Scan(&filePath, &requests); err != nil { - continue - } - lang := inferLanguageFromFilePath(filePath) - if lang != "" { - langCounts[lang] += requests - } - } - - out := make([]telemetryLanguageAgg, 0, len(langCounts)) - for lang, count := range langCounts { - out = append(out, telemetryLanguageAgg{Language: lang, Requests: count}) - } - sort.Slice(out, func(i, j int) bool { - return out[i].Requests > out[j].Requests - }) - return out, nil -} - -// inferLanguageFromFilePath maps a file path, file extension, or bare -// extension string to a programming language name. -func inferLanguageFromFilePath(path string) string { - p := strings.TrimSpace(path) - if p == "" { - return "" - } - // Check base name for special files. - base := p - if idx := strings.LastIndex(p, "/"); idx >= 0 { - base = p[idx+1:] - } - if idx := strings.LastIndex(base, "\\"); idx >= 0 { - base = base[idx+1:] - } - switch strings.ToLower(base) { - case "dockerfile": - return "docker" - case "makefile": - return "make" - } - // Check file extension. - idx := strings.LastIndex(p, ".") - if idx < 0 { - // Handle bare extension without dot (e.g., "go", "py" from file_extension fields). - if lang := extToLanguage("." + strings.ToLower(p)); lang != "" { - return lang - } - return "" - } - ext := strings.ToLower(p[idx:]) - return extToLanguage(ext) -} - -// extToLanguage maps a dotted file extension to a language name. 
-func extToLanguage(ext string) string { - switch ext { - case ".go": - return "go" - case ".py": - return "python" - case ".ts", ".tsx": - return "typescript" - case ".js", ".jsx": - return "javascript" - case ".tf", ".tfvars", ".hcl": - return "terraform" - case ".sh", ".bash", ".zsh", ".fish": - return "shell" - case ".md", ".mdx": - return "markdown" - case ".json": - return "json" - case ".yml", ".yaml": - return "yaml" - case ".sql": - return "sql" - case ".rs": - return "rust" - case ".java": - return "java" - case ".c", ".h": - return "c" - case ".cc", ".cpp", ".cxx", ".hpp": - return "cpp" - case ".rb": - return "ruby" - case ".php": - return "php" - case ".swift": - return "swift" - case ".kt", ".kts": - return "kotlin" - case ".cs": - return "csharp" - case ".vue": - return "vue" - case ".svelte": - return "svelte" - case ".toml": - return "toml" - case ".xml": - return "xml" - case ".css", ".scss", ".less": - return "css" - case ".html", ".htm": - return "html" - case ".dart": - return "dart" - case ".zig": - return "zig" - case ".lua": - return "lua" - case ".r": - return "r" - case ".proto": - return "protobuf" - case ".ex", ".exs": - return "elixir" - case ".graphql", ".gql": - return "graphql" - } - return "" -} - -func queryProviderAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryProviderAgg, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - // Provider resolution order: - // 1) hook-enriched upstream provider from source payload (if present), - // 2) fallback to provider_id. - // - // Provider hosting names must come from real payload fields, not inferred - // model-id heuristics. 
- query := usageCTE + ` - SELECT - COALESCE( - NULLIF(TRIM( - COALESCE( - json_extract(source_payload, '$._normalized.upstream_provider'), - json_extract(source_payload, '$.upstream_provider'), - json_extract(source_payload, '$.payload._normalized.upstream_provider'), - json_extract(source_payload, '$.payload.upstream_provider'), - '' - ) - ), ''), - COALESCE(NULLIF(TRIM(provider_id), ''), 'unknown') - ) AS provider_name, - SUM(COALESCE(cost_usd, 0)) AS cost_usd, - SUM(COALESCE(requests, 1)) AS requests, - SUM(COALESCE(input_tokens, 0)) AS input_tokens, - SUM(COALESCE(output_tokens, 0)) AS output_tokens - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error' - GROUP BY provider_name - ORDER BY cost_usd DESC, requests DESC - LIMIT 200 - ` - rows, err := db.QueryContext(ctx, query, whereArgs...) - if err != nil { - return nil, fmt.Errorf("canonical usage provider query: %w", err) - } - defer rows.Close() - - var out []telemetryProviderAgg - for rows.Next() { - var row telemetryProviderAgg - if err := rows.Scan(&row.Provider, &row.CostUSD, &row.Requests, &row.Input, &row.Output); err != nil { - continue - } - out = append(out, row) - } - return out, nil -} - -func queryActivityAgg(ctx context.Context, db *sql.DB, filter usageFilter) (telemetryActivityAgg, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - query := usageCTE + ` - SELECT - COUNT(DISTINCT CASE WHEN event_type = 'message_usage' AND status != 'error' THEN - COALESCE(NULLIF(TRIM(message_id), ''), COALESCE(NULLIF(TRIM(turn_id), ''), dedup_key)) - END) AS messages, - COUNT(DISTINCT CASE WHEN event_type = 'message_usage' AND status != 'error' THEN - NULLIF(TRIM(session_id), '') - END) AS sessions, - SUM(CASE WHEN event_type = 'tool_usage' THEN COALESCE(requests, 1) ELSE 0 END) AS tool_calls, - SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(input_tokens, 0) ELSE 0 END) AS input_tokens, - SUM(CASE WHEN event_type = 'message_usage' AND 
status != 'error' THEN COALESCE(output_tokens, 0) ELSE 0 END) AS output_tokens, - SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(cache_read_tokens, 0) ELSE 0 END) AS cached_tokens, - SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(reasoning_tokens, 0) ELSE 0 END) AS reasoning_tokens, - SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(total_tokens, 0) ELSE 0 END) AS total_tokens, - SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(cost_usd, 0) ELSE 0 END) AS total_cost - FROM deduped_usage - WHERE 1=1 - ` - var out telemetryActivityAgg - err := db.QueryRowContext(ctx, query, whereArgs...).Scan( - &out.Messages, &out.Sessions, &out.ToolCalls, - &out.InputTokens, &out.OutputTokens, &out.CachedTokens, - &out.ReasonTokens, &out.TotalTokens, &out.TotalCost, - ) - if err != nil { - return out, fmt.Errorf("canonical usage activity query: %w", err) - } - return out, nil -} - -func queryCodeStatsAgg(ctx context.Context, db *sql.DB, filter usageFilter) (telemetryCodeStatsAgg, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - // Count distinct file paths from tool_usage events to estimate files changed. - // Only count mutating tools (edit, write, create, delete, rename, move). - // Also sum lines_added/lines_removed from message_usage event payloads - // (e.g. Cursor composer sessions store these). 
- query := usageCTE + ` - SELECT - COUNT(DISTINCT CASE - WHEN event_type = 'tool_usage' - AND (LOWER(tool_name) LIKE '%edit%' - OR LOWER(tool_name) LIKE '%write%' - OR LOWER(tool_name) LIKE '%create%' - OR LOWER(tool_name) LIKE '%delete%' - OR LOWER(tool_name) LIKE '%rename%' - OR LOWER(tool_name) LIKE '%move%') - THEN NULLIF(TRIM(COALESCE( - json_extract(source_payload, '$.file'), - json_extract(source_payload, '$.payload.file'), - json_extract(source_payload, '$.tool_input.file_path'), - json_extract(source_payload, '$.tool_input.path'), - '' - )), '') - END) AS files_changed, - SUM(COALESCE(CAST(json_extract(source_payload, '$.lines_added') AS REAL), 0)) AS lines_added, - SUM(COALESCE(CAST(json_extract(source_payload, '$.lines_removed') AS REAL), 0)) AS lines_removed - FROM deduped_usage - WHERE event_type IN ('tool_usage', 'message_usage') - AND status != 'error' - ` - var out telemetryCodeStatsAgg - err := db.QueryRowContext(ctx, query, whereArgs...).Scan(&out.FilesChanged, &out.LinesAdded, &out.LinesRemoved) - if err != nil { - return out, fmt.Errorf("canonical usage code stats query: %w", err) - } - return out, nil -} - -func queryDailyTotals(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryDayPoint, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - dailyTimeFilter := "" - if filter.TimeWindowHours <= 0 { - dailyTimeFilter = "\n\t\t\t AND occurred_at >= datetime('now', '-30 day')" - } - query := usageCTE + fmt.Sprintf(` - SELECT - date(occurred_at) AS day, - SUM(COALESCE(cost_usd, 0)) AS cost_usd, - SUM(COALESCE(requests, 1)) AS requests, - SUM(COALESCE(total_tokens, - COALESCE(input_tokens, 0) + - COALESCE(output_tokens, 0) + - COALESCE(reasoning_tokens, 0) + - COALESCE(cache_read_tokens, 0) + - COALESCE(cache_write_tokens, 0))) AS tokens - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error'%s - GROUP BY day - ORDER BY day ASC - `, dailyTimeFilter) - rows, err := db.QueryContext(ctx, query, 
whereArgs...) - if err != nil { - return nil, fmt.Errorf("canonical usage daily query: %w", err) - } - defer rows.Close() - - var out []telemetryDayPoint - for rows.Next() { - var row telemetryDayPoint - if err := rows.Scan(&row.Day, &row.CostUSD, &row.Requests, &row.Tokens); err != nil { - continue - } - out = append(out, row) - } - return out, nil -} - -func queryDailyByDimension(ctx context.Context, db *sql.DB, filter usageFilter, dimension string) (map[string][]core.TimePoint, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - dailyTimeFilter := "" - if filter.TimeWindowHours <= 0 { - dailyTimeFilter = "\n\t\t\t AND occurred_at >= datetime('now', '-30 day')" - } - var query string - - switch dimension { - case "model": - query = usageCTE + fmt.Sprintf(` - SELECT date(occurred_at) AS day, - COALESCE(NULLIF(TRIM(COALESCE(model_canonical, model_raw)), ''), 'unknown') AS dim_key, - SUM(COALESCE(requests, 1)) AS value - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error'%s - GROUP BY day, dim_key - `, dailyTimeFilter) - case "source": - query = usageCTE + fmt.Sprintf(` - SELECT date(occurred_at) AS day, - COALESCE(NULLIF(TRIM(workspace_id), ''), COALESCE(NULLIF(TRIM(source_system), ''), 'unknown')) AS dim_key, - SUM(COALESCE(requests, 1)) AS value - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error'%s - GROUP BY day, dim_key - `, dailyTimeFilter) - case "project": - query = usageCTE + fmt.Sprintf(` - SELECT date(occurred_at) AS day, - COALESCE(NULLIF(TRIM(workspace_id), ''), '') AS dim_key, - SUM(COALESCE(requests, 1)) AS value - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error' - AND NULLIF(TRIM(workspace_id), '') IS NOT NULL%s - GROUP BY day, dim_key - `, dailyTimeFilter) - case "client": - query = usageCTE + fmt.Sprintf(` - SELECT date(occurred_at) AS day, - %s AS dim_key, - SUM(COALESCE(requests, 1)) AS value - FROM deduped_usage - WHERE 
1=1 - AND event_type = 'message_usage' - AND status != 'error'%s - GROUP BY day, dim_key - `, clientDimensionExpr(), dailyTimeFilter) - default: - return map[string][]core.TimePoint{}, nil - } - - rows, err := db.QueryContext(ctx, query, whereArgs...) - if err != nil { - return nil, fmt.Errorf("canonical usage daily dimension query (%s): %w", dimension, err) - } - defer rows.Close() - - byDim := make(map[string]map[string]float64) - for rows.Next() { - var day, key string - var value float64 - if err := rows.Scan(&day, &key, &value); err != nil { - continue - } - key = sanitizeMetricID(key) - if key == "" { - key = "unknown" - } - if dimension == "project" && key == "unknown" { - continue - } - if byDim[key] == nil { - byDim[key] = make(map[string]float64) - } - byDim[key][day] += value - } - - out := make(map[string][]core.TimePoint, len(byDim)) - for key, dayMap := range byDim { - out[key] = sortedSeriesFromByDay(dayMap) - } - return out, nil -} - -func queryDailyClientTokens(ctx context.Context, db *sql.DB, filter usageFilter) (map[string][]core.TimePoint, error) { - usageCTE, whereArgs := dedupedUsageCTE(filter) - dailyTimeFilter := "" - if filter.TimeWindowHours <= 0 { - dailyTimeFilter = "\n\t\t\t AND occurred_at >= datetime('now', '-30 day')" - } - query := usageCTE + fmt.Sprintf(` - SELECT - date(occurred_at) AS day, - %s AS source_name, - SUM(COALESCE(total_tokens, - COALESCE(input_tokens, 0) + - COALESCE(output_tokens, 0) + - COALESCE(reasoning_tokens, 0) + - COALESCE(cache_read_tokens, 0) + - COALESCE(cache_write_tokens, 0))) AS tokens - FROM deduped_usage - WHERE 1=1 - AND event_type = 'message_usage' - AND status != 'error'%s - GROUP BY day, source_name - `, clientDimensionExpr(), dailyTimeFilter) - rows, err := db.QueryContext(ctx, query, whereArgs...) 
- if err != nil { - return nil, fmt.Errorf("canonical usage daily client token query: %w", err) - } - defer rows.Close() - - byClient := make(map[string]map[string]float64) - for rows.Next() { - var day, client string - var value float64 - if err := rows.Scan(&day, &client, &value); err != nil { - continue - } - client = sanitizeMetricID(client) - if client == "" { - client = "unknown" - } - if byClient[client] == nil { - byClient[client] = make(map[string]float64) - } - byClient[client][day] += value - } - - out := make(map[string][]core.TimePoint, len(byClient)) - for key, dayMap := range byClient { - out[key] = sortedSeriesFromByDay(dayMap) - } - return out, nil -} - -func dedupedUsageCTE(filter usageFilter) (string, []any) { - // If a materialized temp table exists, just alias it — no CTE rebuild needed. - if filter.materializedTbl != "" { - return fmt.Sprintf(`WITH deduped_usage AS (SELECT * FROM %s) `, filter.materializedTbl), nil - } - where, args := usageWhereClause("e", filter) - cte := fmt.Sprintf(` - WITH scoped_usage AS ( - SELECT - e.*, - COALESCE(r.source_system, '') AS source_system, - COALESCE(r.source_channel, '') AS source_channel, - COALESCE(r.source_payload, '{}') AS source_payload - FROM usage_events e - JOIN usage_raw_events r ON r.raw_event_id = e.raw_event_id - WHERE %s - AND e.event_type IN ('message_usage', 'tool_usage') - ), - ranked_usage AS ( - SELECT - scoped_usage.*, - CASE - WHEN COALESCE(NULLIF(TRIM(tool_call_id), ''), '') != '' THEN 'tool:' || LOWER(TRIM(tool_call_id)) - WHEN LOWER(TRIM(event_type)) = 'message_usage' - AND LOWER(TRIM(source_system)) = 'codex' - AND COALESCE(NULLIF(TRIM(turn_id), ''), '') != '' - THEN 'message_turn:' || LOWER(TRIM(turn_id)) - WHEN COALESCE(NULLIF(TRIM(message_id), ''), '') != '' THEN 'message:' || LOWER(TRIM(message_id)) - WHEN COALESCE(NULLIF(TRIM(turn_id), ''), '') != '' THEN 'turn:' || LOWER(TRIM(turn_id)) - ELSE 'fallback:' || dedup_key - END AS logical_event_id, - CASE 
COALESCE(NULLIF(TRIM(source_channel), ''), '') - WHEN 'hook' THEN 4 - WHEN 'sse' THEN 3 - WHEN 'sqlite' THEN 2 - WHEN 'jsonl' THEN 2 - WHEN 'api' THEN 1 - ELSE 0 - END AS source_priority, - ( - CASE WHEN COALESCE(total_tokens, 0) > 0 THEN 4 ELSE 0 END + - CASE WHEN COALESCE(cost_usd, 0) > 0 THEN 2 ELSE 0 END + - CASE WHEN COALESCE(NULLIF(TRIM(COALESCE(model_canonical, model_raw)), ''), '') != '' THEN 1 ELSE 0 END + - CASE - WHEN COALESCE(NULLIF(TRIM(provider_id), ''), '') != '' - AND LOWER(TRIM(provider_id)) NOT IN ('unknown', 'opencode') - THEN 1 - ELSE 0 - END - ) AS quality_score - FROM scoped_usage - ), - deduped_usage AS ( - SELECT * - FROM ( - SELECT - ranked_usage.*, - ROW_NUMBER() OVER ( - PARTITION BY - LOWER(TRIM(source_system)), - LOWER(TRIM(event_type)), - LOWER(TRIM(COALESCE(session_id, ''))), - logical_event_id - ORDER BY source_priority DESC, quality_score DESC, occurred_at DESC, event_id DESC - ) AS rn - FROM ranked_usage - ) - WHERE rn = 1 - ) - `, where) - return cte, args -} - -func usageWhereClause(alias string, filter usageFilter) (string, []any) { - prefix := "" - if strings.TrimSpace(alias) != "" { - prefix = strings.TrimSpace(alias) + "." - } - providerIDs := normalizeProviderIDs(filter.ProviderIDs) - if len(providerIDs) == 0 { - return prefix + "provider_id = ''", nil - } - where := "" - args := make([]any, 0, len(providerIDs)+1) - if len(providerIDs) == 1 { - where = prefix + "provider_id = ?" - args = append(args, providerIDs[0]) - } else { - placeholders := make([]string, 0, len(providerIDs)) - for _, providerID := range providerIDs { - placeholders = append(placeholders, "?") - args = append(args, providerID) - } - where = prefix + "provider_id IN (" + strings.Join(placeholders, ",") + ")" - } - if strings.TrimSpace(filter.AccountID) != "" { - where += " AND " + prefix + "account_id = ?" 
- args = append(args, strings.TrimSpace(filter.AccountID)) - } - if filter.TimeWindowHours > 0 { - where += fmt.Sprintf(" AND %soccurred_at >= datetime('now', '-%d hour')", prefix, filter.TimeWindowHours) - } - return where, args -} - -func normalizeProviderIDs(in []string) []string { - if len(in) == 0 { - return nil - } - normalized := lo.Map(in, func(s string, _ int) string { - return strings.ToLower(strings.TrimSpace(s)) - }) - result := lo.Uniq(lo.Compact(normalized)) - sort.Strings(result) - return result -} - // parseMCPToolName extracts server and function from an MCP tool name. diff --git a/internal/telemetry/usage_view_languages.go b/internal/telemetry/usage_view_languages.go new file mode 100644 index 0000000..239f1e6 --- /dev/null +++ b/internal/telemetry/usage_view_languages.go @@ -0,0 +1,101 @@ +package telemetry + +import "strings" + +func inferLanguageFromFilePath(path string) string { + p := strings.TrimSpace(path) + if p == "" { + return "" + } + base := p + if idx := strings.LastIndex(p, "/"); idx >= 0 { + base = p[idx+1:] + } + if idx := strings.LastIndex(base, "\\"); idx >= 0 { + base = base[idx+1:] + } + switch strings.ToLower(base) { + case "dockerfile": + return "docker" + case "makefile": + return "make" + } + idx := strings.LastIndex(p, ".") + if idx < 0 { + if lang := extToLanguage("." 
+ strings.ToLower(p)); lang != "" { + return lang + } + return "" + } + return extToLanguage(strings.ToLower(p[idx:])) +} + +func extToLanguage(ext string) string { + switch ext { + case ".go": + return "go" + case ".py": + return "python" + case ".ts", ".tsx": + return "typescript" + case ".js", ".jsx": + return "javascript" + case ".tf", ".tfvars", ".hcl": + return "terraform" + case ".sh", ".bash", ".zsh", ".fish": + return "shell" + case ".md", ".mdx": + return "markdown" + case ".json": + return "json" + case ".yml", ".yaml": + return "yaml" + case ".sql": + return "sql" + case ".rs": + return "rust" + case ".java": + return "java" + case ".c", ".h": + return "c" + case ".cc", ".cpp", ".cxx", ".hpp": + return "cpp" + case ".rb": + return "ruby" + case ".php": + return "php" + case ".swift": + return "swift" + case ".kt", ".kts": + return "kotlin" + case ".cs": + return "csharp" + case ".vue": + return "vue" + case ".svelte": + return "svelte" + case ".toml": + return "toml" + case ".xml": + return "xml" + case ".css", ".scss", ".less": + return "css" + case ".html", ".htm": + return "html" + case ".dart": + return "dart" + case ".zig": + return "zig" + case ".lua": + return "lua" + case ".r": + return "r" + case ".proto": + return "protobuf" + case ".ex", ".exs": + return "elixir" + case ".graphql", ".gql": + return "graphql" + } + return "" +} diff --git a/internal/telemetry/usage_view_queries.go b/internal/telemetry/usage_view_queries.go new file mode 100644 index 0000000..ce8fa59 --- /dev/null +++ b/internal/telemetry/usage_view_queries.go @@ -0,0 +1,676 @@ +package telemetry + +import ( + "context" + "database/sql" + "fmt" + "sort" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/samber/lo" +) + +func clientDimensionExpr() string { + return `COALESCE( + NULLIF(TRIM( + COALESCE( + json_extract(source_payload, '$.client'), + json_extract(source_payload, '$.payload.client'), + json_extract(source_payload, 
'$._normalized.client'), + json_extract(source_payload, '$.cursor_source'), + json_extract(source_payload, '$.source.client'), + '' + ) + ), ''), + CASE + WHEN LOWER(TRIM(source_system)) = 'codex' THEN 'CLI' + ELSE NULL + END, + COALESCE(NULLIF(TRIM(source_system), ''), NULLIF(TRIM(workspace_id), ''), 'unknown') + )` +} + +func queryModelAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryModelAgg, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + query := usageCTE + ` + SELECT + COALESCE(NULLIF(TRIM(COALESCE(model_canonical, model_raw)), ''), 'unknown') AS model_key, + SUM(COALESCE(input_tokens, 0)) AS input_tokens, + SUM(COALESCE(output_tokens, 0)) AS output_tokens, + SUM(COALESCE(cache_read_tokens, 0) + COALESCE(cache_write_tokens, 0)) AS cached_tokens, + SUM(COALESCE(reasoning_tokens, 0)) AS reasoning_tokens, + SUM(COALESCE(total_tokens, + COALESCE(input_tokens, 0) + + COALESCE(output_tokens, 0) + + COALESCE(reasoning_tokens, 0) + + COALESCE(cache_read_tokens, 0) + + COALESCE(cache_write_tokens, 0))) AS total_tokens, + SUM(COALESCE(cost_usd, 0)) AS cost_usd, + SUM(COALESCE(requests, 1)) AS requests, + SUM(CASE WHEN date(occurred_at) = date('now') THEN COALESCE(requests, 1) ELSE 0 END) AS requests_today + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error' + GROUP BY model_key + ORDER BY total_tokens DESC, requests DESC + LIMIT 500 + ` + rows, err := db.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, fmt.Errorf("canonical usage model query: %w", err) + } + defer rows.Close() + + var out []telemetryModelAgg + for rows.Next() { + var row telemetryModelAgg + if err := rows.Scan( + &row.Model, + &row.InputTokens, + &row.OutputTokens, + &row.CachedTokens, + &row.Reasoning, + &row.TotalTokens, + &row.CostUSD, + &row.Requests, + &row.Requests1d, + ); err != nil { + continue + } + out = append(out, row) + } + return out, nil +} + +func querySourceAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetrySourceAgg, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + query := usageCTE + ` + SELECT + ` + clientDimensionExpr() + ` AS source_name, + SUM(COALESCE(requests, 1)) AS requests, + SUM(CASE WHEN date(occurred_at) = date('now') THEN COALESCE(requests, 1) ELSE 0 END) AS requests_today, + SUM(COALESCE(total_tokens, + COALESCE(input_tokens, 0) + + COALESCE(output_tokens, 0) + + COALESCE(reasoning_tokens, 0) + + COALESCE(cache_read_tokens, 0) + + COALESCE(cache_write_tokens, 0))) AS total_tokens, + SUM(COALESCE(input_tokens, 0)) AS input_tokens, + SUM(COALESCE(output_tokens, 0)) AS output_tokens, + SUM(COALESCE(cache_read_tokens, 0) + COALESCE(cache_write_tokens, 0)) AS cached_tokens, + SUM(COALESCE(reasoning_tokens, 0)) AS reasoning_tokens, + COUNT(DISTINCT COALESCE(NULLIF(TRIM(session_id), ''), 'unknown')) AS sessions + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error' + GROUP BY source_name + ORDER BY requests DESC + LIMIT 500 + ` + rows, err := db.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, fmt.Errorf("canonical usage source query: %w", err) + } + defer rows.Close() + + var out []telemetrySourceAgg + for rows.Next() { + var row telemetrySourceAgg + if err := rows.Scan( + &row.Source, + &row.Requests, + &row.Requests1d, + &row.Tokens, + &row.Input, + &row.Output, + &row.Cached, + &row.Reasoning, + &row.Sessions, + ); err != nil { + continue + } + out = append(out, row) + } + return out, nil +} + +func queryProjectAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryProjectAgg, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + query := usageCTE + ` + SELECT + COALESCE(NULLIF(TRIM(workspace_id), ''), '') AS project_name, + SUM(COALESCE(requests, 1)) AS requests, + SUM(CASE WHEN date(occurred_at) = date('now') THEN COALESCE(requests, 1) ELSE 0 END) AS requests_today + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error' + AND NULLIF(TRIM(workspace_id), '') IS NOT NULL + GROUP BY project_name + ORDER BY requests DESC + LIMIT 500 + ` + rows, err := db.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, fmt.Errorf("canonical usage project query: %w", err) + } + defer rows.Close() + + var out []telemetryProjectAgg + for rows.Next() { + var row telemetryProjectAgg + if err := rows.Scan(&row.Project, &row.Requests, &row.Requests1d); err != nil { + continue + } + out = append(out, row) + } + return out, nil +} + +func queryToolAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryToolAgg, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + query := usageCTE + ` + SELECT + COALESCE(NULLIF(TRIM(LOWER(tool_name)), ''), 'unknown') AS tool_name, + SUM(COALESCE(requests, 1)) AS calls, + SUM(CASE WHEN date(occurred_at) = date('now') THEN COALESCE(requests, 1) ELSE 0 END) AS calls_today, + SUM(CASE WHEN status = 'ok' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_ok, + SUM(CASE WHEN date(occurred_at) = date('now') AND status = 'ok' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_ok_today, + SUM(CASE WHEN status = 'error' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_error, + SUM(CASE WHEN date(occurred_at) = date('now') AND status = 'error' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_error_today, + SUM(CASE WHEN status = 'aborted' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_aborted, + SUM(CASE WHEN date(occurred_at) = date('now') AND status = 'aborted' THEN COALESCE(requests, 1) ELSE 0 END) AS calls_aborted_today + FROM deduped_usage + WHERE 1=1 + AND event_type = 'tool_usage' + GROUP BY tool_name + ORDER BY calls DESC + LIMIT 500 + ` + rows, err := db.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, fmt.Errorf("canonical usage tool query: %w", err) + } + defer rows.Close() + + var out []telemetryToolAgg + for rows.Next() { + var row telemetryToolAgg + if err := rows.Scan( + &row.Tool, + &row.Calls, + &row.Calls1d, + &row.CallsOK, + &row.CallsOK1d, + &row.CallsError, + &row.CallsError1d, + &row.CallsAborted, + &row.CallsAborted1d, + ); err != nil { + continue + } + out = append(out, row) + } + return out, nil +} + +func queryLanguageAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryLanguageAgg, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + query := usageCTE + ` + SELECT + COALESCE( + NULLIF(TRIM(json_extract(source_payload, '$.file')), ''), + NULLIF(TRIM(json_extract(source_payload, '$.payload.file')), ''), + NULLIF(TRIM(json_extract(source_payload, '$.tool_input.file_path')), ''), + NULLIF(TRIM(json_extract(source_payload, '$.tool_input.path')), ''), + NULLIF(TRIM(json_extract(source_payload, '$.tool_response.file.filePath')), ''), + NULLIF(TRIM(json_extract(source_payload, '$.file_extension')), ''), + '' + ) AS file_path, + COALESCE(requests, 1) AS requests + FROM deduped_usage + WHERE event_type IN ('tool_usage', 'message_usage') + AND status != 'error' + ` + rows, err := db.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, fmt.Errorf("canonical usage language query: %w", err) + } + defer rows.Close() + + langCounts := make(map[string]float64) + for rows.Next() { + var filePath string + var requests float64 + if err := rows.Scan(&filePath, &requests); err != nil { + continue + } + lang := inferLanguageFromFilePath(filePath) + if lang != "" { + langCounts[lang] += requests + } + } + + out := make([]telemetryLanguageAgg, 0, len(langCounts)) + for lang, count := range langCounts { + out = append(out, telemetryLanguageAgg{Language: lang, Requests: count}) + } + sort.Slice(out, func(i, j int) bool { + return out[i].Requests > out[j].Requests + }) + return out, nil +} + +func queryProviderAgg(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryProviderAgg, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + query := usageCTE + ` + SELECT + COALESCE( + NULLIF(TRIM( + COALESCE( + json_extract(source_payload, '$._normalized.upstream_provider'), + json_extract(source_payload, '$.upstream_provider'), + json_extract(source_payload, '$.payload._normalized.upstream_provider'), + json_extract(source_payload, '$.payload.upstream_provider'), + '' + ) + ), ''), + COALESCE(NULLIF(TRIM(provider_id), ''), 'unknown') + ) AS provider_name, + SUM(COALESCE(cost_usd, 0)) AS cost_usd, + SUM(COALESCE(requests, 1)) AS requests, + SUM(COALESCE(input_tokens, 0)) AS input_tokens, + SUM(COALESCE(output_tokens, 0)) AS output_tokens + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error' + GROUP BY provider_name + ORDER BY cost_usd DESC, requests DESC + LIMIT 200 + ` + rows, err := db.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, fmt.Errorf("canonical usage provider query: %w", err) + } + defer rows.Close() + + var out []telemetryProviderAgg + for rows.Next() { + var row telemetryProviderAgg + if err := rows.Scan(&row.Provider, &row.CostUSD, &row.Requests, &row.Input, &row.Output); err != nil { + continue + } + out = append(out, row) + } + return out, nil +} + +func queryActivityAgg(ctx context.Context, db *sql.DB, filter usageFilter) (telemetryActivityAgg, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + query := usageCTE + ` + SELECT + COUNT(DISTINCT CASE WHEN event_type = 'message_usage' AND status != 'error' THEN + COALESCE(NULLIF(TRIM(message_id), ''), COALESCE(NULLIF(TRIM(turn_id), ''), dedup_key)) + END) AS messages, + COUNT(DISTINCT CASE WHEN event_type = 'message_usage' AND status != 'error' THEN + NULLIF(TRIM(session_id), '') + END) AS sessions, + SUM(CASE WHEN event_type = 'tool_usage' THEN COALESCE(requests, 1) ELSE 0 END) AS tool_calls, + SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(input_tokens, 0) ELSE 0 END) AS input_tokens, + SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(output_tokens, 0) ELSE 0 END) AS output_tokens, + SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(cache_read_tokens, 0) ELSE 0 END) AS cached_tokens, + SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(reasoning_tokens, 0) ELSE 0 END) AS reasoning_tokens, + SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(total_tokens, 0) ELSE 0 END) AS total_tokens, + SUM(CASE WHEN event_type = 'message_usage' AND status != 'error' THEN COALESCE(cost_usd, 0) ELSE 0 END) AS total_cost + FROM deduped_usage + WHERE 1=1 + ` + var out telemetryActivityAgg + err := db.QueryRowContext(ctx, query, whereArgs...).Scan( + &out.Messages, &out.Sessions, &out.ToolCalls, + &out.InputTokens, &out.OutputTokens, &out.CachedTokens, + 
&out.ReasonTokens, &out.TotalTokens, &out.TotalCost, + ) + if err != nil { + return out, fmt.Errorf("canonical usage activity query: %w", err) + } + return out, nil +} + +func queryCodeStatsAgg(ctx context.Context, db *sql.DB, filter usageFilter) (telemetryCodeStatsAgg, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + query := usageCTE + ` + SELECT + COUNT(DISTINCT CASE + WHEN event_type = 'tool_usage' + AND (LOWER(tool_name) LIKE '%edit%' + OR LOWER(tool_name) LIKE '%write%' + OR LOWER(tool_name) LIKE '%create%' + OR LOWER(tool_name) LIKE '%delete%' + OR LOWER(tool_name) LIKE '%rename%' + OR LOWER(tool_name) LIKE '%move%') + THEN NULLIF(TRIM(COALESCE( + json_extract(source_payload, '$.file'), + json_extract(source_payload, '$.payload.file'), + json_extract(source_payload, '$.tool_input.file_path'), + json_extract(source_payload, '$.tool_input.path'), + '' + )), '') + END) AS files_changed, + SUM(COALESCE(CAST(json_extract(source_payload, '$.lines_added') AS REAL), 0)) AS lines_added, + SUM(COALESCE(CAST(json_extract(source_payload, '$.lines_removed') AS REAL), 0)) AS lines_removed + FROM deduped_usage + WHERE event_type IN ('tool_usage', 'message_usage') + AND status != 'error' + ` + var out telemetryCodeStatsAgg + err := db.QueryRowContext(ctx, query, whereArgs...).Scan(&out.FilesChanged, &out.LinesAdded, &out.LinesRemoved) + if err != nil { + return out, fmt.Errorf("canonical usage code stats query: %w", err) + } + return out, nil +} + +func queryDailyTotals(ctx context.Context, db *sql.DB, filter usageFilter) ([]telemetryDayPoint, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + dailyTimeFilter := "" + if filter.TimeWindowHours <= 0 { + dailyTimeFilter = "\n\t\t\t AND occurred_at >= datetime('now', '-30 day')" + } + query := usageCTE + fmt.Sprintf(` + SELECT + date(occurred_at) AS day, + SUM(COALESCE(cost_usd, 0)) AS cost_usd, + SUM(COALESCE(requests, 1)) AS requests, + SUM(COALESCE(total_tokens, + COALESCE(input_tokens, 0) + + 
COALESCE(output_tokens, 0) + + COALESCE(reasoning_tokens, 0) + + COALESCE(cache_read_tokens, 0) + + COALESCE(cache_write_tokens, 0))) AS tokens + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error'%s + GROUP BY day + ORDER BY day ASC + `, dailyTimeFilter) + rows, err := db.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, fmt.Errorf("canonical usage daily query: %w", err) + } + defer rows.Close() + + var out []telemetryDayPoint + for rows.Next() { + var row telemetryDayPoint + if err := rows.Scan(&row.Day, &row.CostUSD, &row.Requests, &row.Tokens); err != nil { + continue + } + out = append(out, row) + } + return out, nil +} + +func queryDailyByDimension(ctx context.Context, db *sql.DB, filter usageFilter, dimension string) (map[string][]core.TimePoint, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + dailyTimeFilter := "" + if filter.TimeWindowHours <= 0 { + dailyTimeFilter = "\n\t\t\t AND occurred_at >= datetime('now', '-30 day')" + } + var query string + + switch dimension { + case "model": + query = usageCTE + fmt.Sprintf(` + SELECT date(occurred_at) AS day, + COALESCE(NULLIF(TRIM(COALESCE(model_canonical, model_raw)), ''), 'unknown') AS dim_key, + SUM(COALESCE(requests, 1)) AS value + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error'%s + GROUP BY day, dim_key + `, dailyTimeFilter) + case "source": + query = usageCTE + fmt.Sprintf(` + SELECT date(occurred_at) AS day, + COALESCE(NULLIF(TRIM(workspace_id), ''), COALESCE(NULLIF(TRIM(source_system), ''), 'unknown')) AS dim_key, + SUM(COALESCE(requests, 1)) AS value + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error'%s + GROUP BY day, dim_key + `, dailyTimeFilter) + case "project": + query = usageCTE + fmt.Sprintf(` + SELECT date(occurred_at) AS day, + COALESCE(NULLIF(TRIM(workspace_id), ''), '') AS dim_key, + SUM(COALESCE(requests, 1)) AS value + FROM deduped_usage + 
WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error' + AND NULLIF(TRIM(workspace_id), '') IS NOT NULL%s + GROUP BY day, dim_key + `, dailyTimeFilter) + case "client": + query = usageCTE + fmt.Sprintf(` + SELECT date(occurred_at) AS day, + %s AS dim_key, + SUM(COALESCE(requests, 1)) AS value + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 'error'%s + GROUP BY day, dim_key + `, clientDimensionExpr(), dailyTimeFilter) + default: + return map[string][]core.TimePoint{}, nil + } + + rows, err := db.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, fmt.Errorf("canonical usage daily dimension query (%s): %w", dimension, err) + } + defer rows.Close() + + byDim := make(map[string]map[string]float64) + for rows.Next() { + var day, key string + var value float64 + if err := rows.Scan(&day, &key, &value); err != nil { + continue + } + key = sanitizeMetricID(key) + if key == "" { + key = "unknown" + } + if dimension == "project" && key == "unknown" { + continue + } + if byDim[key] == nil { + byDim[key] = make(map[string]float64) + } + byDim[key][day] += value + } + + out := make(map[string][]core.TimePoint, len(byDim)) + for key, dayMap := range byDim { + out[key] = sortedSeriesFromByDay(dayMap) + } + return out, nil +} + +func queryDailyClientTokens(ctx context.Context, db *sql.DB, filter usageFilter) (map[string][]core.TimePoint, error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + dailyTimeFilter := "" + if filter.TimeWindowHours <= 0 { + dailyTimeFilter = "\n\t\t\t AND occurred_at >= datetime('now', '-30 day')" + } + query := usageCTE + fmt.Sprintf(` + SELECT + date(occurred_at) AS day, + %s AS source_name, + SUM(COALESCE(total_tokens, + COALESCE(input_tokens, 0) + + COALESCE(output_tokens, 0) + + COALESCE(reasoning_tokens, 0) + + COALESCE(cache_read_tokens, 0) + + COALESCE(cache_write_tokens, 0))) AS tokens + FROM deduped_usage + WHERE 1=1 + AND event_type = 'message_usage' + AND status != 
'error'%s + GROUP BY day, source_name + `, clientDimensionExpr(), dailyTimeFilter) + rows, err := db.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, fmt.Errorf("canonical usage daily client token query: %w", err) + } + defer rows.Close() + + byClient := make(map[string]map[string]float64) + for rows.Next() { + var day, client string + var value float64 + if err := rows.Scan(&day, &client, &value); err != nil { + continue + } + client = sanitizeMetricID(client) + if client == "" { + client = "unknown" + } + if byClient[client] == nil { + byClient[client] = make(map[string]float64) + } + byClient[client][day] += value + } + + out := make(map[string][]core.TimePoint, len(byClient)) + for key, dayMap := range byClient { + out[key] = sortedSeriesFromByDay(dayMap) + } + return out, nil +} + +func dedupedUsageCTE(filter usageFilter) (string, []any) { + if filter.materializedTbl != "" { + return fmt.Sprintf(`WITH deduped_usage AS (SELECT * FROM %s) `, filter.materializedTbl), nil + } + where, args := usageWhereClause("e", filter) + cte := fmt.Sprintf(` + WITH scoped_usage AS ( + SELECT + e.*, + COALESCE(r.source_system, '') AS source_system, + COALESCE(r.source_channel, '') AS source_channel, + COALESCE(r.source_payload, '{}') AS source_payload + FROM usage_events e + JOIN usage_raw_events r ON r.raw_event_id = e.raw_event_id + WHERE %s + AND e.event_type IN ('message_usage', 'tool_usage') + ), + ranked_usage AS ( + SELECT + scoped_usage.*, + CASE + WHEN COALESCE(NULLIF(TRIM(tool_call_id), ''), '') != '' THEN 'tool:' || LOWER(TRIM(tool_call_id)) + WHEN LOWER(TRIM(event_type)) = 'message_usage' + AND LOWER(TRIM(source_system)) = 'codex' + AND COALESCE(NULLIF(TRIM(turn_id), ''), '') != '' + THEN 'message_turn:' || LOWER(TRIM(turn_id)) + WHEN COALESCE(NULLIF(TRIM(message_id), ''), '') != '' THEN 'message:' || LOWER(TRIM(message_id)) + WHEN COALESCE(NULLIF(TRIM(turn_id), ''), '') != '' THEN 'turn:' || LOWER(TRIM(turn_id)) + ELSE 'fallback:' || dedup_key 
+ END AS logical_event_id, + CASE COALESCE(NULLIF(TRIM(source_channel), ''), '') + WHEN 'hook' THEN 4 + WHEN 'sse' THEN 3 + WHEN 'sqlite' THEN 2 + WHEN 'jsonl' THEN 2 + WHEN 'api' THEN 1 + ELSE 0 + END AS source_priority, + ( + CASE WHEN COALESCE(total_tokens, 0) > 0 THEN 4 ELSE 0 END + + CASE WHEN COALESCE(cost_usd, 0) > 0 THEN 2 ELSE 0 END + + CASE WHEN COALESCE(NULLIF(TRIM(COALESCE(model_canonical, model_raw)), ''), '') != '' THEN 1 ELSE 0 END + + CASE + WHEN COALESCE(NULLIF(TRIM(provider_id), ''), '') != '' + AND LOWER(TRIM(provider_id)) NOT IN ('unknown', 'opencode') + THEN 1 + ELSE 0 + END + ) AS quality_score + FROM scoped_usage + ), + deduped_usage AS ( + SELECT * + FROM ( + SELECT + ranked_usage.*, + ROW_NUMBER() OVER ( + PARTITION BY + LOWER(TRIM(source_system)), + LOWER(TRIM(event_type)), + LOWER(TRIM(COALESCE(session_id, ''))), + logical_event_id + ORDER BY source_priority DESC, quality_score DESC, occurred_at DESC, event_id DESC + ) AS rn + FROM ranked_usage + ) + WHERE rn = 1 + ) + `, where) + return cte, args +} + +func usageWhereClause(alias string, filter usageFilter) (string, []any) { + prefix := "" + if strings.TrimSpace(alias) != "" { + prefix = strings.TrimSpace(alias) + "." + } + providerIDs := normalizeProviderIDs(filter.ProviderIDs) + if len(providerIDs) == 0 { + return prefix + "provider_id = ''", nil + } + where := "" + args := make([]any, 0, len(providerIDs)+1) + if len(providerIDs) == 1 { + where = prefix + "provider_id = ?" + args = append(args, providerIDs[0]) + } else { + placeholders := make([]string, 0, len(providerIDs)) + for _, providerID := range providerIDs { + placeholders = append(placeholders, "?") + args = append(args, providerID) + } + where = prefix + "provider_id IN (" + strings.Join(placeholders, ",") + ")" + } + if strings.TrimSpace(filter.AccountID) != "" { + where += " AND " + prefix + "account_id = ?" 
+ args = append(args, strings.TrimSpace(filter.AccountID)) + } + if filter.TimeWindowHours > 0 { + where += fmt.Sprintf(" AND %soccurred_at >= datetime('now', '-%d hour')", prefix, filter.TimeWindowHours) + } + return where, args +} + +func normalizeProviderIDs(in []string) []string { + if len(in) == 0 { + return nil + } + normalized := lo.Map(in, func(s string, _ int) string { + return strings.ToLower(strings.TrimSpace(s)) + }) + result := lo.Uniq(lo.Compact(normalized)) + sort.Strings(result) + return result +} From 441debf74db991ad8a414fb935419f9d1a5a2422 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 13:13:16 +0100 Subject: [PATCH 10/32] refactor: split cursor api and cache helpers --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 3 +- internal/providers/cursor/api.go | 83 ++++++ internal/providers/cursor/cache.go | 169 +++++++++++ internal/providers/cursor/cursor.go | 263 ------------------ 4 files changed, 254 insertions(+), 264 deletions(-) create mode 100644 internal/providers/cursor/api.go create mode 100644 internal/providers/cursor/cache.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 7577aba..f62b001 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -43,6 +43,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | R23 | Fixed | Telemetry query-layer split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_languages.go` | The SQL aggregation/query helpers and language-inference helpers moved out of the main usage-view file into dedicated query/language units, leaving the main file focused on orchestration and shared aggregate types. | Continue shrinking orchestration/materialization into smaller units if needed. 
| | R24 | Fixed | OpenRouter generation-path split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/generations.go` | Generation payload types and generation-fetch/enrichment/aggregation logic moved out of the main OpenRouter provider file into a dedicated generation unit. | Continue with analytics/client/API helper splits. | | R25 | Fixed | OpenRouter clock injection | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/generations.go` | OpenRouter’s time-sensitive analytics and generation flows now use an injectable clock instead of reading `time.Now()` directly in the provider hot path. | Extend the same pattern to remaining providers and analytics helpers. | +| R26 | Fixed | Cursor API/cache helper split | `internal/providers/cursor/cursor.go`, `internal/providers/cursor/api.go`, `internal/providers/cursor/cache.go` | Cursor HTTP helper methods and billing/model cache helpers now live in dedicated units instead of the main provider file, removing duplicate implementations and narrowing `cursor.go` to orchestration and local-data projection. | Continue splitting SQLite projection and token-loading responsibilities. | ## Action Table @@ -52,7 +53,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go:393-584`, `internal/dashboardapp/service.go` | The side effects are now injected, but `Model` still owns a very large amount of event-handling and state-transition logic. | Continue splitting update/action logic into smaller TUI units and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. 
| | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go:663-729`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go` | The main composition bars now consume shared extractors, but analytics/detail-specific sections still decode some raw metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | | A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go`, `internal/providers/openrouter/generations.go` | `openrouter.go` is materially smaller after the provider-resolution and generation-path splits, but it still mixes auth probing, credits, keys, analytics parsing, and some output/projection helpers. | Continue splitting into `api_client`, `analytics`, and remaining projection/helper units. | Easier maintenance, smaller diff surface, faster targeted testing. | -| A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1006`, `internal/providers/cursor/cursor.go:1087-2086` | Cursor provider combines API orchestration, local SQLite readers, token extraction, and two independent caches in one class. | Split into `api`, `trackingdb`, `statedb`, `cache`, and `snapshot_projection` modules. Move token extraction out of provider hot path. | Cleaner boundaries and less risk of local/API logic regressions. 
| +| A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1842`, `internal/providers/cursor/state_records.go`, `internal/providers/cursor/tracking_records.go` | Cursor provider no longer owns the API/cache helpers, but it still combines fetch orchestration, token loading, local SQLite projection, and some remaining snapshot assembly in one large file. | Continue splitting token-loading, local DB projection, and snapshot assembly into dedicated modules so `cursor.go` becomes a thin coordinator. | Cleaner boundaries and less risk of local/API logic regressions. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go` | The usage-view code is materially smaller after the helper/projection/query splits, but the orchestration/materialization path still owns temp-table lifecycle, query fanout, and aggregate assembly in one place. | Continue splitting remaining orchestration/materialization concerns and consider a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | | A7 | P2 | Daemon service monolith | `internal/daemon/server.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_loops.go` | The daemon is materially less coupled after the logging/cache/http/loop split, but polling, collection, retention, and spool maintenance still share one large runtime helper unit. | Continue splitting the loop-heavy runtime into `polling`, `collection`, `retention`, and `spool` units. | Lower mental load and easier concurrency review. 
| | A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Cursor and OpenRouter now use injectable clocks in their main time-sensitive paths, but other providers and analytics helpers still read `time.Now()` directly, often mixing local time and UTC. | Extend the clock abstraction to the remaining provider and analytics subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | diff --git a/internal/providers/cursor/api.go b/internal/providers/cursor/api.go new file mode 100644 index 0000000..6e7adcd --- /dev/null +++ b/internal/providers/cursor/api.go @@ -0,0 +1,83 @@ +package cursor + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" +) + +func (p *Provider) callDashboardAPI(ctx context.Context, token, method string, result interface{}) error { + url := fmt.Sprintf("%s/aiserver.v1.DashboardService/%s", cursorAPIBase, method) + return p.doPost(ctx, token, url, result) +} + +func (p *Provider) callDashboardAPIWithBody(ctx context.Context, token, method string, body []byte, result interface{}) error { + url := fmt.Sprintf("%s/aiserver.v1.DashboardService/%s", cursorAPIBase, method) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Type", "application/json") + + resp, err := p.Client().Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + respBody, _ := io.ReadAll(resp.Body) + return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(respBody)) + } + + return json.NewDecoder(resp.Body).Decode(result) +} + +func (p *Provider) callRESTAPI(ctx context.Context, token, path string, result interface{}) error { + url := cursorAPIBase + path + req, err := http.NewRequestWithContext(ctx, 
http.MethodGet, url, nil) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Type", "application/json") + + resp, err := p.Client().Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body)) + } + + return json.NewDecoder(resp.Body).Decode(result) +} + +func (p *Provider) doPost(ctx context.Context, token, url string, result interface{}) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader([]byte("{}"))) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Type", "application/json") + + resp, err := p.Client().Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + respBody, _ := io.ReadAll(resp.Body) + return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(respBody)) + } + + return json.NewDecoder(resp.Body).Decode(result) +} diff --git a/internal/providers/cursor/cache.go b/internal/providers/cursor/cache.go new file mode 100644 index 0000000..c69ae6f --- /dev/null +++ b/internal/providers/cursor/cache.go @@ -0,0 +1,169 @@ +package cursor + +import ( + "strconv" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (p *Provider) storeModelAggregationCache(accountID, billingCycleStart, billingCycleEnd string, aggregations []modelAggregation, effectiveLimitUSD float64) { + if accountID == "" || len(aggregations) == 0 { + return + } + copied := make([]modelAggregation, len(aggregations)) + copy(copied, aggregations) + + p.mu.Lock() + defer p.mu.Unlock() + if p.modelAggregationCache == nil { + p.modelAggregationCache = make(map[string]cachedModelAggregation) + } + entry := cachedModelAggregation{ + BillingCycleStart: billingCycleStart, + BillingCycleEnd: billingCycleEnd, + Aggregations: 
copied, + EffectiveLimitUSD: effectiveLimitUSD, + } + if prev, ok := p.modelAggregationCache[accountID]; ok && len(prev.BillingMetrics) > 0 { + entry.BillingMetrics = prev.BillingMetrics + } + p.modelAggregationCache[accountID] = entry +} + +func (p *Provider) applyCachedModelAggregations(accountID, billingCycleStart, billingCycleEnd string, snap *core.UsageSnapshot) bool { + if accountID == "" { + return false + } + + p.mu.RLock() + cached, ok := p.modelAggregationCache[accountID] + p.mu.RUnlock() + if !ok || len(cached.Aggregations) == 0 { + return false + } + if billingCycleStart != "" && cached.BillingCycleStart != "" && billingCycleStart != cached.BillingCycleStart { + return false + } + if billingCycleEnd != "" && cached.BillingCycleEnd != "" && billingCycleEnd != cached.BillingCycleEnd { + return false + } + + copied := make([]modelAggregation, len(cached.Aggregations)) + copy(copied, cached.Aggregations) + return applyModelAggregations(snap, copied) +} + +var billingMetricKeys = []string{ + "plan_spend", "plan_percent_used", "plan_auto_percent_used", "plan_api_percent_used", + "spend_limit", "individual_spend", "team_budget", "team_budget_self", "team_budget_others", + "plan_included", "plan_bonus", "plan_total_spend_usd", "plan_limit_usd", +} + +func cloneMetric(m core.Metric) core.Metric { + out := core.Metric{Unit: m.Unit, Window: m.Window} + if m.Limit != nil { + out.Limit = core.Float64Ptr(*m.Limit) + } + if m.Remaining != nil { + out.Remaining = core.Float64Ptr(*m.Remaining) + } + if m.Used != nil { + out.Used = core.Float64Ptr(*m.Used) + } + return out +} + +func (p *Provider) storeBillingMetricsCache(accountID string, snap *core.UsageSnapshot) { + if accountID == "" { + return + } + cached := make(map[string]core.Metric, len(billingMetricKeys)) + for _, key := range billingMetricKeys { + if metric, ok := snap.Metrics[key]; ok { + cached[key] = cloneMetric(metric) + } + } + if len(cached) == 0 { + return + } + + p.mu.Lock() + defer p.mu.Unlock() + if 
p.modelAggregationCache == nil { + p.modelAggregationCache = make(map[string]cachedModelAggregation) + } + entry := p.modelAggregationCache[accountID] + entry.BillingMetrics = cached + p.modelAggregationCache[accountID] = entry +} + +func (p *Provider) applyCachedBillingMetrics(accountID string, snap *core.UsageSnapshot) { + if accountID == "" { + return + } + p.mu.RLock() + cached, ok := p.modelAggregationCache[accountID] + p.mu.RUnlock() + if !ok || len(cached.BillingMetrics) == 0 { + return + } + for key, metric := range cached.BillingMetrics { + if _, exists := snap.Metrics[key]; !exists { + snap.Metrics[key] = cloneMetric(metric) + } + } +} + +func (p *Provider) ensureCreditGauges(accountID string, snap *core.UsageSnapshot) { + if _, ok := snap.Metrics["plan_spend"]; ok { + return + } + if _, ok := snap.Metrics["spend_limit"]; ok { + return + } + + var costUSD float64 + if metric, ok := snap.Metrics["billing_total_cost"]; ok && metric.Used != nil && *metric.Used > 0 { + costUSD = *metric.Used + } else if metric, ok := snap.Metrics["composer_cost"]; ok && metric.Used != nil && *metric.Used > 0 { + costUSD = *metric.Used + } + if costUSD <= 0 { + return + } + + if _, ok := snap.Metrics["plan_total_spend_usd"]; !ok { + snap.Metrics["plan_total_spend_usd"] = core.Metric{ + Used: core.Float64Ptr(costUSD), + Unit: "USD", + Window: "billing-cycle", + } + } + + var limitUSD float64 + if metric, ok := snap.Metrics["plan_included_amount"]; ok && metric.Used != nil && *metric.Used > 0 { + limitUSD = *metric.Used + } + if limitUSD <= 0 { + p.mu.RLock() + if cached, ok := p.modelAggregationCache[accountID]; ok && cached.EffectiveLimitUSD > 0 { + limitUSD = cached.EffectiveLimitUSD + } + p.mu.RUnlock() + } + if limitUSD <= 0 { + if raw, ok := snap.Raw["plan_included_amount_cents"]; ok { + if cents, err := strconv.ParseFloat(raw, 64); err == nil && cents > 0 { + limitUSD = cents / 100.0 + } + } + } + if limitUSD > 0 { + snap.Metrics["plan_spend"] = core.Metric{ + Used: 
core.Float64Ptr(costUSD), + Limit: core.Float64Ptr(limitUSD), + Unit: "USD", + Window: "billing-cycle", + } + } +} diff --git a/internal/providers/cursor/cursor.go b/internal/providers/cursor/cursor.go index a6d378b..d93179b 100644 --- a/internal/providers/cursor/cursor.go +++ b/internal/providers/cursor/cursor.go @@ -1,15 +1,11 @@ package cursor import ( - "bytes" "context" "database/sql" - "encoding/json" "fmt" - "io" "log" "math" - "net/http" "sort" "strconv" "strings" @@ -701,81 +697,6 @@ func (p *Provider) fetchFromAPI(ctx context.Context, token string, snap *core.Us return nil } -func (p *Provider) callDashboardAPI(ctx context.Context, token, method string, result interface{}) error { - url := fmt.Sprintf("%s/aiserver.v1.DashboardService/%s", cursorAPIBase, method) - return p.doPost(ctx, token, url, result) -} - -func (p *Provider) callDashboardAPIWithBody(ctx context.Context, token, method string, body []byte, result interface{}) error { - url := fmt.Sprintf("%s/aiserver.v1.DashboardService/%s", cursorAPIBase, method) - req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body)) - if err != nil { - return err - } - req.Header.Set("Authorization", "Bearer "+token) - req.Header.Set("Content-Type", "application/json") - - resp, err := p.Client().Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - respBody, _ := io.ReadAll(resp.Body) - return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(respBody)) - } - - return json.NewDecoder(resp.Body).Decode(result) -} - -func (p *Provider) callRESTAPI(ctx context.Context, token, path string, result interface{}) error { - url := cursorAPIBase + path - - req, err := http.NewRequestWithContext(ctx, "GET", url, nil) - if err != nil { - return err - } - req.Header.Set("Authorization", "Bearer "+token) - req.Header.Set("Content-Type", "application/json") - - resp, err := p.Client().Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - 
if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body)) - } - - return json.NewDecoder(resp.Body).Decode(result) -} - -func (p *Provider) doPost(ctx context.Context, token, url string, result interface{}) error { - body := []byte("{}") - req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body)) - if err != nil { - return err - } - req.Header.Set("Authorization", "Bearer "+token) - req.Header.Set("Content-Type", "application/json") - - resp, err := p.Client().Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - respBody, _ := io.ReadAll(resp.Body) - return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(respBody)) - } - - return json.NewDecoder(resp.Body).Decode(result) -} - func applyModelAggregations(snap *core.UsageSnapshot, aggregations []modelAggregation) bool { if len(aggregations) == 0 { return false @@ -926,190 +847,6 @@ func parseModelTokenCount(raw string) (float64, bool) { return v, true } -func (p *Provider) storeModelAggregationCache(accountID, billingCycleStart, billingCycleEnd string, aggregations []modelAggregation, effectiveLimitUSD float64) { - if accountID == "" || len(aggregations) == 0 { - return - } - copied := make([]modelAggregation, len(aggregations)) - copy(copied, aggregations) - - p.mu.Lock() - defer p.mu.Unlock() - if p.modelAggregationCache == nil { - p.modelAggregationCache = make(map[string]cachedModelAggregation) - } - entry := cachedModelAggregation{ - BillingCycleStart: billingCycleStart, - BillingCycleEnd: billingCycleEnd, - Aggregations: copied, - EffectiveLimitUSD: effectiveLimitUSD, - } - // Preserve billing metrics from a previous storeBillingMetricsCache call. 
- if prev, ok := p.modelAggregationCache[accountID]; ok && len(prev.BillingMetrics) > 0 { - entry.BillingMetrics = prev.BillingMetrics - } - p.modelAggregationCache[accountID] = entry -} - -func (p *Provider) applyCachedModelAggregations(accountID, billingCycleStart, billingCycleEnd string, snap *core.UsageSnapshot) bool { - if accountID == "" { - return false - } - - p.mu.RLock() - cached, ok := p.modelAggregationCache[accountID] - p.mu.RUnlock() - if !ok || len(cached.Aggregations) == 0 { - return false - } - - if billingCycleStart != "" && cached.BillingCycleStart != "" && billingCycleStart != cached.BillingCycleStart { - return false - } - if billingCycleEnd != "" && cached.BillingCycleEnd != "" && billingCycleEnd != cached.BillingCycleEnd { - return false - } - - copied := make([]modelAggregation, len(cached.Aggregations)) - copy(copied, cached.Aggregations) - return applyModelAggregations(snap, copied) -} - -// billingMetricKeys lists the metric keys cached for local-only fallback. -var billingMetricKeys = []string{ - "plan_spend", "plan_percent_used", "plan_auto_percent_used", "plan_api_percent_used", - "spend_limit", "individual_spend", "team_budget", "team_budget_self", "team_budget_others", - "plan_included", "plan_bonus", "plan_total_spend_usd", "plan_limit_usd", -} - -func cloneMetric(m core.Metric) core.Metric { - out := core.Metric{Unit: m.Unit, Window: m.Window} - if m.Limit != nil { - out.Limit = core.Float64Ptr(*m.Limit) - } - if m.Remaining != nil { - out.Remaining = core.Float64Ptr(*m.Remaining) - } - if m.Used != nil { - out.Used = core.Float64Ptr(*m.Used) - } - return out -} - -// storeBillingMetricsCache snapshots the current billing metrics so they can -// be restored when the API is temporarily unavailable. 
-func (p *Provider) storeBillingMetricsCache(accountID string, snap *core.UsageSnapshot) { - if accountID == "" { - return - } - cached := make(map[string]core.Metric, len(billingMetricKeys)) - for _, key := range billingMetricKeys { - if m, ok := snap.Metrics[key]; ok { - cached[key] = cloneMetric(m) - } - } - if len(cached) == 0 { - return - } - - p.mu.Lock() - defer p.mu.Unlock() - if p.modelAggregationCache == nil { - p.modelAggregationCache = make(map[string]cachedModelAggregation) - } - entry := p.modelAggregationCache[accountID] - entry.BillingMetrics = cached - p.modelAggregationCache[accountID] = entry -} - -// applyCachedBillingMetrics restores billing metrics from cache into the -// snapshot so that credit gauges render when the API is temporarily down. -func (p *Provider) applyCachedBillingMetrics(accountID string, snap *core.UsageSnapshot) { - if accountID == "" { - return - } - p.mu.RLock() - cached, ok := p.modelAggregationCache[accountID] - p.mu.RUnlock() - if !ok || len(cached.BillingMetrics) == 0 { - return - } - for key, m := range cached.BillingMetrics { - if _, exists := snap.Metrics[key]; !exists { - snap.Metrics[key] = cloneMetric(m) - } - } -} - -// ensureCreditGauges synthesizes credit metrics from local data when API -// didn't provide them. This runs as a final step in Fetch() so the Credits -// tag and gauge bars render regardless of API availability. -func (p *Provider) ensureCreditGauges(accountID string, snap *core.UsageSnapshot) { - // Already have gauge-eligible credit metrics from API — nothing to do. - if _, ok := snap.Metrics["plan_spend"]; ok { - return - } - if _, ok := snap.Metrics["spend_limit"]; ok { - return - } - - // Determine total cost from best available source. 
- var costUSD float64 - if m, ok := snap.Metrics["billing_total_cost"]; ok && m.Used != nil && *m.Used > 0 { - costUSD = *m.Used - } else if m, ok := snap.Metrics["composer_cost"]; ok && m.Used != nil && *m.Used > 0 { - costUSD = *m.Used - } - if costUSD <= 0 { - return - } - - // Always expose plan_total_spend_usd so the Credits tag renders in the - // TUI even without a limit (computeDisplayInfoRaw checks this key). - if _, ok := snap.Metrics["plan_total_spend_usd"]; !ok { - snap.Metrics["plan_total_spend_usd"] = core.Metric{ - Used: core.Float64Ptr(costUSD), - Unit: "USD", - Window: "billing-cycle", - } - } - - // Try to find a limit so we can create a gauge bar. - var limitUSD float64 - - // 1) From plan_included_amount (GetPlanInfo may have succeeded). - if m, ok := snap.Metrics["plan_included_amount"]; ok && m.Used != nil && *m.Used > 0 { - limitUSD = *m.Used - } - - // 2) From cached effective limit. - if limitUSD <= 0 { - p.mu.RLock() - if cached, ok := p.modelAggregationCache[accountID]; ok && cached.EffectiveLimitUSD > 0 { - limitUSD = cached.EffectiveLimitUSD - } - p.mu.RUnlock() - } - - // 3) From plan_included_amount_cents in Raw (may have been set by API). 
- if limitUSD <= 0 { - if raw, ok := snap.Raw["plan_included_amount_cents"]; ok { - if cents, err := strconv.ParseFloat(raw, 64); err == nil && cents > 0 { - limitUSD = cents / 100.0 - } - } - } - - if limitUSD > 0 { - snap.Metrics["plan_spend"] = core.Metric{ - Used: core.Float64Ptr(costUSD), - Limit: core.Float64Ptr(limitUSD), - Unit: "USD", - Window: "billing-cycle", - } - } -} - func (p *Provider) readTrackingDB(ctx context.Context, dbPath string, snap *core.UsageSnapshot) error { db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", dbPath)) if err != nil { From 80be6aaf57f1c5517c3e46068001802d36d6992e Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 13:31:37 +0100 Subject: [PATCH 11/32] refactor: split tui and daemon runtime concerns --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 13 +- internal/core/analytics_normalize.go | 19 +- internal/core/analytics_snapshot.go | 188 ++++ internal/daemon/server_collect.go | 161 +++ internal/daemon/server_loops.go | 534 ---------- internal/daemon/server_poll.go | 137 +++ internal/daemon/server_spool.go | 251 +++++ internal/tui/analytics.go | 67 +- internal/tui/analytics_data.go | 199 +--- internal/tui/detail.go | 148 +-- internal/tui/model.go | 949 ------------------ internal/tui/model_commands.go | 217 ++++ internal/tui/model_input.go | 698 +++++++++++++ 13 files changed, 1740 insertions(+), 1841 deletions(-) create mode 100644 internal/core/analytics_snapshot.go create mode 100644 internal/daemon/server_collect.go delete mode 100644 internal/daemon/server_loops.go create mode 100644 internal/daemon/server_poll.go create mode 100644 internal/daemon/server_spool.go create mode 100644 internal/tui/model_commands.go create mode 100644 internal/tui/model_input.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index f62b001..16db02f 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ 
b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -44,19 +44,24 @@ This table captures every issue found in this pass. It is broad and high-signal, | R24 | Fixed | OpenRouter generation-path split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/generations.go` | Generation payload types and generation-fetch/enrichment/aggregation logic moved out of the main OpenRouter provider file into a dedicated generation unit. | Continue with analytics/client/API helper splits. | | R25 | Fixed | OpenRouter clock injection | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/generations.go` | OpenRouter’s time-sensitive analytics and generation flows now use an injectable clock instead of reading `time.Now()` directly in the provider hot path. | Extend the same pattern to remaining providers and analytics helpers. | | R26 | Fixed | Cursor API/cache helper split | `internal/providers/cursor/cursor.go`, `internal/providers/cursor/api.go`, `internal/providers/cursor/cache.go` | Cursor HTTP helper methods and billing/model cache helpers now live in dedicated units instead of the main provider file, removing duplicate implementations and narrowing `cursor.go` to orchestration and local-data projection. | Continue splitting SQLite projection and token-loading responsibilities. | +| R27 | Fixed | TUI model file split | `internal/tui/model.go`, `internal/tui/model_input.go`, `internal/tui/model_commands.go` | Bubble Tea update/input/command orchestration no longer lives in one monolithic file. The model state definition stays in `model.go`, while input/update and command wiring moved into dedicated units. | Continue decomposing render-heavy files over time. 
| +| R28 | Fixed | Shared analytics model extraction | `internal/core/analytics_snapshot.go`, `internal/tui/analytics_data.go`, `internal/tui/detail.go` | Analytics and detail views now consume one shared core extractor for model usage instead of maintaining separate metric/raw parsing paths. | Extend the same pattern to more analytics/detail sections if new derived views appear. | +| R29 | Fixed | Shared analytics series selection | `internal/core/analytics_snapshot.go`, `internal/tui/analytics.go` | Token/model series selection and fallback weighting for analytics charts moved out of TUI render code into shared core helpers. | Keep new per-series heuristics out of render code. | +| R30 | Fixed | Daemon loop family split | `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go` | Collection/retention, spool/hook-spool, and provider polling loops now live in separate daemon files instead of a single loop-heavy unit. | Keep future loop additions in the matching family file instead of re-growing a monolith. | +| R31 | Fixed | Analytics timestamp normalization | `internal/core/analytics_normalize.go` | Synthesized analytics daily-series dates now derive from the snapshot timestamp in UTC instead of ad hoc local `time.Now()` fallbacks. | Continue the same UTC/clock cleanup in remaining providers such as Ollama. | ## Action Table | ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. 
| Finishes the contract cleanup and makes misuse harder. | -| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go:393-584`, `internal/dashboardapp/service.go` | The side effects are now injected, but `Model` still owns a very large amount of event-handling and state-transition logic. | Continue splitting update/action logic into smaller TUI units and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | -| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go:663-729`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go` | The main composition bars now consume shared extractors, but analytics/detail-specific sections still decode some raw metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | +| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/dashboardapp/service.go` | Side effects are injected and the model file is split, but TUI state-transition and rendering logic is still concentrated in very large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | +| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go` | Composition bars and analytics model views now consume shared extractors, but some analytics/detail sections still decode raw metric-key conventions directly in TUI code. 
| Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | | A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go`, `internal/providers/openrouter/generations.go` | `openrouter.go` is materially smaller after the provider-resolution and generation-path splits, but it still mixes auth probing, credits, keys, analytics parsing, and some output/projection helpers. | Continue splitting into `api_client`, `analytics`, and remaining projection/helper units. | Easier maintenance, smaller diff surface, faster targeted testing. | | A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1842`, `internal/providers/cursor/state_records.go`, `internal/providers/cursor/tracking_records.go` | Cursor provider no longer owns the API/cache helpers, but it still combines fetch orchestration, token loading, local SQLite projection, and some remaining snapshot assembly in one large file. | Continue splitting token-loading, local DB projection, and snapshot assembly into dedicated modules so `cursor.go` becomes a thin coordinator. | Cleaner boundaries and less risk of local/API logic regressions. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go` | The usage-view code is materially smaller after the helper/projection/query splits, but the orchestration/materialization path still owns temp-table lifecycle, query fanout, and aggregate assembly in one place. 
| Continue splitting remaining orchestration/materialization concerns and consider a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | -| A7 | P2 | Daemon service monolith | `internal/daemon/server.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_loops.go` | The daemon is materially less coupled after the logging/cache/http/loop split, but polling, collection, retention, and spool maintenance still share one large runtime helper unit. | Continue splitting the loop-heavy runtime into `polling`, `collection`, `retention`, and `spool` units. | Lower mental load and easier concurrency review. | -| A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/ollama/ollama.go:1088`, `internal/core/analytics_normalize.go:61-103` | Cursor and OpenRouter now use injectable clocks in their main time-sensitive paths, but other providers and analytics helpers still read `time.Now()` directly, often mixing local time and UTC. | Extend the clock abstraction to the remaining provider and analytics subsystems and standardize UTC/local semantics per provider. | Better determinism and fewer timezone edge cases. | +| A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. 
| +| A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/ollama/ollama.go:702`, `internal/providers/ollama/ollama.go:1075`, `internal/providers/ollama/ollama.go:1088`, `internal/providers/ollama/ollama.go:1575` | Cursor and OpenRouter now use injectable clocks in their main time-sensitive paths, and analytics normalization now uses snapshot UTC time, but Ollama still reads `time.Now()` directly in several behavioral paths. | Extend the clock abstraction to Ollama and any remaining provider-specific time windows/reset calculations. | Better determinism and fewer timezone edge cases. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | | A15 | P3 | Performance optimization opportunity in render path | `internal/tui/model.go:441-450`, `internal/tui/tiles_composition.go:302-322`, `internal/tui/detail.go:752-1046`, `internal/tui/analytics.go:663-729` | The UI recomputes display/composition structures from raw metric maps repeatedly during rendering. 
It is correct, but the work is duplicated across views and frames. | Cache derived display/composition sections per snapshot update instead of rebuilding them in each view path. | Lower render cost and less duplicated parsing logic. | diff --git a/internal/core/analytics_normalize.go b/internal/core/analytics_normalize.go index 5614ca0..1054784 100644 --- a/internal/core/analytics_normalize.go +++ b/internal/core/analytics_normalize.go @@ -56,11 +56,7 @@ func aliasInto(s *UsageSnapshot, canonical string, aliases ...string) { } func synthesizeCoreSeriesFromMetrics(s *UsageSnapshot) { - today := s.Timestamp - if today.IsZero() { - today = time.Now() - } - todayDate := today.Format("2006-01-02") + todayDate := analyticsReferenceTime(s).Format("2006-01-02") metricUsed := func(keys ...string) float64 { for _, k := range keys { @@ -98,11 +94,7 @@ func synthesizeModelSeriesFromRecords(s *UsageSnapshot) { if len(s.ModelUsage) == 0 { return } - today := s.Timestamp - if today.IsZero() { - today = time.Now() - } - date := today.Format("2006-01-02") + date := analyticsReferenceTime(s).Format("2006-01-02") perModel := make(map[string]float64) for _, rec := range s.ModelUsage { @@ -183,3 +175,10 @@ func normalizeSeriesModelKey(model string) string { } return model } + +func analyticsReferenceTime(s *UsageSnapshot) time.Time { + if s != nil && !s.Timestamp.IsZero() { + return s.Timestamp.UTC() + } + return time.Now().UTC() +} diff --git a/internal/core/analytics_snapshot.go b/internal/core/analytics_snapshot.go new file mode 100644 index 0000000..93227b1 --- /dev/null +++ b/internal/core/analytics_snapshot.go @@ -0,0 +1,188 @@ +package core + +import ( + "sort" + "strings" +) + +type AnalyticsModelUsageEntry struct { + Name string + CostUSD float64 + InputTokens float64 + OutputTokens float64 + Confidence float64 + Window string +} + +type NamedSeries struct { + Name string + Points []TimePoint +} + +func ExtractAnalyticsModelUsage(s UsageSnapshot) []AnalyticsModelUsageEntry { + 
records := s.ModelUsage + if len(records) == 0 { + records = BuildModelUsageFromSnapshotMetrics(s) + } + if len(records) == 0 { + return nil + } + + type agg struct { + cost float64 + input float64 + output float64 + confidence float64 + window string + } + + byModel := make(map[string]*agg) + order := make([]string, 0, len(records)) + ensure := func(name string) *agg { + if entry, ok := byModel[name]; ok { + return entry + } + entry := &agg{} + byModel[name] = entry + order = append(order, name) + return entry + } + + for _, rec := range records { + name := analyticsModelDisplayName(rec) + if name == "" { + continue + } + entry := ensure(name) + if rec.CostUSD != nil && *rec.CostUSD > 0 { + entry.cost += *rec.CostUSD + } + if rec.InputTokens != nil { + entry.input += *rec.InputTokens + } + if rec.OutputTokens != nil { + entry.output += *rec.OutputTokens + } + if rec.TotalTokens != nil && rec.InputTokens == nil && rec.OutputTokens == nil { + entry.input += *rec.TotalTokens + } + if rec.Confidence > entry.confidence { + entry.confidence = rec.Confidence + } + if entry.window == "" { + entry.window = rec.Window + } + } + + out := make([]AnalyticsModelUsageEntry, 0, len(order)) + for _, name := range order { + entry := byModel[name] + if entry.cost <= 0 && entry.input <= 0 && entry.output <= 0 { + continue + } + out = append(out, AnalyticsModelUsageEntry{ + Name: name, + CostUSD: entry.cost, + InputTokens: entry.input, + OutputTokens: entry.output, + Confidence: entry.confidence, + Window: entry.window, + }) + } + sort.Slice(out, func(i, j int) bool { + ti := out[i].InputTokens + out[i].OutputTokens + tj := out[j].InputTokens + out[j].OutputTokens + if ti != tj { + return ti > tj + } + if out[i].CostUSD != out[j].CostUSD { + return out[i].CostUSD > out[j].CostUSD + } + return out[i].Name < out[j].Name + }) + return out +} + +func ExtractAnalyticsModelSeries(series map[string][]TimePoint) []NamedSeries { + keys := make([]string, 0, len(series)) + for key := range 
series { + switch { + case strings.HasPrefix(key, "tokens_"): + keys = append(keys, key) + case strings.HasPrefix(key, "usage_model_"): + if !hasAnalyticsTokenSeries(series) { + keys = append(keys, key) + } + } + } + sort.Strings(keys) + + out := make([]NamedSeries, 0, len(keys)) + for _, key := range keys { + name := strings.TrimPrefix(key, "tokens_") + name = strings.TrimPrefix(name, "usage_model_") + if name == "" || len(series[key]) == 0 { + continue + } + out = append(out, NamedSeries{Name: name, Points: series[key]}) + } + return out +} + +func SelectAnalyticsWeightSeries(series map[string][]TimePoint) []TimePoint { + for _, key := range []string{ + "tokens_total", + "messages", + "sessions", + "tool_calls", + "requests", + "tab_accepted", + "composer_accepted", + } { + if pts := series[key]; len(pts) > 0 { + return pts + } + } + for _, named := range ExtractAnalyticsModelSeries(series) { + if len(named.Points) > 0 { + return named.Points + } + } + keys := make([]string, 0, len(series)) + for key := range series { + if strings.HasPrefix(key, "usage_client_") { + keys = append(keys, key) + } + } + sort.Strings(keys) + for _, key := range keys { + if len(series[key]) > 0 { + return series[key] + } + } + return nil +} + +func hasAnalyticsTokenSeries(series map[string][]TimePoint) bool { + for key, points := range series { + if strings.HasPrefix(key, "tokens_") && len(points) > 0 { + return true + } + } + return false +} + +func analyticsModelDisplayName(rec ModelUsageRecord) string { + if rec.Dimensions != nil { + if groupID := strings.TrimSpace(rec.Dimensions["canonical_group_id"]); groupID != "" { + return groupID + } + } + if raw := strings.TrimSpace(rec.RawModelID); raw != "" { + return raw + } + if canonical := strings.TrimSpace(rec.CanonicalLineageID); canonical != "" { + return canonical + } + return "unknown" +} diff --git a/internal/daemon/server_collect.go b/internal/daemon/server_collect.go new file mode 100644 index 0000000..9080fa9 --- /dev/null +++ 
b/internal/daemon/server_collect.go @@ -0,0 +1,161 @@ +package daemon + +import ( + "context" + "fmt" + "time" + + "github.com/janekbaraniewski/openusage/internal/config" + "github.com/janekbaraniewski/openusage/internal/telemetry" +) + +func (s *Service) runCollectLoop(ctx context.Context) { + ticker := time.NewTicker(s.cfg.CollectInterval) + defer ticker.Stop() + + s.infof("collect_loop_start", "interval=%s", s.cfg.CollectInterval) + s.collectAndFlush(ctx) + for { + select { + case <-ctx.Done(): + s.infof("collect_loop_stop", "reason=context_done") + return + case <-ticker.C: + s.collectAndFlush(ctx) + } + } +} + +func (s *Service) collectAndFlush(ctx context.Context) { + if s == nil { + return + } + started := time.Now() + const backlogFlushLimit = 2000 + + var allReqs []telemetry.IngestRequest + totalCollected := 0 + var warnings []string + + for _, collector := range s.collectors { + reqs, err := collector.Collect(ctx) + if err != nil { + warnings = append(warnings, fmt.Sprintf("%s: %v", collector.Name(), err)) + continue + } + totalCollected += len(reqs) + allReqs = append(allReqs, reqs...) + } + + direct, retries := s.ingestBatch(ctx, allReqs) + flush, enqueued, flushWarnings := s.flushBacklog(ctx, retries, backlogFlushLimit) + warnings = append(warnings, flushWarnings...) 
+ + durationMs := time.Since(started).Milliseconds() + if totalCollected > 0 || direct.processed > 0 || enqueued > 0 || flush.Processed > 0 || len(warnings) > 0 { + s.infof( + "collect_cycle", + "duration_ms=%d collected=%d direct_processed=%d direct_ingested=%d direct_deduped=%d direct_failed=%d enqueued=%d flush_processed=%d flush_ingested=%d flush_deduped=%d flush_failed=%d warnings=%d", + durationMs, totalCollected, + direct.processed, direct.ingested, direct.deduped, direct.failed, + enqueued, flush.Processed, flush.Ingested, flush.Deduped, flush.Failed, + len(warnings), + ) + for _, warning := range warnings { + s.warnf("collect_warning", "message=%q", warning) + } + s.pruneTelemetryOrphans(ctx) + return + } + + if durationMs >= 1500 && s.shouldLog("collect_slow", 30*time.Second) { + s.infof("collect_idle_slow", "duration_ms=%d", durationMs) + } + + s.pruneTelemetryOrphans(ctx) +} + +func (s *Service) pruneTelemetryOrphans(ctx context.Context) { + if s == nil || s.store == nil { + return + } + if !s.shouldLog("prune_orphan_raw_events_tick", 45*time.Second) { + return + } + + const pruneBatchSize = 10000 + pruneCtx, cancel := context.WithTimeout(ctx, 4*time.Second) + defer cancel() + + removed, err := s.store.PruneOrphanRawEvents(pruneCtx, pruneBatchSize) + if err != nil { + if s.shouldLog("prune_orphan_raw_events_error", 20*time.Second) { + s.warnf("prune_orphan_raw_events_error", "error=%v", err) + } + return + } + if removed > 0 { + s.infof("prune_orphan_raw_events", "removed=%d batch_size=%d", removed, pruneBatchSize) + } + + payloadCtx, payloadCancel := context.WithTimeout(ctx, 4*time.Second) + defer payloadCancel() + pruned, pruneErr := s.store.PruneRawEventPayloads(payloadCtx, 1, pruneBatchSize) + if pruneErr == nil && pruned > 0 { + s.infof("prune_raw_payloads", "pruned=%d", pruned) + } +} + +func (s *Service) runRetentionLoop(ctx context.Context) { + s.pruneOldData(ctx) + ticker := time.NewTicker(6 * time.Hour) + defer ticker.Stop() + for { + select { 
+ case <-ctx.Done(): + s.infof("retention_loop_stop", "reason=context_done") + return + case <-ticker.C: + s.pruneOldData(ctx) + } + } +} + +func (s *Service) pruneOldData(ctx context.Context) { + if s == nil || s.store == nil { + return + } + cfg, err := config.Load() + if err != nil { + if s.shouldLog("retention_config_error", 30*time.Second) { + s.warnf("retention_config_error", "error=%v", err) + } + return + } + retentionDays := cfg.Data.RetentionDays + if retentionDays <= 0 { + retentionDays = 30 + } + + pruneCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + deleted, err := s.store.PruneOldEvents(pruneCtx, retentionDays) + if err != nil { + if s.shouldLog("retention_prune_error", 30*time.Second) { + s.warnf("retention_prune_error", "error=%v", err) + } + return + } + if deleted > 0 { + s.infof("retention_prune", "deleted=%d retention_days=%d", deleted, retentionDays) + orphanCtx, orphanCancel := context.WithTimeout(ctx, 10*time.Second) + defer orphanCancel() + orphaned, orphanErr := s.store.PruneOrphanRawEvents(orphanCtx, 50000) + if orphanErr != nil { + s.warnf("retention_orphan_prune_error", "error=%v", orphanErr) + } else if orphaned > 0 { + s.infof("retention_orphan_prune", "removed=%d", orphaned) + } + } +} diff --git a/internal/daemon/server_loops.go b/internal/daemon/server_loops.go deleted file mode 100644 index ae05357..0000000 --- a/internal/daemon/server_loops.go +++ /dev/null @@ -1,534 +0,0 @@ -package daemon - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/janekbaraniewski/openusage/internal/config" - "github.com/janekbaraniewski/openusage/internal/core" - "github.com/janekbaraniewski/openusage/internal/providers" - "github.com/janekbaraniewski/openusage/internal/telemetry" -) - -func (s *Service) runCollectLoop(ctx context.Context) { - ticker := time.NewTicker(s.cfg.CollectInterval) - defer ticker.Stop() - - s.infof("collect_loop_start", 
"interval=%s", s.cfg.CollectInterval) - s.collectAndFlush(ctx) - for { - select { - case <-ctx.Done(): - s.infof("collect_loop_stop", "reason=context_done") - return - case <-ticker.C: - s.collectAndFlush(ctx) - } - } -} - -func (s *Service) runSpoolMaintenanceLoop(ctx context.Context) { - if s == nil { - return - } - flushTicker := time.NewTicker(5 * time.Second) - cleanupTicker := time.NewTicker(60 * time.Second) - defer flushTicker.Stop() - defer cleanupTicker.Stop() - - s.infof("spool_loop_start", "flush_interval=%s cleanup_interval=%s", 5*time.Second, 60*time.Second) - s.flushSpoolBacklog(ctx, 10000) - s.cleanupSpool() - - for { - select { - case <-ctx.Done(): - s.infof("spool_loop_stop", "reason=context_done") - return - case <-flushTicker.C: - s.flushSpoolBacklog(ctx, 10000) - case <-cleanupTicker.C: - s.cleanupSpool() - } - } -} - -func (s *Service) flushSpoolBacklog(ctx context.Context, maxTotal int) { - if s == nil || s.pipeline == nil { - return - } - - flush, warnings := FlushInBatches(ctx, s.pipeline, maxTotal) - if flush.Processed > 0 || len(warnings) > 0 { - s.infof( - "spool_flush", - "processed=%d ingested=%d deduped=%d failed=%d warnings=%d", - flush.Processed, flush.Ingested, flush.Deduped, flush.Failed, len(warnings), - ) - for _, warning := range warnings { - s.warnf("spool_flush_warning", "message=%q", warning) - } - } -} - -func (s *Service) cleanupSpool() { - if s == nil || strings.TrimSpace(s.cfg.SpoolDir) == "" { - return - } - - policy := telemetry.SpoolCleanupPolicy{ - MaxAge: 96 * time.Hour, - MaxFiles: 25000, - MaxBytes: 768 << 20, - } - - s.spoolMu.Lock() - result, err := telemetry.NewSpool(s.cfg.SpoolDir).Cleanup(policy) - s.spoolMu.Unlock() - if err != nil { - if s.shouldLog("spool_cleanup_error", 20*time.Second) { - s.warnf("spool_cleanup_error", "error=%v", err) - } - return - } - if result.RemovedFiles > 0 { - s.infof( - "spool_cleanup", - "removed_files=%d removed_bytes=%d remaining_files=%d remaining_bytes=%d", - 
result.RemovedFiles, - result.RemovedBytes, - result.RemainingFiles, - result.RemainingBytes, - ) - return - } - if s.shouldLog("spool_cleanup_steady", 30*time.Minute) { - s.infof( - "spool_cleanup_steady", - "remaining_files=%d remaining_bytes=%d", - result.RemainingFiles, - result.RemainingBytes, - ) - } -} - -func (s *Service) runHookSpoolLoop(ctx context.Context) { - if s == nil { - return - } - hookSpoolDir, err := telemetry.DefaultHookSpoolDir() - if err != nil { - s.warnf("hook_spool_loop", "resolve dir error=%v", err) - return - } - - processInterval := 5 * time.Second - cleanupInterval := 5 * time.Minute - processTicker := time.NewTicker(processInterval) - cleanupTicker := time.NewTicker(cleanupInterval) - defer processTicker.Stop() - defer cleanupTicker.Stop() - - s.infof( - "hook_spool_loop_start", - "dir=%s process_interval=%s cleanup_interval=%s", - hookSpoolDir, - processInterval, - cleanupInterval, - ) - s.processHookSpool(ctx, hookSpoolDir) - s.cleanupHookSpool(hookSpoolDir) - - for { - select { - case <-ctx.Done(): - s.infof("hook_spool_loop_stop", "reason=context_done") - return - case <-processTicker.C: - s.processHookSpool(ctx, hookSpoolDir) - case <-cleanupTicker.C: - s.cleanupHookSpool(hookSpoolDir) - } - } -} - -type rawHookFile struct { - Source string `json:"source"` - AccountID string `json:"account_id"` - Payload json.RawMessage `json:"payload"` -} - -const hookSpoolBatchLimit = 200 - -func (s *Service) processHookSpool(ctx context.Context, dir string) { - files, err := filepath.Glob(filepath.Join(dir, "*.json")) - if err != nil || len(files) == 0 { - return - } - - processed := 0 - for _, path := range files { - if processed >= hookSpoolBatchLimit { - break - } - if ctx.Err() != nil { - return - } - - data, readErr := os.ReadFile(path) - if readErr != nil { - _ = os.Remove(path) - processed++ - continue - } - - var raw rawHookFile - if json.Unmarshal(data, &raw) != nil || len(raw.Payload) == 0 { - _ = os.Remove(path) - processed++ - 
continue - } - - source, ok := providers.TelemetrySourceBySystem(raw.Source) - if !ok { - _ = os.Remove(path) - processed++ - continue - } - - reqs, parseErr := telemetry.ParseSourceHookPayload( - source, raw.Payload, - source.DefaultCollectOptions(), - strings.TrimSpace(raw.AccountID), - ) - if parseErr != nil || len(reqs) == 0 { - _ = os.Remove(path) - processed++ - continue - } - - tally, _ := s.ingestBatch(ctx, reqs) - _ = os.Remove(path) - processed++ - - s.infof("hook_spool_ingest", - "file=%s source=%s processed=%d ingested=%d deduped=%d failed=%d", - filepath.Base(path), raw.Source, - tally.processed, tally.ingested, tally.deduped, tally.failed, - ) - } -} - -func (s *Service) cleanupHookSpool(dir string) { - files, err := filepath.Glob(filepath.Join(dir, "*.json")) - if err != nil || len(files) == 0 { - tmps, _ := filepath.Glob(filepath.Join(dir, "*.json.tmp")) - for _, tmp := range tmps { - _ = os.Remove(tmp) - } - return - } - - now := time.Now() - removed := 0 - remaining := make([]string, 0, len(files)) - for _, path := range files { - info, statErr := os.Stat(path) - if statErr != nil { - _ = os.Remove(path) - removed++ - continue - } - if now.Sub(info.ModTime()) > 24*time.Hour { - _ = os.Remove(path) - removed++ - continue - } - remaining = append(remaining, path) - } - - if len(remaining) > 500 { - for _, path := range remaining[:len(remaining)-500] { - _ = os.Remove(path) - removed++ - } - remaining = remaining[len(remaining)-500:] - } - - tmps, _ := filepath.Glob(filepath.Join(dir, "*.json.tmp")) - for _, tmp := range tmps { - _ = os.Remove(tmp) - removed++ - } - - if removed > 0 { - s.infof("hook_spool_cleanup", "removed=%d remaining=%d", removed, len(remaining)) - } -} - -func (s *Service) collectAndFlush(ctx context.Context) { - if s == nil { - return - } - started := time.Now() - const backlogFlushLimit = 2000 - - var allReqs []telemetry.IngestRequest - totalCollected := 0 - var warnings []string - - for _, collector := range s.collectors { - 
reqs, err := collector.Collect(ctx) - if err != nil { - warnings = append(warnings, fmt.Sprintf("%s: %v", collector.Name(), err)) - continue - } - totalCollected += len(reqs) - allReqs = append(allReqs, reqs...) - } - - direct, retries := s.ingestBatch(ctx, allReqs) - flush, enqueued, flushWarnings := s.flushBacklog(ctx, retries, backlogFlushLimit) - warnings = append(warnings, flushWarnings...) - - durationMs := time.Since(started).Milliseconds() - if totalCollected > 0 || direct.processed > 0 || enqueued > 0 || flush.Processed > 0 || len(warnings) > 0 { - s.infof( - "collect_cycle", - "duration_ms=%d collected=%d direct_processed=%d direct_ingested=%d direct_deduped=%d direct_failed=%d enqueued=%d flush_processed=%d flush_ingested=%d flush_deduped=%d flush_failed=%d warnings=%d", - durationMs, totalCollected, - direct.processed, direct.ingested, direct.deduped, direct.failed, - enqueued, flush.Processed, flush.Ingested, flush.Deduped, flush.Failed, - len(warnings), - ) - for _, warning := range warnings { - s.warnf("collect_warning", "message=%q", warning) - } - s.pruneTelemetryOrphans(ctx) - return - } - - if durationMs >= 1500 && s.shouldLog("collect_slow", 30*time.Second) { - s.infof("collect_idle_slow", "duration_ms=%d", durationMs) - } - - s.pruneTelemetryOrphans(ctx) -} - -func (s *Service) pruneTelemetryOrphans(ctx context.Context) { - if s == nil || s.store == nil { - return - } - if !s.shouldLog("prune_orphan_raw_events_tick", 45*time.Second) { - return - } - - const pruneBatchSize = 10000 - pruneCtx, cancel := context.WithTimeout(ctx, 4*time.Second) - defer cancel() - - removed, err := s.store.PruneOrphanRawEvents(pruneCtx, pruneBatchSize) - if err != nil { - if s.shouldLog("prune_orphan_raw_events_error", 20*time.Second) { - s.warnf("prune_orphan_raw_events_error", "error=%v", err) - } - return - } - if removed > 0 { - s.infof("prune_orphan_raw_events", "removed=%d batch_size=%d", removed, pruneBatchSize) - } - - payloadCtx, payloadCancel := 
context.WithTimeout(ctx, 4*time.Second) - defer payloadCancel() - pruned, pruneErr := s.store.PruneRawEventPayloads(payloadCtx, 1, pruneBatchSize) - if pruneErr == nil && pruned > 0 { - s.infof("prune_raw_payloads", "pruned=%d", pruned) - } -} - -func (s *Service) runRetentionLoop(ctx context.Context) { - s.pruneOldData(ctx) - ticker := time.NewTicker(6 * time.Hour) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - s.infof("retention_loop_stop", "reason=context_done") - return - case <-ticker.C: - s.pruneOldData(ctx) - } - } -} - -func (s *Service) pruneOldData(ctx context.Context) { - if s == nil || s.store == nil { - return - } - cfg, err := config.Load() - if err != nil { - if s.shouldLog("retention_config_error", 30*time.Second) { - s.warnf("retention_config_error", "error=%v", err) - } - return - } - retentionDays := cfg.Data.RetentionDays - if retentionDays <= 0 { - retentionDays = 30 - } - - pruneCtx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - deleted, err := s.store.PruneOldEvents(pruneCtx, retentionDays) - if err != nil { - if s.shouldLog("retention_prune_error", 30*time.Second) { - s.warnf("retention_prune_error", "error=%v", err) - } - return - } - if deleted > 0 { - s.infof("retention_prune", "deleted=%d retention_days=%d", deleted, retentionDays) - orphanCtx, orphanCancel := context.WithTimeout(ctx, 10*time.Second) - defer orphanCancel() - orphaned, orphanErr := s.store.PruneOrphanRawEvents(orphanCtx, 50000) - if orphanErr != nil { - s.warnf("retention_orphan_prune_error", "error=%v", orphanErr) - } else if orphaned > 0 { - s.infof("retention_orphan_prune", "removed=%d", orphaned) - } - } -} - -func (s *Service) runPollLoop(ctx context.Context) { - ticker := time.NewTicker(s.cfg.PollInterval) - defer ticker.Stop() - - s.infof("poll_loop_start", "interval=%s", s.cfg.PollInterval) - s.pollProviders(ctx) - for { - select { - case <-ctx.Done(): - s.infof("poll_loop_stop", "reason=context_done") - return - case 
<-ticker.C: - s.pollProviders(ctx) - } - } -} - -func (s *Service) pollProviders(ctx context.Context) { - if s == nil || s.quotaIngest == nil { - return - } - started := time.Now() - - accounts, modelNorm, err := LoadAccountsAndNorm() - if err != nil { - if s.shouldLog("poll_config_warning", 20*time.Second) { - s.warnf("poll_config_warning", "error=%v", err) - } - return - } - if len(accounts) == 0 { - if s.shouldLog("poll_no_accounts", 30*time.Second) { - s.infof("poll_skipped", "reason=no_enabled_accounts") - } - return - } - - type providerResult struct { - accountID string - snapshot core.UsageSnapshot - } - - results := make(chan providerResult, len(accounts)) - var wg sync.WaitGroup - - for _, acct := range accounts { - wg.Add(1) - go func(account core.AccountConfig) { - defer wg.Done() - - provider, ok := s.providerByID[account.Provider] - if !ok { - results <- providerResult{ - accountID: account.ID, - snapshot: core.UsageSnapshot{ - ProviderID: account.Provider, - AccountID: account.ID, - Timestamp: time.Now().UTC(), - Status: core.StatusError, - Message: fmt.Sprintf("no provider adapter registered for %q (restart/reinstall telemetry daemon if recently added)", account.Provider), - }, - } - return - } - - fetchCtx, cancel := context.WithTimeout(ctx, 8*time.Second) - defer cancel() - - snap, fetchErr := provider.Fetch(fetchCtx, account) - if fetchErr != nil { - snap = core.UsageSnapshot{ - ProviderID: account.Provider, - AccountID: account.ID, - Timestamp: time.Now().UTC(), - Status: core.StatusError, - Message: fetchErr.Error(), - } - } - snap = core.NormalizeUsageSnapshotWithConfig(snap, modelNorm) - results <- providerResult{accountID: account.ID, snapshot: snap} - }(acct) - } - - go func() { - wg.Wait() - close(results) - }() - - snapshots := make(map[string]core.UsageSnapshot, len(accounts)) - statusCounts := map[core.Status]int{} - errorCount := 0 - for result := range results { - snapshots[result.accountID] = result.snapshot - 
statusCounts[result.snapshot.Status]++ - if result.snapshot.Status == core.StatusError { - errorCount++ - } - } - if len(snapshots) == 0 { - return - } - - ingestCtx, cancel := context.WithTimeout(ctx, 12*time.Second) - defer cancel() - ingestErr := s.ingestQuotaSnapshots(ingestCtx, snapshots) - if ingestErr != nil && s.shouldLog("poll_ingest_warning", 10*time.Second) { - s.warnf("poll_ingest_warning", "error=%v", ingestErr) - } - - durationMs := time.Since(started).Milliseconds() - if ingestErr != nil || errorCount > 0 || s.shouldLog("poll_cycle_info", 45*time.Second) { - s.infof( - "poll_cycle", - "duration_ms=%d accounts=%d snapshots=%d status_ok=%d status_auth=%d status_limited=%d status_error=%d status_unknown=%d ingest_error=%t", - durationMs, - len(accounts), - len(snapshots), - statusCounts[core.StatusOK], - statusCounts[core.StatusAuth], - statusCounts[core.StatusLimited], - statusCounts[core.StatusError], - statusCounts[core.StatusUnknown], - ingestErr != nil, - ) - } -} diff --git a/internal/daemon/server_poll.go b/internal/daemon/server_poll.go new file mode 100644 index 0000000..7790128 --- /dev/null +++ b/internal/daemon/server_poll.go @@ -0,0 +1,137 @@ +package daemon + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (s *Service) runPollLoop(ctx context.Context) { + ticker := time.NewTicker(s.cfg.PollInterval) + defer ticker.Stop() + + s.infof("poll_loop_start", "interval=%s", s.cfg.PollInterval) + s.pollProviders(ctx) + for { + select { + case <-ctx.Done(): + s.infof("poll_loop_stop", "reason=context_done") + return + case <-ticker.C: + s.pollProviders(ctx) + } + } +} + +func (s *Service) pollProviders(ctx context.Context) { + if s == nil || s.quotaIngest == nil { + return + } + started := time.Now() + + accounts, modelNorm, err := LoadAccountsAndNorm() + if err != nil { + if s.shouldLog("poll_config_warning", 20*time.Second) { + s.warnf("poll_config_warning", "error=%v", err) + } 
+ return + } + if len(accounts) == 0 { + if s.shouldLog("poll_no_accounts", 30*time.Second) { + s.infof("poll_skipped", "reason=no_enabled_accounts") + } + return + } + + type providerResult struct { + accountID string + snapshot core.UsageSnapshot + } + + results := make(chan providerResult, len(accounts)) + var wg sync.WaitGroup + + for _, acct := range accounts { + wg.Add(1) + go func(account core.AccountConfig) { + defer wg.Done() + + provider, ok := s.providerByID[account.Provider] + if !ok { + results <- providerResult{ + accountID: account.ID, + snapshot: core.UsageSnapshot{ + ProviderID: account.Provider, + AccountID: account.ID, + Timestamp: time.Now().UTC(), + Status: core.StatusError, + Message: fmt.Sprintf("no provider adapter registered for %q (restart/reinstall telemetry daemon if recently added)", account.Provider), + }, + } + return + } + + fetchCtx, cancel := context.WithTimeout(ctx, 8*time.Second) + defer cancel() + + snap, fetchErr := provider.Fetch(fetchCtx, account) + if fetchErr != nil { + snap = core.UsageSnapshot{ + ProviderID: account.Provider, + AccountID: account.ID, + Timestamp: time.Now().UTC(), + Status: core.StatusError, + Message: fetchErr.Error(), + } + } + snap = core.NormalizeUsageSnapshotWithConfig(snap, modelNorm) + results <- providerResult{accountID: account.ID, snapshot: snap} + }(acct) + } + + go func() { + wg.Wait() + close(results) + }() + + snapshots := make(map[string]core.UsageSnapshot, len(accounts)) + statusCounts := map[core.Status]int{} + errorCount := 0 + for result := range results { + snapshots[result.accountID] = result.snapshot + statusCounts[result.snapshot.Status]++ + if result.snapshot.Status == core.StatusError { + errorCount++ + } + } + if len(snapshots) == 0 { + return + } + + ingestCtx, cancel := context.WithTimeout(ctx, 12*time.Second) + defer cancel() + ingestErr := s.ingestQuotaSnapshots(ingestCtx, snapshots) + if ingestErr != nil && s.shouldLog("poll_ingest_warning", 10*time.Second) { + 
s.warnf("poll_ingest_warning", "error=%v", ingestErr) + } + + durationMs := time.Since(started).Milliseconds() + if ingestErr != nil || errorCount > 0 || s.shouldLog("poll_cycle_info", 45*time.Second) { + s.infof( + "poll_cycle", + "duration_ms=%d accounts=%d snapshots=%d status_ok=%d status_auth=%d status_limited=%d status_error=%d status_unknown=%d ingest_error=%t", + durationMs, + len(accounts), + len(snapshots), + statusCounts[core.StatusOK], + statusCounts[core.StatusAuth], + statusCounts[core.StatusLimited], + statusCounts[core.StatusError], + statusCounts[core.StatusUnknown], + ingestErr != nil, + ) + } +} diff --git a/internal/daemon/server_spool.go b/internal/daemon/server_spool.go new file mode 100644 index 0000000..177ba99 --- /dev/null +++ b/internal/daemon/server_spool.go @@ -0,0 +1,251 @@ +package daemon + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/providers" + "github.com/janekbaraniewski/openusage/internal/telemetry" +) + +func (s *Service) runSpoolMaintenanceLoop(ctx context.Context) { + if s == nil { + return + } + flushTicker := time.NewTicker(5 * time.Second) + cleanupTicker := time.NewTicker(60 * time.Second) + defer flushTicker.Stop() + defer cleanupTicker.Stop() + + s.infof("spool_loop_start", "flush_interval=%s cleanup_interval=%s", 5*time.Second, 60*time.Second) + s.flushSpoolBacklog(ctx, 10000) + s.cleanupSpool() + + for { + select { + case <-ctx.Done(): + s.infof("spool_loop_stop", "reason=context_done") + return + case <-flushTicker.C: + s.flushSpoolBacklog(ctx, 10000) + case <-cleanupTicker.C: + s.cleanupSpool() + } + } +} + +func (s *Service) flushSpoolBacklog(ctx context.Context, maxTotal int) { + if s == nil || s.pipeline == nil { + return + } + + flush, warnings := FlushInBatches(ctx, s.pipeline, maxTotal) + if flush.Processed > 0 || len(warnings) > 0 { + s.infof( + "spool_flush", + "processed=%d ingested=%d deduped=%d failed=%d 
warnings=%d", + flush.Processed, flush.Ingested, flush.Deduped, flush.Failed, len(warnings), + ) + for _, warning := range warnings { + s.warnf("spool_flush_warning", "message=%q", warning) + } + } +} + +func (s *Service) cleanupSpool() { + if s == nil || strings.TrimSpace(s.cfg.SpoolDir) == "" { + return + } + + policy := telemetry.SpoolCleanupPolicy{ + MaxAge: 96 * time.Hour, + MaxFiles: 25000, + MaxBytes: 768 << 20, + } + + s.spoolMu.Lock() + result, err := telemetry.NewSpool(s.cfg.SpoolDir).Cleanup(policy) + s.spoolMu.Unlock() + if err != nil { + if s.shouldLog("spool_cleanup_error", 20*time.Second) { + s.warnf("spool_cleanup_error", "error=%v", err) + } + return + } + if result.RemovedFiles > 0 { + s.infof( + "spool_cleanup", + "removed_files=%d removed_bytes=%d remaining_files=%d remaining_bytes=%d", + result.RemovedFiles, + result.RemovedBytes, + result.RemainingFiles, + result.RemainingBytes, + ) + return + } + if s.shouldLog("spool_cleanup_steady", 30*time.Minute) { + s.infof( + "spool_cleanup_steady", + "remaining_files=%d remaining_bytes=%d", + result.RemainingFiles, + result.RemainingBytes, + ) + } +} + +func (s *Service) runHookSpoolLoop(ctx context.Context) { + if s == nil { + return + } + hookSpoolDir, err := telemetry.DefaultHookSpoolDir() + if err != nil { + s.warnf("hook_spool_loop", "resolve dir error=%v", err) + return + } + + processInterval := 5 * time.Second + cleanupInterval := 5 * time.Minute + processTicker := time.NewTicker(processInterval) + cleanupTicker := time.NewTicker(cleanupInterval) + defer processTicker.Stop() + defer cleanupTicker.Stop() + + s.infof( + "hook_spool_loop_start", + "dir=%s process_interval=%s cleanup_interval=%s", + hookSpoolDir, + processInterval, + cleanupInterval, + ) + s.processHookSpool(ctx, hookSpoolDir) + s.cleanupHookSpool(hookSpoolDir) + + for { + select { + case <-ctx.Done(): + s.infof("hook_spool_loop_stop", "reason=context_done") + return + case <-processTicker.C: + s.processHookSpool(ctx, hookSpoolDir) 
+ case <-cleanupTicker.C: + s.cleanupHookSpool(hookSpoolDir) + } + } +} + +type rawHookFile struct { + Source string `json:"source"` + AccountID string `json:"account_id"` + Payload json.RawMessage `json:"payload"` +} + +const hookSpoolBatchLimit = 200 + +func (s *Service) processHookSpool(ctx context.Context, dir string) { + files, err := filepath.Glob(filepath.Join(dir, "*.json")) + if err != nil || len(files) == 0 { + return + } + + processed := 0 + for _, path := range files { + if processed >= hookSpoolBatchLimit || ctx.Err() != nil { + return + } + + data, readErr := os.ReadFile(path) + if readErr != nil { + _ = os.Remove(path) + processed++ + continue + } + + var raw rawHookFile + if json.Unmarshal(data, &raw) != nil || len(raw.Payload) == 0 { + _ = os.Remove(path) + processed++ + continue + } + + source, ok := providers.TelemetrySourceBySystem(raw.Source) + if !ok { + _ = os.Remove(path) + processed++ + continue + } + + reqs, parseErr := telemetry.ParseSourceHookPayload( + source, + raw.Payload, + source.DefaultCollectOptions(), + strings.TrimSpace(raw.AccountID), + ) + if parseErr != nil || len(reqs) == 0 { + _ = os.Remove(path) + processed++ + continue + } + + tally, _ := s.ingestBatch(ctx, reqs) + _ = os.Remove(path) + processed++ + + s.infof( + "hook_spool_ingest", + "file=%s source=%s processed=%d ingested=%d deduped=%d failed=%d", + filepath.Base(path), raw.Source, + tally.processed, tally.ingested, tally.deduped, tally.failed, + ) + } +} + +func (s *Service) cleanupHookSpool(dir string) { + files, err := filepath.Glob(filepath.Join(dir, "*.json")) + if err != nil || len(files) == 0 { + tmps, _ := filepath.Glob(filepath.Join(dir, "*.json.tmp")) + for _, tmp := range tmps { + _ = os.Remove(tmp) + } + return + } + + now := time.Now() + removed := 0 + remaining := make([]string, 0, len(files)) + for _, path := range files { + info, statErr := os.Stat(path) + if statErr != nil { + _ = os.Remove(path) + removed++ + continue + } + if now.Sub(info.ModTime()) 
> 24*time.Hour { + _ = os.Remove(path) + removed++ + continue + } + remaining = append(remaining, path) + } + + if len(remaining) > 500 { + for _, path := range remaining[:len(remaining)-500] { + _ = os.Remove(path) + removed++ + } + remaining = remaining[len(remaining)-500:] + } + + tmps, _ := filepath.Glob(filepath.Join(dir, "*.json.tmp")) + for _, tmp := range tmps { + _ = os.Remove(tmp) + removed++ + } + + if removed > 0 { + s.infof("hook_spool_cleanup", "removed=%d remaining=%d", removed, len(remaining)) + } +} diff --git a/internal/tui/analytics.go b/internal/tui/analytics.go index 29d2d4c..22594c5 100644 --- a/internal/tui/analytics.go +++ b/internal/tui/analytics.go @@ -655,30 +655,12 @@ func buildProviderModelTokenDistributionSeries(data costData, limit int) []Brail var cands []candidate for _, g := range data.timeSeries { - keys := lo.Keys(g.series) - sort.Strings(keys) - tokenKeys := make([]string, 0, len(keys)) - usageKeys := make([]string, 0, len(keys)) - for _, key := range keys { - if strings.HasPrefix(key, "tokens_") { - tokenKeys = append(tokenKeys, key) - } else if strings.HasPrefix(key, "usage_model_") { - usageKeys = append(usageKeys, key) - } - } - modelKeys := tokenKeys - if len(modelKeys) == 0 { - modelKeys = usageKeys - } - for _, key := range modelKeys { - pts := clipSeriesPointsByRecentDates(g.series[key], 30) + for _, named := range core.ExtractAnalyticsModelSeries(g.series) { + pts := clipSeriesPointsByRecentDates(named.Points, 30) if !hasNonZeroData(pts) { continue } - - model := key - model = strings.TrimPrefix(model, "tokens_") - model = strings.TrimPrefix(model, "usage_model_") + model := named.Name label := truncStr(prettifyModelName(model)+" · "+g.providerName, 34) cands = append(cands, candidate{ @@ -710,28 +692,8 @@ func buildProviderModelTokenDistributionSeries(data costData, limit int) []Brail } func selectBestProviderCostWeightSeries(series map[string][]core.TimePoint) []core.TimePoint { - for _, key := range []string{ - 
"tokens_total", - "messages", - "sessions", - "tool_calls", - "requests", - "tab_accepted", - "composer_accepted", - } { - if pts, ok := series[key]; ok && hasNonZeroData(pts) { - return pts - } - } - keys := lo.Keys(series) - sort.Strings(keys) - for _, key := range keys { - if strings.HasPrefix(key, "tokens_") || strings.HasPrefix(key, "usage_model_") || strings.HasPrefix(key, "usage_client_") { - pts := series[key] - if hasNonZeroData(pts) { - return pts - } - } + if pts := core.SelectAnalyticsWeightSeries(series); hasNonZeroData(pts) { + return pts } return nil } @@ -747,13 +709,8 @@ func buildProviderModelHeatmapSpec(data costData, maxRows int, lastDays int) (He dateSet := make(map[string]bool) for _, g := range data.timeSeries { - keys := lo.Keys(g.series) - sort.Strings(keys) - for _, key := range keys { - pts := g.series[key] - if !strings.HasPrefix(key, "tokens_") { - continue - } + for _, named := range core.ExtractAnalyticsModelSeries(g.series) { + pts := named.Points total := seriesTotal(pts) if total <= 0 { continue @@ -765,10 +722,10 @@ func buildProviderModelHeatmapSpec(data costData, maxRows int, lastDays int) (He dateSet[p.Date] = true } } - model := prettifyModelName(strings.TrimPrefix(key, "tokens_")) + model := prettifyModelName(named.Name) rows = append(rows, row{ label: truncStr(g.providerName+" · "+model, 42), - color: stableModelColor(key, g.providerID), + color: stableModelColor(named.Name, g.providerID), vals: vals, total: total, }) @@ -892,10 +849,8 @@ func computeAnalyticsSummary(data costData) analyticsSummary { } } if !hasTotalTokens { - for key, pts := range g.series { - if !strings.HasPrefix(key, "tokens_") { - continue - } + for _, named := range core.ExtractAnalyticsModelSeries(g.series) { + pts := named.Points for _, p := range pts { tokensByDate[p.Date] += p.Value } diff --git a/internal/tui/analytics_data.go b/internal/tui/analytics_data.go index 7fc00b3..844cc36 100644 --- a/internal/tui/analytics_data.go +++ 
b/internal/tui/analytics_data.go @@ -2,7 +2,6 @@ package tui import ( "sort" - "strconv" "strings" "github.com/charmbracelet/lipgloss" @@ -204,13 +203,8 @@ func extractCostData(snapshots map[string]core.UsageSnapshot, filter string) cos func extractProviderCost(snap core.UsageSnapshot) float64 { modelTotal := 0.0 - for key, m := range snap.Metrics { - if m.Used == nil || *m.Used <= 0 { - continue - } - if strings.HasPrefix(key, "model_") && (strings.HasSuffix(key, "_cost") || strings.HasSuffix(key, "_cost_usd")) { - modelTotal += *m.Used - } + for _, model := range core.ExtractAnalyticsModelUsage(snap) { + modelTotal += model.CostUSD } if modelTotal > 0 { return modelTotal @@ -255,192 +249,23 @@ func extract7DayCost(snap core.UsageSnapshot) float64 { } func extractAllModels(snap core.UsageSnapshot, provColor lipgloss.Color) []modelCostEntry { - if len(snap.ModelUsage) > 0 { - return extractAllModelsFromRecords(snap) - } - - type md struct { - cost float64 - input float64 - output float64 - } - models := make(map[string]*md) - var order []string - - ensure := func(name string) *md { - if _, ok := models[name]; !ok { - models[name] = &md{} - order = append(order, name) - } - return models[name] - } - - for key, m := range snap.Metrics { - if !strings.HasPrefix(key, "model_") { - continue - } - name := strings.TrimPrefix(key, "model_") - switch { - case strings.HasSuffix(name, "_cost_usd"): - name = strings.TrimSuffix(name, "_cost_usd") - if m.Used != nil && *m.Used > 0 { - ensure(name).cost += *m.Used - } - case strings.HasSuffix(name, "_cost"): - name = strings.TrimSuffix(name, "_cost") - if m.Used != nil && *m.Used > 0 { - ensure(name).cost += *m.Used - } - case strings.HasSuffix(name, "_input_tokens"): - name = strings.TrimSuffix(name, "_input_tokens") - if m.Used != nil { - ensure(name).input += *m.Used - } - case strings.HasSuffix(name, "_output_tokens"): - name = strings.TrimSuffix(name, "_output_tokens") - if m.Used != nil { - ensure(name).output += *m.Used - 
} - } - } - - for key, val := range snap.Raw { - if !strings.HasPrefix(key, "model_") { - continue - } - name := strings.TrimPrefix(key, "model_") - switch { - case strings.HasSuffix(name, "_input_tokens"): - name = strings.TrimSuffix(name, "_input_tokens") - if v, err := strconv.ParseFloat(val, 64); err == nil && v > 0 { - m := ensure(name) - if m.input == 0 { - m.input = v - } - } - case strings.HasSuffix(name, "_output_tokens"): - name = strings.TrimSuffix(name, "_output_tokens") - if v, err := strconv.ParseFloat(val, 64); err == nil && v > 0 { - m := ensure(name) - if m.output == 0 { - m.output = v - } - } - } - } - - for key, m := range snap.Metrics { - switch { - case strings.HasPrefix(key, "input_tokens_"): - name := strings.TrimPrefix(key, "input_tokens_") - if m.Used != nil && *m.Used > 0 { - ensure(name).input += *m.Used - } - case strings.HasPrefix(key, "output_tokens_"): - name := strings.TrimPrefix(key, "output_tokens_") - if m.Used != nil && *m.Used > 0 { - ensure(name).output += *m.Used - } - } - } - - var result []modelCostEntry - for _, name := range order { - d := models[name] - if d.cost > 0 || d.input > 0 || d.output > 0 { - result = append(result, modelCostEntry{ - name: prettifyModelName(name), - provider: snap.AccountID, - cost: d.cost, - inputTokens: d.input, - outputTokens: d.output, - color: stableModelColor(name, snap.AccountID), - }) - } - } - return result -} - -func extractAllModelsFromRecords(snap core.UsageSnapshot) []modelCostEntry { - type md struct { - cost float64 - input float64 - output float64 - confidence float64 - window string - } - models := make(map[string]*md) - var order []string - - ensure := func(name string) *md { - if _, ok := models[name]; !ok { - models[name] = &md{} - order = append(order, name) - } - return models[name] - } - - for _, rec := range snap.ModelUsage { - name := modelRecordDisplayName(rec) - if name == "" { - continue - } - md := ensure(name) - if rec.CostUSD != nil && *rec.CostUSD > 0 { - md.cost 
+= *rec.CostUSD - } - if rec.InputTokens != nil { - md.input += *rec.InputTokens - } - if rec.OutputTokens != nil { - md.output += *rec.OutputTokens - } - if rec.TotalTokens != nil && rec.InputTokens == nil && rec.OutputTokens == nil { - md.input += *rec.TotalTokens - } - if rec.Confidence > md.confidence { - md.confidence = rec.Confidence - } - if md.window == "" { - md.window = rec.Window - } - } - - result := make([]modelCostEntry, 0, len(order)) - for _, name := range order { - md := models[name] - if md.cost <= 0 && md.input <= 0 && md.output <= 0 { - continue - } + records := core.ExtractAnalyticsModelUsage(snap) + result := make([]modelCostEntry, 0, len(records)) + for _, record := range records { result = append(result, modelCostEntry{ - name: prettifyModelName(name), + name: prettifyModelName(record.Name), provider: snap.AccountID, - cost: md.cost, - inputTokens: md.input, - outputTokens: md.output, - color: stableModelColor(name, snap.AccountID), - confidence: md.confidence, - window: md.window, + cost: record.CostUSD, + inputTokens: record.InputTokens, + outputTokens: record.OutputTokens, + color: stableModelColor(record.Name, snap.AccountID), + confidence: record.Confidence, + window: record.Window, }) } return result } -func modelRecordDisplayName(rec core.ModelUsageRecord) string { - if rec.Dimensions != nil { - if groupID := strings.TrimSpace(rec.Dimensions["canonical_group_id"]); groupID != "" { - return groupID - } - } - if strings.TrimSpace(rec.RawModelID) != "" { - return rec.RawModelID - } - if strings.TrimSpace(rec.CanonicalLineageID) != "" { - return rec.CanonicalLineageID - } - return "unknown" -} - func aggregateCanonicalModels(providers []providerCostEntry) []modelCostEntry { type splitAgg struct { cost float64 diff --git a/internal/tui/detail.go b/internal/tui/detail.go index 5ffd77f..8356c9d 100644 --- a/internal/tui/detail.go +++ b/internal/tui/detail.go @@ -28,19 +28,15 @@ func DetailTabs(snap core.UsageSnapshot) []string { tabs = 
append(tabs, g.title) } } - // Add Models tab if model data is available. - if len(snap.ModelUsage) > 0 || hasModelCostMetrics(snap) { + if hasAnalyticsModelData(snap) { tabs = append(tabs, "Models") } - // Add Languages tab if language data is available. if hasLanguageMetrics(snap) { tabs = append(tabs, "Languages") } - // Add MCP Usage tab if MCP metrics are available. if hasMCPMetrics(snap) { tabs = append(tabs, "MCP Usage") } - // Add Trends tab if daily series has enough data for a chart. if hasChartableSeries(snap.DailySeries) { tabs = append(tabs, "Trends") } @@ -97,9 +93,8 @@ func RenderDetailContent(snap core.UsageSnapshot, w int, warnThresh, critThresh } } - // Models section — dispatched directly (needs full snapshot, not just metric entries). showModels := tabName == "Models" || showAll - if showModels && (len(snap.ModelUsage) > 0 || hasModelCostMetrics(snap)) { + if showModels && hasAnalyticsModelData(snap) { sb.WriteString("\n") renderDetailSectionHeader(&sb, "Models", w) renderModelsSection(&sb, snap, widget, w) @@ -765,112 +760,63 @@ func renderSectionSparklines(sb *strings.Builder, widget core.DashboardWidget, w } } -// renderModelsSection renders ModelUsageRecord data as a horizontal bar chart of costs -// and a token breakdown for the top model. Falls back to existing model cost table -// if ModelUsage is empty but metric-based model costs exist. func renderModelsSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { - if len(snap.ModelUsage) > 0 { - // Sort by cost descending, take top 8. 
- records := make([]core.ModelUsageRecord, len(snap.ModelUsage)) - copy(records, snap.ModelUsage) - sort.Slice(records, func(i, j int) bool { - ci, cj := float64(0), float64(0) - if records[i].CostUSD != nil { - ci = *records[i].CostUSD - } - if records[j].CostUSD != nil { - cj = *records[j].CostUSD - } - return ci > cj - }) - if len(records) > 8 { - records = records[:8] - } + models := core.ExtractAnalyticsModelUsage(snap) + if len(models) == 0 { + return + } - // Build chart items. - var items []chartItem - for i, rec := range records { - cost := float64(0) - if rec.CostUSD != nil { - cost = *rec.CostUSD - } - if cost <= 0 { - continue - } - name := rec.Canonical - if name == "" { - name = rec.RawModelID - } - items = append(items, chartItem{ - Label: prettifyModelName(name), - Value: cost, - Color: stableModelColor(name, snap.ProviderID), - SubLabel: func() string { - if i == 0 && rec.InputTokens != nil { - return formatTokens(*rec.InputTokens) + " in" - } - return "" - }(), - }) - } + if len(models) > 8 { + models = models[:8] + } - if len(items) > 0 { - labelW := 22 - if w < 55 { - labelW = 16 - } - barW := w - labelW - 20 - if barW < 8 { - barW = 8 - } - if barW > 30 { - barW = 30 - } - sb.WriteString(RenderHBarChart(items, barW, labelW) + "\n") + items := make([]chartItem, 0, len(models)) + for i, model := range models { + if model.CostUSD <= 0 { + continue } + subLabel := "" + if i == 0 && model.InputTokens > 0 { + subLabel = formatTokens(model.InputTokens) + " in" + } + items = append(items, chartItem{ + Label: prettifyModelName(model.Name), + Value: model.CostUSD, + Color: stableModelColor(model.Name, snap.ProviderID), + SubLabel: subLabel, + }) + } - // Token breakdown for the top model with token data. 
- for _, rec := range records { - inTok := float64(0) - outTok := float64(0) - if rec.InputTokens != nil { - inTok = *rec.InputTokens - } - if rec.OutputTokens != nil { - outTok = *rec.OutputTokens - } - if inTok > 0 || outTok > 0 { - sb.WriteString("\n") - name := rec.Canonical - if name == "" { - name = rec.RawModelID - } - sb.WriteString(" " + dimStyle.Render("Token breakdown: "+prettifyModelName(name)) + "\n") - sb.WriteString(RenderTokenBreakdown(inTok, outTok, w-4) + "\n") - break - } + if len(items) > 0 { + labelW := 22 + if w < 55 { + labelW = 16 } - return + barW := w - labelW - 20 + if barW < 8 { + barW = 8 + } + if barW > 30 { + barW = 30 + } + sb.WriteString(RenderHBarChart(items, barW, labelW) + "\n") } - // Fallback: check for model cost metrics. - if hasModelCostMetrics(snap) { - groups := groupMetrics(snap.Metrics, widget, detailWidget(snap.ProviderID)) - for _, g := range groups { - var modelCosts []metricEntry - for _, e := range g.entries { - if isModelCostKey(e.key) { - modelCosts = append(modelCosts, e) - } - } - if len(modelCosts) > 0 { - renderModelCostsTable(sb, modelCosts, w) - return - } + for _, model := range models { + if model.InputTokens <= 0 && model.OutputTokens <= 0 { + continue } + sb.WriteString("\n") + sb.WriteString(" " + dimStyle.Render("Token breakdown: "+prettifyModelName(model.Name)) + "\n") + sb.WriteString(RenderTokenBreakdown(model.InputTokens, model.OutputTokens, w-4) + "\n") + break } } +func hasAnalyticsModelData(snap core.UsageSnapshot) bool { + return len(core.ExtractAnalyticsModelUsage(snap)) > 0 +} + // hasChartableSeries returns true if at least one daily series has >= 2 data points. 
func hasChartableSeries(series map[string][]core.TimePoint) bool { for _, pts := range series { diff --git a/internal/tui/model.go b/internal/tui/model.go index 2aec87e..95ff964 100644 --- a/internal/tui/model.go +++ b/internal/tui/model.go @@ -2,7 +2,6 @@ package tui import ( "fmt" - "log" "sort" "strings" "time" @@ -266,902 +265,8 @@ type integrationInstallResultMsg struct { Err error } -func (m Model) persistThemeCmd(themeName string) tea.Cmd { - return func() tea.Msg { - if m.services == nil { - return themePersistedMsg{err: fmt.Errorf("theme service unavailable")} - } - err := m.services.SaveTheme(themeName) - if err != nil { - log.Printf("theme persist: %v", err) - } - return themePersistedMsg{err: err} - } -} - -func (m Model) persistDashboardPrefsCmd() tea.Cmd { - providers := m.dashboardConfigProviders() - return func() tea.Msg { - if m.services == nil { - return dashboardPrefsPersistedMsg{err: fmt.Errorf("dashboard settings service unavailable")} - } - err := m.services.SaveDashboardProviders(providers) - if err != nil { - log.Printf("dashboard settings persist: %v", err) - } - return dashboardPrefsPersistedMsg{err: err} - } -} - -func (m Model) persistDashboardViewCmd() tea.Cmd { - view := string(m.configuredDashboardView()) - return func() tea.Msg { - if m.services == nil { - return dashboardViewPersistedMsg{err: fmt.Errorf("dashboard view service unavailable")} - } - err := m.services.SaveDashboardView(view) - if err != nil { - log.Printf("dashboard view persist: %v", err) - } - return dashboardViewPersistedMsg{err: err} - } -} - -func (m Model) persistDashboardWidgetSectionsCmd() tea.Cmd { - sections := m.dashboardWidgetSectionConfigEntries() - return func() tea.Msg { - if m.services == nil { - return dashboardWidgetSectionsPersistedMsg{err: fmt.Errorf("dashboard sections service unavailable")} - } - err := m.services.SaveDashboardWidgetSections(sections) - if err != nil { - log.Printf("dashboard widget sections persist: %v", err) - } - return 
dashboardWidgetSectionsPersistedMsg{err: err} - } -} - -func (m Model) persistDashboardHideSectionsWithNoDataCmd() tea.Cmd { - hide := m.hideSectionsWithNoData - return func() tea.Msg { - if m.services == nil { - return dashboardHideSectionsWithNoDataPersistedMsg{err: fmt.Errorf("dashboard empty-state service unavailable")} - } - err := m.services.SaveDashboardHideSectionsWithNoData(hide) - if err != nil { - log.Printf("dashboard hide sections with no data persist: %v", err) - } - return dashboardHideSectionsWithNoDataPersistedMsg{err: err} - } -} - -func (m Model) persistTimeWindowCmd(window string) tea.Cmd { - return func() tea.Msg { - if m.services == nil { - return timeWindowPersistedMsg{err: fmt.Errorf("time window service unavailable")} - } - err := m.services.SaveTimeWindow(window) - if err != nil { - log.Printf("time window persist: %v", err) - } - return timeWindowPersistedMsg{err: err} - } -} - -func (m Model) validateKeyCmd(accountID, providerID, apiKey string) tea.Cmd { - return func() tea.Msg { - if m.services == nil { - return validateKeyResultMsg{AccountID: accountID, Valid: false, Error: "validation service unavailable"} - } - valid, errMsg := m.services.ValidateAPIKey(accountID, providerID, apiKey) - return validateKeyResultMsg{AccountID: accountID, Valid: valid, Error: errMsg} - } -} - -func (m Model) saveCredentialCmd(accountID, apiKey string) tea.Cmd { - return func() tea.Msg { - if m.services == nil { - return credentialSavedMsg{AccountID: accountID, Err: fmt.Errorf("credential service unavailable")} - } - err := m.services.SaveCredential(accountID, apiKey) - return credentialSavedMsg{AccountID: accountID, Err: err} - } -} - -func (m Model) deleteCredentialCmd(accountID string) tea.Cmd { - return func() tea.Msg { - if m.services == nil { - return credentialDeletedMsg{AccountID: accountID, Err: fmt.Errorf("credential service unavailable")} - } - err := m.services.DeleteCredential(accountID) - return credentialDeletedMsg{AccountID: accountID, 
Err: err} - } -} - -func (m Model) installIntegrationCmd(id integrations.ID) tea.Cmd { - return func() tea.Msg { - if m.services == nil { - return integrationInstallResultMsg{IntegrationID: id, Err: fmt.Errorf("integration service unavailable")} - } - statuses, err := m.services.InstallIntegration(id) - return integrationInstallResultMsg{ - IntegrationID: id, - Statuses: statuses, - Err: err, - } - } -} - func (m Model) Init() tea.Cmd { return tickCmd() } -func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { - switch msg := msg.(type) { - case tickMsg: - m.animFrame++ - return m, tickCmd() - - case tea.WindowSizeMsg: - m.width = msg.Width - m.height = msg.Height - return m, nil - - case DaemonStatusMsg: - m.daemon.status = msg.Status - m.daemon.message = msg.Message - if msg.Status == DaemonRunning { - m.daemon.installing = false - } - return m, nil - - case AppUpdateMsg: - m.daemon.appUpdateCurrent = strings.TrimSpace(msg.CurrentVersion) - m.daemon.appUpdateLatest = strings.TrimSpace(msg.LatestVersion) - m.daemon.appUpdateHint = strings.TrimSpace(msg.UpgradeHint) - return m, nil - - case daemonInstallResultMsg: - m.daemon.installing = false - if msg.err != nil { - m.daemon.status = DaemonError - m.daemon.message = msg.err.Error() - } else { - m.daemon.installDone = true - m.daemon.status = DaemonStarting - } - return m, nil - - case SnapshotsMsg: - msgWindow := msg.TimeWindow - if msgWindow == "" { - msgWindow = core.TimeWindow30d - } - if msgWindow != m.timeWindow { - return m, nil - } - if msg.RequestID > 0 && msg.RequestID < m.lastSnapshotRequestID { - return m, nil - } - if m.refreshing && m.hasData && !snapshotsReady(msg.Snapshots) { - return m, nil - } - m.snapshots = msg.Snapshots - m.refreshing = false - if msg.RequestID > m.lastSnapshotRequestID { - m.lastSnapshotRequestID = msg.RequestID - } - if len(msg.Snapshots) > 0 || snapshotsReady(msg.Snapshots) { - m.hasData = true - m.daemon.status = DaemonRunning - } - for id, snap := range m.snapshots { - 
info := computeDisplayInfo(snap, dashboardWidget(snap.ProviderID)) - if info.reason != "" { - snap.EnsureMaps() - snap.Diagnostics["display_branch"] = info.reason - m.snapshots[id] = snap - } - } - m.ensureSnapshotProvidersKnown() - m.rebuildSortedIDs() - return m, nil - - case dashboardPrefsPersistedMsg: - if msg.err != nil { - m.settings.status = "save failed" - } else { - m.settings.status = "saved" - } - return m, nil - - case dashboardViewPersistedMsg: - if msg.err != nil { - m.settings.status = "view save failed" - } else { - m.settings.status = "view saved" - } - return m, nil - - case dashboardWidgetSectionsPersistedMsg: - if msg.err != nil { - m.settings.status = "section save failed" - } else { - m.settings.status = "sections saved" - } - return m, nil - - case dashboardHideSectionsWithNoDataPersistedMsg: - if msg.err != nil { - m.settings.status = "empty-state save failed" - } else { - m.settings.status = "empty-state saved" - } - return m, nil - - case themePersistedMsg: - if msg.err != nil { - m.settings.status = "theme save failed" - } else { - m.settings.status = "theme saved" - } - return m, nil - - case timeWindowPersistedMsg: - if msg.err != nil { - m.settings.status = "time window save failed" - } else { - m.settings.status = "time window saved" - } - return m, nil - - case validateKeyResultMsg: - if msg.Valid { - m.settings.apiKeyStatus = "valid ✓ — saving..." - return m, m.saveCredentialCmd(msg.AccountID, m.settings.apiKeyInput) - } - m.settings.apiKeyStatus = "invalid ✗" - if msg.Error != "" { - errMsg := msg.Error - if len(errMsg) > 40 { - errMsg = errMsg[:37] + "..." 
- } - m.settings.apiKeyStatus = "invalid: " + errMsg - } - return m, nil - - case credentialSavedMsg: - if msg.Err != nil { - m.settings.apiKeyStatus = "save failed" - } else { - m.settings.apiKeyStatus = "saved ✓" - apiKey := m.settings.apiKeyInput - m.settings.apiKeyEditing = false - m.settings.apiKeyInput = "" - - // Register account with engine if callback is set - if m.onAddAccount != nil { - providerID := m.accountProviders[msg.AccountID] - acct := core.AccountConfig{ - ID: msg.AccountID, - Provider: providerID, - Auth: "api_key", - Token: apiKey, - } - m.onAddAccount(acct) - } - - // Ensure the provider shows in the UI - if m.providerOrderIndex(msg.AccountID) < 0 { - m.providerOrder = append(m.providerOrder, msg.AccountID) - m.providerEnabled[msg.AccountID] = true - } - m.refreshing = true - } - return m, nil - - case credentialDeletedMsg: - if msg.Err != nil { - m.settings.status = "delete failed" - } else { - m.settings.status = "key deleted" - } - return m, nil - - case integrationInstallResultMsg: - m.settings.integrationStatus = msg.Statuses - if msg.Err != nil { - errMsg := msg.Err.Error() - if len(errMsg) > 80 { - errMsg = errMsg[:77] + "..." - } - m.settings.status = "integration install failed: " + errMsg - } else { - m.settings.status = "integration installed" - } - return m, nil - - case tea.KeyMsg: - if !m.hasData { - return m.handleSplashKey(msg) - } - return m.handleKey(msg) - case tea.MouseMsg: - return m.handleMouse(msg) - } - return m, nil -} - -func (m Model) handleSplashKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - switch msg.String() { - case "q", "ctrl+c": - return m, tea.Quit - case "enter": - if (m.daemon.status == DaemonNotInstalled || m.daemon.status == DaemonOutdated) && !m.daemon.installing { - m.daemon.installing = true - m.daemon.message = "Setting up background helper..." 
- return m, m.installDaemonCmd() - } - } - return m, nil -} - -func (m Model) handleMouse(msg tea.MouseMsg) (tea.Model, tea.Cmd) { - if m.settings.show { - return m.handleSettingsMouse(msg) - } - if m.showHelp { - return m, nil - } - if m.filter.active || m.analyticsFilter.active { - return m, nil - } - if msg.Action != tea.MouseActionPress { - return m, nil - } - - // Handle left-click tile selection in grid/list mode. - if msg.Button == tea.MouseButtonLeft { - return m.handleMouseClick(msg) - } - - scroll := 0 - switch msg.Button { - case tea.MouseButtonWheelUp: - scroll = -m.mouseScrollStep() - case tea.MouseButtonWheelDown: - scroll = m.mouseScrollStep() - default: - return m, nil - } - - if m.screen != screenDashboard { - return m, nil - } - - if m.mode == modeDetail { - m.detailOffset += scroll - if m.detailOffset < 0 { - m.detailOffset = 0 - } - return m, nil - } - - if m.mode == modeList && m.shouldUseWidgetScroll() { - m.tileOffset += scroll - if m.tileOffset < 0 { - m.tileOffset = 0 - } - return m, nil - } - - if m.mode == modeList && m.shouldUsePanelScroll() { - m.tileOffset += scroll - if m.tileOffset < 0 { - m.tileOffset = 0 - } - return m, nil - } - - if m.mode == modeList && m.activeDashboardView() == dashboardViewSplit { - step := 1 - if scroll < 0 { - step = -1 - } - next := m.cursor + step - ids := m.filteredIDs() - if next < 0 { - next = 0 - } - if next >= len(ids) { - next = len(ids) - 1 - } - if next < 0 { - next = 0 - } - m.cursor = next - } - - return m, nil -} - -func (m Model) handleSettingsMouse(msg tea.MouseMsg) (tea.Model, tea.Cmd) { - if msg.Action != tea.MouseActionPress { - return m, nil - } - if m.settings.tab != settingsTabWidgetSections { - return m, nil - } - - scroll := 0 - switch msg.Button { - case tea.MouseButtonWheelUp: - scroll = -m.mouseScrollStep() - case tea.MouseButtonWheelDown: - scroll = m.mouseScrollStep() - default: - return m, nil - } - - m.settings.previewOffset += scroll - if m.settings.previewOffset < 0 { - 
m.settings.previewOffset = 0 - } - return m, nil -} - -// handleMouseClick selects the tile under the mouse cursor when clicked. -func (m Model) handleMouseClick(msg tea.MouseMsg) (tea.Model, tea.Cmd) { - if m.screen != screenDashboard || m.mode != modeList { - return m, nil - } - - ids := m.filteredIDs() - if len(ids) == 0 { - return m, nil - } - - view := m.activeDashboardView() - switch view { - case dashboardViewGrid, dashboardViewStacked: - default: - return m, nil - } - - contentH := m.height - 3 // header (2 lines) + footer (1 line) - if contentH < 5 { - contentH = 5 - } - cols, tileW, tileMaxH := m.tileGrid(m.width, contentH, len(ids)) - if view == dashboardViewStacked { - cols = 1 - } - - // Header is 2 lines (brand + separator), content starts at Y=2. - headerH := 2 - clickY := msg.Y - headerH - clickX := msg.X - 1 // 1-char left padding in content - - if clickX < 0 || clickY < 0 { - return m, nil - } - - // Determine column from X position. - cellW := tileW + tileBorderH + tileGapH - if cellW <= 0 { - return m, nil - } - col := clickX / cellW - if col >= cols { - return m, nil - } - - // Determine row from Y position. - // We need to figure out the visible row height and account for scroll. - // Each tile row is tileMaxH + tileBorderV lines, rows separated by tileGapV. - // For single-column (stacked), tileMaxH=0 which means variable height; use an - // approximate row height in that case. - var rowH int - if tileMaxH > 0 { - rowH = tileMaxH + tileBorderV - } else { - // Variable height tiles — estimate from content area. - rowH = contentH - if len(ids) > 1 { - rowH = contentH / len(ids) - } - if rowH < tileMinHeight+tileBorderV { - rowH = tileMinHeight + tileBorderV - } - } - - rowCell := rowH + tileGapV - if rowCell <= 0 { - return m, nil - } - - // Account for scroll offset. - // In grid mode the view scrolls to keep the cursor row visible. - // We need to know which line the viewport starts at. 
- cursorRow := m.cursor / cols - totalRows := (len(ids) + cols - 1) / cols - - // Build row offsets like renderTilesWithColumns does. - rowOffsets := make([]int, totalRows) - acc := 0 - for r := 0; r < totalRows; r++ { - rowOffsets[r] = acc - acc += rowH - if r < totalRows-1 { - acc += tileGapV - } - } - totalLines := acc - - rowScrollOffset := 0 - if cols == 1 { - rowScrollOffset = m.tileOffset - } - scrollLine := 0 - if cursorRow >= 0 && cursorRow < totalRows { - scrollLine = rowOffsets[cursorRow] + rowScrollOffset - } - if scrollLine > totalLines-contentH { - scrollLine = totalLines - contentH - } - if scrollLine < 0 { - scrollLine = 0 - } - - absY := clickY + scrollLine - row := -1 - for r := 0; r < totalRows; r++ { - if absY >= rowOffsets[r] && absY < rowOffsets[r]+rowH { - row = r - break - } - } - if row < 0 { - return m, nil - } - - idx := row*cols + col - if idx < 0 || idx >= len(ids) { - return m, nil - } - - m.cursor = idx - m.tileOffset = 0 - return m, nil -} - -func (m Model) handleKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - if msg.String() == "?" 
&& !m.filter.active && !m.analyticsFilter.active && !m.settings.show { - m.showHelp = !m.showHelp - return m, nil - } - if m.showHelp { - m.showHelp = false - return m, nil - } - - if m.settings.show { - return m.handleSettingsModalKey(msg) - } - - if !m.filter.active && !m.analyticsFilter.active { - switch msg.String() { - case ",", "S": - m.openSettingsModal() - return m, nil - case "tab": - m.screen = m.nextScreen(1) - m.mode = modeList - m.detailOffset = 0 - m.tileOffset = 0 - return m, nil - case "shift+tab": - m.screen = m.nextScreen(-1) - m.mode = modeList - m.detailOffset = 0 - m.tileOffset = 0 - return m, nil - case "t": - name := CycleTheme() - return m, m.persistThemeCmd(name) - case "w": - return m.cycleTimeWindow() - case "v": - if m.screen == screenDashboard { - m.setDashboardView(m.nextDashboardView(1)) - return m, m.persistDashboardViewCmd() - } - case "V": - if m.screen == screenDashboard { - m.setDashboardView(m.nextDashboardView(-1)) - return m, m.persistDashboardViewCmd() - } - } - } - - switch m.screen { - case screenAnalytics: - return m.handleAnalyticsKey(msg) - default: - return m.handleDashboardTilesKey(msg) - } -} - -func (m Model) handleDashboardTilesKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - if m.filter.active { - return m.handleFilterKey(msg) - } - if m.mode == modeDetail { - return m.handleDetailKey(msg) - } - if m.activeDashboardView() == dashboardViewSplit { - return m.handleListKey(msg) - } - return m.handleTilesKey(msg) -} - -func (m Model) handleAnalyticsKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - if m.analyticsFilter.active { - return m.handleAnalyticsFilterKey(msg) - } - - switch msg.String() { - case "q", "ctrl+c": - return m, tea.Quit - case "s": - m.analyticsSortBy = (m.analyticsSortBy + 1) % analyticsSortCount - case "/": - m.analyticsFilter.active = true - m.analyticsFilter.text = "" - case "esc": - if m.analyticsFilter.text != "" { - m.analyticsFilter.text = "" - } - case "r": - m = m.requestRefresh() - } - return m, 
nil -} - -func (m Model) handleAnalyticsFilterKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - switch msg.String() { - case "enter": - m.analyticsFilter.active = false - case "esc": - m.analyticsFilter.active = false - m.analyticsFilter.text = "" - case "backspace": - if len(m.analyticsFilter.text) > 0 { - m.analyticsFilter.text = m.analyticsFilter.text[:len(m.analyticsFilter.text)-1] - } - default: - if len(msg.String()) == 1 { - m.analyticsFilter.text += msg.String() - } - } - return m, nil -} - -func (m Model) availableScreens() []screenTab { - if !m.experimentalAnalytics { - return []screenTab{screenDashboard} - } - return []screenTab{screenDashboard, screenAnalytics} -} - -func (m Model) nextScreen(step int) screenTab { - screens := m.availableScreens() - if len(screens) == 0 { - return screenDashboard - } - - idx := 0 - for i, screen := range screens { - if screen == m.screen { - idx = i - break - } - } - - next := (idx + step) % len(screens) - if next < 0 { - next += len(screens) - } - return screens[next] -} - -func (m Model) handleListKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - ids := m.filteredIDs() - pageStep := m.listPageStep() - switch msg.String() { - case "q", "ctrl+c": - return m, tea.Quit - case "up", "k": - if m.cursor > 0 { - m.cursor-- - m.detailOffset = 0 - m.detailTab = 0 - m.tileOffset = 0 - } - case "down", "j": - if m.cursor < len(ids)-1 { - m.cursor++ - m.detailOffset = 0 - m.detailTab = 0 - m.tileOffset = 0 - } - case "pgdown", "ctrl+d": - if len(ids) > 0 { - m.cursor = clamp(m.cursor+pageStep, 0, len(ids)-1) - } - case "pgup", "ctrl+u": - if len(ids) > 0 { - m.cursor = clamp(m.cursor-pageStep, 0, len(ids)-1) - } - case "enter", "right", "l": - m.mode = modeDetail - m.detailOffset = 0 - case "/": - m.filter.active = true - m.filter.text = "" - case "r": - m = m.requestRefresh() - } - return m, nil -} - -func (m Model) handleDetailKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - switch msg.String() { - case "q", "ctrl+c": - return m, tea.Quit - 
case "esc", "left", "h", "backspace": - m.mode = modeList - case "up", "k": - if m.detailOffset > 0 { - m.detailOffset-- - } - case "down", "j": - m.detailOffset++ // capped during render - case "g": - m.detailOffset = 0 - case "G": - m.detailOffset = 9999 // will be capped - case "[": - if m.detailTab > 0 { - m.detailTab-- - m.detailOffset = 0 - } - case "]": - m.detailTab++ - m.detailOffset = 0 - case "1", "2", "3", "4", "5", "6", "7", "8", "9": - idx := int(msg.String()[0] - '1') // "1" → 0, "2" → 1, ... - m.detailTab = idx - m.detailOffset = 0 - } - return m, nil -} - -func (m Model) handleFilterKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - switch msg.String() { - case "enter": - m.filter.active = false - m.cursor = 0 - m.tileOffset = 0 - case "esc": - m.filter.text = "" - m.filter.active = false - m.cursor = 0 - m.tileOffset = 0 - case "backspace": - if len(m.filter.text) > 0 { - m.filter.text = m.filter.text[:len(m.filter.text)-1] - } - default: - if len(msg.String()) == 1 { - m.filter.text += msg.String() - } - } - return m, nil -} - -func (m Model) handleTilesKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - ids := m.filteredIDs() - cols := m.tileCols() - scrollModeWidget := m.shouldUseWidgetScroll() - switch msg.String() { - case "q", "ctrl+c": - return m, tea.Quit - case "up", "k": - if m.cursor >= cols { - m.cursor -= cols - m.tileOffset = 0 - } - case "down", "j": - if m.cursor+cols < len(ids) { - m.cursor += cols - m.tileOffset = 0 - } - case "left", "h": - if m.cursor > 0 { - m.cursor-- - m.tileOffset = 0 - } - case "right", "l": - if m.cursor < len(ids)-1 { - m.cursor++ - m.tileOffset = 0 - } - case "pgdown", "ctrl+d": - if scrollModeWidget { - m.tileOffset += m.widgetScrollStep() - } else { - m.tileOffset += m.tileScrollStep() - } - case "pgup", "ctrl+u": - if scrollModeWidget { - m.tileOffset -= m.widgetScrollStep() - } else { - m.tileOffset -= m.tileScrollStep() - } - if m.tileOffset < 0 { - m.tileOffset = 0 - } - case "ctrl+o": - if id := 
m.selectedTileID(ids); id != "" { - m.expandedModelMixTiles[id] = !m.expandedModelMixTiles[id] - } - case "home": - m.tileOffset = 0 - case "end": - m.tileOffset = 9999 // capped during render - case "enter": - m.mode = modeDetail - m.detailOffset = 0 - case "/": - m.filter.active = true - m.filter.text = "" - case "esc": - if m.filter.text != "" { - m.filter.text = "" - m.cursor = 0 - m.tileOffset = 0 - } - case "r": - m = m.requestRefresh() - } - return m, nil -} - -func (m Model) cycleTimeWindow() (tea.Model, tea.Cmd) { - next := core.NextTimeWindow(m.timeWindow) - m = m.beginTimeWindowRefresh(next) - return m, m.persistTimeWindowCmd(string(next)) -} - -func (m Model) requestRefresh() Model { - m.refreshing = true - if m.onRefresh != nil { - m.onRefresh(m.timeWindow) - } - return m -} - -func (m Model) beginTimeWindowRefresh(window core.TimeWindow) Model { - m.timeWindow = window - if m.onTimeWindowChange != nil { - m.onTimeWindowChange(window) - } - m.refreshing = true - if m.onRefresh != nil { - m.onRefresh(window) - } - return m -} - func (m Model) selectedTileID(ids []string) string { if len(ids) == 0 { return "" @@ -1231,24 +336,6 @@ func (m Model) shouldUsePanelScroll() bool { return m.tileCols() == 1 } -func snapshotsReady(snaps map[string]core.UsageSnapshot) bool { - if len(snaps) == 0 { - return false - } - for _, snap := range snaps { - if snap.Status != core.StatusUnknown { - return true - } - if len(snap.Metrics) > 0 || - len(snap.Resets) > 0 || - len(snap.DailySeries) > 0 || - len(snap.ModelUsage) > 0 { - return true - } - } - return false -} - func (m Model) View() string { if m.width < 30 || m.height < 8 { return lipgloss.NewStyle(). 
@@ -1268,42 +355,6 @@ func (m Model) View() string { return view } -func (m Model) installDaemonCmd() tea.Cmd { - fn := m.onInstallDaemon - return func() tea.Msg { - if fn == nil { - return daemonInstallResultMsg{err: fmt.Errorf("install callback not configured")} - } - return daemonInstallResultMsg{err: fn()} - } -} - -func (m Model) renderDashboard() string { - w, h := m.width, m.height - - header := m.renderHeader(w) - headerH := strings.Count(header, "\n") + 1 - - footer := m.renderFooter(w) - footerH := strings.Count(footer, "\n") + 1 - - contentH := h - headerH - footerH - if contentH < 3 { - contentH = 3 - } - - var content string - - switch m.screen { - case screenAnalytics: - content = m.renderAnalyticsContent(w, contentH) - default: - content = m.renderDashboardContent(w, contentH) - } - - return header + "\n" + content + "\n" + footer -} - func (m Model) renderDashboardContent(w, contentH int) string { if m.mode == modeDetail { return m.renderDetailPanel(w, contentH) diff --git a/internal/tui/model_commands.go b/internal/tui/model_commands.go new file mode 100644 index 0000000..4203f5a --- /dev/null +++ b/internal/tui/model_commands.go @@ -0,0 +1,217 @@ +package tui + +import ( + "fmt" + "log" + "strings" + + tea "github.com/charmbracelet/bubbletea" + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/integrations" +) + +func (m Model) persistThemeCmd(themeName string) tea.Cmd { + return func() tea.Msg { + if m.services == nil { + return themePersistedMsg{err: fmt.Errorf("theme service unavailable")} + } + err := m.services.SaveTheme(themeName) + if err != nil { + log.Printf("theme persist: %v", err) + } + return themePersistedMsg{err: err} + } +} + +func (m Model) persistDashboardPrefsCmd() tea.Cmd { + providers := m.dashboardConfigProviders() + return func() tea.Msg { + if m.services == nil { + return dashboardPrefsPersistedMsg{err: fmt.Errorf("dashboard settings service unavailable")} + } + err := 
m.services.SaveDashboardProviders(providers) + if err != nil { + log.Printf("dashboard settings persist: %v", err) + } + return dashboardPrefsPersistedMsg{err: err} + } +} + +func (m Model) persistDashboardViewCmd() tea.Cmd { + view := string(m.configuredDashboardView()) + return func() tea.Msg { + if m.services == nil { + return dashboardViewPersistedMsg{err: fmt.Errorf("dashboard view service unavailable")} + } + err := m.services.SaveDashboardView(view) + if err != nil { + log.Printf("dashboard view persist: %v", err) + } + return dashboardViewPersistedMsg{err: err} + } +} + +func (m Model) persistDashboardWidgetSectionsCmd() tea.Cmd { + sections := m.dashboardWidgetSectionConfigEntries() + return func() tea.Msg { + if m.services == nil { + return dashboardWidgetSectionsPersistedMsg{err: fmt.Errorf("dashboard sections service unavailable")} + } + err := m.services.SaveDashboardWidgetSections(sections) + if err != nil { + log.Printf("dashboard widget sections persist: %v", err) + } + return dashboardWidgetSectionsPersistedMsg{err: err} + } +} + +func (m Model) persistDashboardHideSectionsWithNoDataCmd() tea.Cmd { + hide := m.hideSectionsWithNoData + return func() tea.Msg { + if m.services == nil { + return dashboardHideSectionsWithNoDataPersistedMsg{err: fmt.Errorf("dashboard empty-state service unavailable")} + } + err := m.services.SaveDashboardHideSectionsWithNoData(hide) + if err != nil { + log.Printf("dashboard hide sections with no data persist: %v", err) + } + return dashboardHideSectionsWithNoDataPersistedMsg{err: err} + } +} + +func (m Model) persistTimeWindowCmd(window string) tea.Cmd { + return func() tea.Msg { + if m.services == nil { + return timeWindowPersistedMsg{err: fmt.Errorf("time window service unavailable")} + } + err := m.services.SaveTimeWindow(window) + if err != nil { + log.Printf("time window persist: %v", err) + } + return timeWindowPersistedMsg{err: err} + } +} + +func (m Model) validateKeyCmd(accountID, providerID, apiKey string) 
tea.Cmd { + return func() tea.Msg { + if m.services == nil { + return validateKeyResultMsg{AccountID: accountID, Valid: false, Error: "validation service unavailable"} + } + valid, errMsg := m.services.ValidateAPIKey(accountID, providerID, apiKey) + return validateKeyResultMsg{AccountID: accountID, Valid: valid, Error: errMsg} + } +} + +func (m Model) saveCredentialCmd(accountID, apiKey string) tea.Cmd { + return func() tea.Msg { + if m.services == nil { + return credentialSavedMsg{AccountID: accountID, Err: fmt.Errorf("credential service unavailable")} + } + err := m.services.SaveCredential(accountID, apiKey) + return credentialSavedMsg{AccountID: accountID, Err: err} + } +} + +func (m Model) deleteCredentialCmd(accountID string) tea.Cmd { + return func() tea.Msg { + if m.services == nil { + return credentialDeletedMsg{AccountID: accountID, Err: fmt.Errorf("credential service unavailable")} + } + err := m.services.DeleteCredential(accountID) + return credentialDeletedMsg{AccountID: accountID, Err: err} + } +} + +func (m Model) installIntegrationCmd(id integrations.ID) tea.Cmd { + return func() tea.Msg { + if m.services == nil { + return integrationInstallResultMsg{IntegrationID: id, Err: fmt.Errorf("integration service unavailable")} + } + statuses, err := m.services.InstallIntegration(id) + return integrationInstallResultMsg{ + IntegrationID: id, + Statuses: statuses, + Err: err, + } + } +} + +func (m Model) cycleTimeWindow() (tea.Model, tea.Cmd) { + next := core.NextTimeWindow(m.timeWindow) + m = m.beginTimeWindowRefresh(next) + return m, m.persistTimeWindowCmd(string(next)) +} + +func (m Model) requestRefresh() Model { + m.refreshing = true + if m.onRefresh != nil { + m.onRefresh(m.timeWindow) + } + return m +} + +func (m Model) beginTimeWindowRefresh(window core.TimeWindow) Model { + m.timeWindow = window + if m.onTimeWindowChange != nil { + m.onTimeWindowChange(window) + } + m.refreshing = true + if m.onRefresh != nil { + m.onRefresh(window) + } + return m +} 
+ +func (m Model) installDaemonCmd() tea.Cmd { + fn := m.onInstallDaemon + return func() tea.Msg { + if fn == nil { + return daemonInstallResultMsg{err: fmt.Errorf("install callback not configured")} + } + return daemonInstallResultMsg{err: fn()} + } +} + +func snapshotsReady(snaps map[string]core.UsageSnapshot) bool { + if len(snaps) == 0 { + return false + } + for _, snap := range snaps { + if snap.Status != core.StatusUnknown { + return true + } + if len(snap.Metrics) > 0 || + len(snap.Resets) > 0 || + len(snap.DailySeries) > 0 || + len(snap.ModelUsage) > 0 { + return true + } + } + return false +} + +func (m Model) renderDashboard() string { + w, h := m.width, m.height + + header := m.renderHeader(w) + headerH := strings.Count(header, "\n") + 1 + + footer := m.renderFooter(w) + footerH := strings.Count(footer, "\n") + 1 + + contentH := h - headerH - footerH + if contentH < 3 { + contentH = 3 + } + + var content string + + switch m.screen { + case screenAnalytics: + content = m.renderAnalyticsContent(w, contentH) + default: + content = m.renderDashboardContent(w, contentH) + } + + return header + "\n" + content + "\n" + footer +} diff --git a/internal/tui/model_input.go b/internal/tui/model_input.go new file mode 100644 index 0000000..4f44aee --- /dev/null +++ b/internal/tui/model_input.go @@ -0,0 +1,698 @@ +package tui + +import ( + "strings" + + tea "github.com/charmbracelet/bubbletea" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { + switch msg := msg.(type) { + case tickMsg: + m.animFrame++ + return m, tickCmd() + + case tea.WindowSizeMsg: + m.width = msg.Width + m.height = msg.Height + return m, nil + + case DaemonStatusMsg: + m.daemon.status = msg.Status + m.daemon.message = msg.Message + if msg.Status == DaemonRunning { + m.daemon.installing = false + } + return m, nil + + case AppUpdateMsg: + m.daemon.appUpdateCurrent = strings.TrimSpace(msg.CurrentVersion) + m.daemon.appUpdateLatest 
= strings.TrimSpace(msg.LatestVersion) + m.daemon.appUpdateHint = strings.TrimSpace(msg.UpgradeHint) + return m, nil + + case daemonInstallResultMsg: + m.daemon.installing = false + if msg.err != nil { + m.daemon.status = DaemonError + m.daemon.message = msg.err.Error() + } else { + m.daemon.installDone = true + m.daemon.status = DaemonStarting + } + return m, nil + + case SnapshotsMsg: + msgWindow := msg.TimeWindow + if msgWindow == "" { + msgWindow = core.TimeWindow30d + } + if msgWindow != m.timeWindow { + return m, nil + } + if msg.RequestID > 0 && msg.RequestID < m.lastSnapshotRequestID { + return m, nil + } + if m.refreshing && m.hasData && !snapshotsReady(msg.Snapshots) { + return m, nil + } + m.snapshots = msg.Snapshots + m.refreshing = false + if msg.RequestID > m.lastSnapshotRequestID { + m.lastSnapshotRequestID = msg.RequestID + } + if len(msg.Snapshots) > 0 || snapshotsReady(msg.Snapshots) { + m.hasData = true + m.daemon.status = DaemonRunning + } + for id, snap := range m.snapshots { + info := computeDisplayInfo(snap, dashboardWidget(snap.ProviderID)) + if info.reason != "" { + snap.EnsureMaps() + snap.Diagnostics["display_branch"] = info.reason + m.snapshots[id] = snap + } + } + m.ensureSnapshotProvidersKnown() + m.rebuildSortedIDs() + return m, nil + + case dashboardPrefsPersistedMsg: + if msg.err != nil { + m.settings.status = "save failed" + } else { + m.settings.status = "saved" + } + return m, nil + + case dashboardViewPersistedMsg: + if msg.err != nil { + m.settings.status = "view save failed" + } else { + m.settings.status = "view saved" + } + return m, nil + + case dashboardWidgetSectionsPersistedMsg: + if msg.err != nil { + m.settings.status = "section save failed" + } else { + m.settings.status = "sections saved" + } + return m, nil + + case dashboardHideSectionsWithNoDataPersistedMsg: + if msg.err != nil { + m.settings.status = "empty-state save failed" + } else { + m.settings.status = "empty-state saved" + } + return m, nil + + case 
themePersistedMsg: + if msg.err != nil { + m.settings.status = "theme save failed" + } else { + m.settings.status = "theme saved" + } + return m, nil + + case timeWindowPersistedMsg: + if msg.err != nil { + m.settings.status = "time window save failed" + } else { + m.settings.status = "time window saved" + } + return m, nil + + case validateKeyResultMsg: + if msg.Valid { + m.settings.apiKeyStatus = "valid ✓ — saving..." + return m, m.saveCredentialCmd(msg.AccountID, m.settings.apiKeyInput) + } + m.settings.apiKeyStatus = "invalid ✗" + if msg.Error != "" { + errMsg := msg.Error + if len(errMsg) > 40 { + errMsg = errMsg[:37] + "..." + } + m.settings.apiKeyStatus = "invalid: " + errMsg + } + return m, nil + + case credentialSavedMsg: + if msg.Err != nil { + m.settings.apiKeyStatus = "save failed" + } else { + m.settings.apiKeyStatus = "saved ✓" + apiKey := m.settings.apiKeyInput + m.settings.apiKeyEditing = false + m.settings.apiKeyInput = "" + if m.onAddAccount != nil { + providerID := m.accountProviders[msg.AccountID] + acct := core.AccountConfig{ + ID: msg.AccountID, + Provider: providerID, + Auth: "api_key", + Token: apiKey, + } + m.onAddAccount(acct) + } + if m.providerOrderIndex(msg.AccountID) < 0 { + m.providerOrder = append(m.providerOrder, msg.AccountID) + m.providerEnabled[msg.AccountID] = true + } + m.refreshing = true + } + return m, nil + + case credentialDeletedMsg: + if msg.Err != nil { + m.settings.status = "delete failed" + } else { + m.settings.status = "key deleted" + } + return m, nil + + case integrationInstallResultMsg: + m.settings.integrationStatus = msg.Statuses + if msg.Err != nil { + errMsg := msg.Err.Error() + if len(errMsg) > 80 { + errMsg = errMsg[:77] + "..." 
+ } + m.settings.status = "integration install failed: " + errMsg + } else { + m.settings.status = "integration installed" + } + return m, nil + + case tea.KeyMsg: + if !m.hasData { + return m.handleSplashKey(msg) + } + return m.handleKey(msg) + case tea.MouseMsg: + return m.handleMouse(msg) + } + return m, nil +} + +func (m Model) handleSplashKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + switch msg.String() { + case "q", "ctrl+c": + return m, tea.Quit + case "enter": + if (m.daemon.status == DaemonNotInstalled || m.daemon.status == DaemonOutdated) && !m.daemon.installing { + m.daemon.installing = true + m.daemon.message = "Setting up background helper..." + return m, m.installDaemonCmd() + } + } + return m, nil +} + +func (m Model) handleMouse(msg tea.MouseMsg) (tea.Model, tea.Cmd) { + if m.settings.show { + return m.handleSettingsMouse(msg) + } + if m.showHelp || m.filter.active || m.analyticsFilter.active { + return m, nil + } + if msg.Action != tea.MouseActionPress { + return m, nil + } + if msg.Button == tea.MouseButtonLeft { + return m.handleMouseClick(msg) + } + + scroll := 0 + switch msg.Button { + case tea.MouseButtonWheelUp: + scroll = -m.mouseScrollStep() + case tea.MouseButtonWheelDown: + scroll = m.mouseScrollStep() + default: + return m, nil + } + + if m.screen != screenDashboard { + return m, nil + } + if m.mode == modeDetail { + m.detailOffset += scroll + if m.detailOffset < 0 { + m.detailOffset = 0 + } + return m, nil + } + if m.mode == modeList && (m.shouldUseWidgetScroll() || m.shouldUsePanelScroll()) { + m.tileOffset += scroll + if m.tileOffset < 0 { + m.tileOffset = 0 + } + return m, nil + } + if m.mode == modeList && m.activeDashboardView() == dashboardViewSplit { + step := 1 + if scroll < 0 { + step = -1 + } + next := m.cursor + step + ids := m.filteredIDs() + if next < 0 { + next = 0 + } + if next >= len(ids) { + next = len(ids) - 1 + } + if next < 0 { + next = 0 + } + m.cursor = next + } + return m, nil +} + +func (m Model) 
handleSettingsMouse(msg tea.MouseMsg) (tea.Model, tea.Cmd) { + if msg.Action != tea.MouseActionPress || m.settings.tab != settingsTabWidgetSections { + return m, nil + } + + scroll := 0 + switch msg.Button { + case tea.MouseButtonWheelUp: + scroll = -m.mouseScrollStep() + case tea.MouseButtonWheelDown: + scroll = m.mouseScrollStep() + default: + return m, nil + } + + m.settings.previewOffset += scroll + if m.settings.previewOffset < 0 { + m.settings.previewOffset = 0 + } + return m, nil +} + +func (m Model) handleMouseClick(msg tea.MouseMsg) (tea.Model, tea.Cmd) { + if m.screen != screenDashboard || m.mode != modeList { + return m, nil + } + + ids := m.filteredIDs() + if len(ids) == 0 { + return m, nil + } + view := m.activeDashboardView() + if view != dashboardViewGrid && view != dashboardViewStacked { + return m, nil + } + + contentH := m.height - 3 + if contentH < 5 { + contentH = 5 + } + cols, tileW, tileMaxH := m.tileGrid(m.width, contentH, len(ids)) + if view == dashboardViewStacked { + cols = 1 + } + + clickY := msg.Y - 2 + clickX := msg.X - 1 + if clickX < 0 || clickY < 0 { + return m, nil + } + + cellW := tileW + tileBorderH + tileGapH + if cellW <= 0 { + return m, nil + } + col := clickX / cellW + if col >= cols { + return m, nil + } + + rowH := tileMaxH + tileBorderV + if tileMaxH <= 0 { + rowH = contentH + if len(ids) > 1 { + rowH = contentH / len(ids) + } + if rowH < tileMinHeight+tileBorderV { + rowH = tileMinHeight + tileBorderV + } + } + rowCell := rowH + tileGapV + if rowCell <= 0 { + return m, nil + } + + cursorRow := m.cursor / cols + totalRows := (len(ids) + cols - 1) / cols + rowOffsets := make([]int, totalRows) + acc := 0 + for r := 0; r < totalRows; r++ { + rowOffsets[r] = acc + acc += rowH + if r < totalRows-1 { + acc += tileGapV + } + } + + rowScrollOffset := 0 + if cols == 1 { + rowScrollOffset = m.tileOffset + } + scrollLine := 0 + if cursorRow >= 0 && cursorRow < totalRows { + scrollLine = rowOffsets[cursorRow] + rowScrollOffset + } + if 
scrollLine > acc-contentH { + scrollLine = acc - contentH + } + if scrollLine < 0 { + scrollLine = 0 + } + + absY := clickY + scrollLine + row := -1 + for r := 0; r < totalRows; r++ { + if absY >= rowOffsets[r] && absY < rowOffsets[r]+rowH { + row = r + break + } + } + if row < 0 { + return m, nil + } + + idx := row*cols + col + if idx < 0 || idx >= len(ids) { + return m, nil + } + + m.cursor = idx + m.tileOffset = 0 + return m, nil +} + +func (m Model) handleKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + if msg.String() == "?" && !m.filter.active && !m.analyticsFilter.active && !m.settings.show { + m.showHelp = !m.showHelp + return m, nil + } + if m.showHelp { + m.showHelp = false + return m, nil + } + if m.settings.show { + return m.handleSettingsModalKey(msg) + } + + if !m.filter.active && !m.analyticsFilter.active { + switch msg.String() { + case ",", "S": + m.openSettingsModal() + return m, nil + case "tab": + m.screen = m.nextScreen(1) + m.mode = modeList + m.detailOffset = 0 + m.tileOffset = 0 + return m, nil + case "shift+tab": + m.screen = m.nextScreen(-1) + m.mode = modeList + m.detailOffset = 0 + m.tileOffset = 0 + return m, nil + case "t": + return m, m.persistThemeCmd(CycleTheme()) + case "w": + return m.cycleTimeWindow() + case "v": + if m.screen == screenDashboard { + m.setDashboardView(m.nextDashboardView(1)) + return m, m.persistDashboardViewCmd() + } + case "V": + if m.screen == screenDashboard { + m.setDashboardView(m.nextDashboardView(-1)) + return m, m.persistDashboardViewCmd() + } + } + } + + if m.screen == screenAnalytics { + return m.handleAnalyticsKey(msg) + } + return m.handleDashboardTilesKey(msg) +} + +func (m Model) handleDashboardTilesKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + if m.filter.active { + return m.handleFilterKey(msg) + } + if m.mode == modeDetail { + return m.handleDetailKey(msg) + } + if m.activeDashboardView() == dashboardViewSplit { + return m.handleListKey(msg) + } + return m.handleTilesKey(msg) +} + +func (m Model) 
handleAnalyticsKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + if m.analyticsFilter.active { + return m.handleAnalyticsFilterKey(msg) + } + + switch msg.String() { + case "q", "ctrl+c": + return m, tea.Quit + case "s": + m.analyticsSortBy = (m.analyticsSortBy + 1) % analyticsSortCount + case "/": + m.analyticsFilter.active = true + m.analyticsFilter.text = "" + case "esc": + if m.analyticsFilter.text != "" { + m.analyticsFilter.text = "" + } + case "r": + m = m.requestRefresh() + } + return m, nil +} + +func (m Model) handleAnalyticsFilterKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + switch msg.String() { + case "enter": + m.analyticsFilter.active = false + case "esc": + m.analyticsFilter.active = false + m.analyticsFilter.text = "" + case "backspace": + if len(m.analyticsFilter.text) > 0 { + m.analyticsFilter.text = m.analyticsFilter.text[:len(m.analyticsFilter.text)-1] + } + default: + if len(msg.String()) == 1 { + m.analyticsFilter.text += msg.String() + } + } + return m, nil +} + +func (m Model) availableScreens() []screenTab { + if !m.experimentalAnalytics { + return []screenTab{screenDashboard} + } + return []screenTab{screenDashboard, screenAnalytics} +} + +func (m Model) nextScreen(step int) screenTab { + screens := m.availableScreens() + if len(screens) == 0 { + return screenDashboard + } + + idx := 0 + for i, screen := range screens { + if screen == m.screen { + idx = i + break + } + } + + next := (idx + step) % len(screens) + if next < 0 { + next += len(screens) + } + return screens[next] +} + +func (m Model) handleListKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + ids := m.filteredIDs() + pageStep := m.listPageStep() + switch msg.String() { + case "q", "ctrl+c": + return m, tea.Quit + case "up", "k": + if m.cursor > 0 { + m.cursor-- + m.detailOffset = 0 + m.detailTab = 0 + m.tileOffset = 0 + } + case "down", "j": + if m.cursor < len(ids)-1 { + m.cursor++ + m.detailOffset = 0 + m.detailTab = 0 + m.tileOffset = 0 + } + case "pgdown", "ctrl+d": + if len(ids) > 0 
{ + m.cursor = clamp(m.cursor+pageStep, 0, len(ids)-1) + } + case "pgup", "ctrl+u": + if len(ids) > 0 { + m.cursor = clamp(m.cursor-pageStep, 0, len(ids)-1) + } + case "enter", "right", "l": + m.mode = modeDetail + m.detailOffset = 0 + case "/": + m.filter.active = true + m.filter.text = "" + case "r": + m = m.requestRefresh() + } + return m, nil +} + +func (m Model) handleDetailKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + switch msg.String() { + case "q", "ctrl+c": + return m, tea.Quit + case "esc", "left", "h", "backspace": + m.mode = modeList + case "up", "k": + if m.detailOffset > 0 { + m.detailOffset-- + } + case "down", "j": + m.detailOffset++ + case "g": + m.detailOffset = 0 + case "G": + m.detailOffset = 9999 + case "[": + if m.detailTab > 0 { + m.detailTab-- + m.detailOffset = 0 + } + case "]": + m.detailTab++ + m.detailOffset = 0 + case "1", "2", "3", "4", "5", "6", "7", "8", "9": + m.detailTab = int(msg.String()[0] - '1') + m.detailOffset = 0 + } + return m, nil +} + +func (m Model) handleFilterKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + switch msg.String() { + case "enter": + m.filter.active = false + m.cursor = 0 + m.tileOffset = 0 + case "esc": + m.filter.text = "" + m.filter.active = false + m.cursor = 0 + m.tileOffset = 0 + case "backspace": + if len(m.filter.text) > 0 { + m.filter.text = m.filter.text[:len(m.filter.text)-1] + } + default: + if len(msg.String()) == 1 { + m.filter.text += msg.String() + } + } + return m, nil +} + +func (m Model) handleTilesKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + ids := m.filteredIDs() + cols := m.tileCols() + scrollModeWidget := m.shouldUseWidgetScroll() + switch msg.String() { + case "q", "ctrl+c": + return m, tea.Quit + case "up", "k": + if m.cursor >= cols { + m.cursor -= cols + m.tileOffset = 0 + } + case "down", "j": + if m.cursor+cols < len(ids) { + m.cursor += cols + m.tileOffset = 0 + } + case "left", "h": + if m.cursor > 0 { + m.cursor-- + m.tileOffset = 0 + } + case "right", "l": + if m.cursor < 
len(ids)-1 { + m.cursor++ + m.tileOffset = 0 + } + case "pgdown", "ctrl+d": + if scrollModeWidget { + m.tileOffset += m.widgetScrollStep() + } else { + m.tileOffset += m.tileScrollStep() + } + case "pgup", "ctrl+u": + if scrollModeWidget { + m.tileOffset -= m.widgetScrollStep() + } else { + m.tileOffset -= m.tileScrollStep() + } + if m.tileOffset < 0 { + m.tileOffset = 0 + } + case "ctrl+o": + if id := m.selectedTileID(ids); id != "" { + m.expandedModelMixTiles[id] = !m.expandedModelMixTiles[id] + } + case "home": + m.tileOffset = 0 + case "end": + m.tileOffset = 9999 + case "enter": + m.mode = modeDetail + m.detailOffset = 0 + case "/": + m.filter.active = true + m.filter.text = "" + case "esc": + if m.filter.text != "" { + m.filter.text = "" + m.cursor = 0 + m.tileOffset = 0 + } + case "r": + m = m.requestRefresh() + } + return m, nil +} From 3652539063e8245bfb017180b6fdcc956941ca49 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 14:04:59 +0100 Subject: [PATCH 12/32] refactor: finish cursor and openrouter provider splits --- internal/providers/cursor/api_projection.go | 415 ++++ internal/providers/cursor/cursor.go | 1761 ----------------- internal/providers/cursor/fetch.go | 121 ++ internal/providers/cursor/runtime.go | 86 + internal/providers/cursor/state_projection.go | 452 +++++ .../providers/cursor/tracking_projection.go | 461 +++++ internal/providers/openrouter/analytics.go | 729 +++++++ internal/providers/openrouter/openrouter.go | 1374 ------------- .../openrouter/snapshot_projection.go | 626 ++++++ 9 files changed, 2890 insertions(+), 3135 deletions(-) create mode 100644 internal/providers/cursor/api_projection.go create mode 100644 internal/providers/cursor/fetch.go create mode 100644 internal/providers/cursor/runtime.go create mode 100644 internal/providers/cursor/state_projection.go create mode 100644 internal/providers/cursor/tracking_projection.go create mode 100644 internal/providers/openrouter/analytics.go create mode 100644 
// fetchFromAPI populates snap with billing and plan data from the Cursor
// dashboard/REST APIs. Every endpoint is called independently so a single
// endpoint failure does not discard data from the others; an error is
// returned only when no endpoint yielded any usable billing signal.
func (p *Provider) fetchFromAPI(ctx context.Context, token string, snap *core.UsageSnapshot) error {
	var (
		hasPeriodUsage                  bool
		periodUsage                     currentPeriodUsageResp
		pu                              planUsage
		su                              spendLimitUsage
		totalSpendDollars, limitDollars float64
	)
	if err := p.callDashboardAPI(ctx, token, "GetCurrentPeriodUsage", &periodUsage); err != nil {
		log.Printf("[cursor] GetCurrentPeriodUsage failed (continuing with other endpoints): %v", err)
		snap.Raw["period_usage_error"] = err.Error()
	} else {
		hasPeriodUsage = true
		pu = periodUsage.PlanUsage
		su = periodUsage.SpendLimitUsage
		// The API reports cents; all metrics are stored in dollars.
		totalSpendDollars = pu.TotalSpend / 100.0
		includedDollars := pu.IncludedSpend / 100.0
		limitDollars = pu.Limit / 100.0
		bonusDollars := pu.BonusSpend / 100.0

		snap.Metrics["plan_spend"] = core.Metric{Used: &totalSpendDollars, Limit: &limitDollars, Unit: "USD", Window: "billing-cycle"}
		snap.Metrics["plan_included"] = core.Metric{Used: &includedDollars, Unit: "USD", Window: "billing-cycle"}
		snap.Metrics["plan_bonus"] = core.Metric{Used: &bonusDollars, Unit: "USD", Window: "billing-cycle"}

		// Percentage gauges: used + remaining always sum to 100.
		totalPctUsed := pu.TotalPercentUsed
		totalPctRemaining := 100.0 - totalPctUsed
		hundredPct := 100.0
		snap.Metrics["plan_percent_used"] = core.Metric{
			Used:      &totalPctUsed,
			Remaining: &totalPctRemaining,
			Limit:     &hundredPct,
			Unit:      "%",
			Window:    "billing-cycle",
		}
		autoPctUsed := pu.AutoPercentUsed
		autoPctRemaining := 100.0 - autoPctUsed
		snap.Metrics["plan_auto_percent_used"] = core.Metric{
			Used:      &autoPctUsed,
			Remaining: &autoPctRemaining,
			Limit:     &hundredPct,
			Unit:      "%",
			Window:    "billing-cycle",
		}
		apiPctUsed := pu.APIPercentUsed
		apiPctRemaining := 100.0 - apiPctUsed
		snap.Metrics["plan_api_percent_used"] = core.Metric{
			Used:      &apiPctUsed,
			Remaining: &apiPctRemaining,
			Limit:     &hundredPct,
			Unit:      "%",
			Window:    "billing-cycle",
		}

		if su.PooledLimit > 0 {
			// Team (pooled) spend limit: expose the pool plus a stacked
			// self-vs-others breakdown within it.
			pooledLimitDollars := su.PooledLimit / 100.0
			pooledUsedDollars := su.PooledUsed / 100.0
			pooledRemainingDollars := su.PooledRemaining / 100.0
			individualDollars := su.IndividualUsed / 100.0

			snap.Metrics["spend_limit"] = core.Metric{
				Limit:     &pooledLimitDollars,
				Used:      &pooledUsedDollars,
				Remaining: &pooledRemainingDollars,
				Unit:      "USD",
				Window:    "billing-cycle",
			}
			snap.Metrics["individual_spend"] = core.Metric{Used: &individualDollars, Unit: "USD", Window: "billing-cycle"}

			teamTotalUsedDollars := pooledUsedDollars
			snap.Metrics["team_budget"] = core.Metric{Limit: &pooledLimitDollars, Used: &teamTotalUsedDollars, Unit: "USD", Window: "billing-cycle"}
			selfSpend := individualDollars
			snap.Metrics["team_budget_self"] = core.Metric{Used: &selfSpend, Unit: "USD", Window: "billing-cycle"}
			// "Others" is derived by subtraction; clamp at zero in case
			// the API reports individual > pooled.
			othersSpend := pooledUsedDollars - individualDollars
			if othersSpend < 0 {
				othersSpend = 0
			}
			snap.Metrics["team_budget_others"] = core.Metric{Used: &othersSpend, Unit: "USD", Window: "billing-cycle"}

			snap.Raw["spend_limit_type"] = su.LimitType
		}

		snap.Raw["display_message"] = periodUsage.DisplayMessage
		snap.Raw["display_threshold"] = strconv.FormatFloat(periodUsage.DisplayThreshold, 'f', -1, 64)
		snap.Raw["billing_cycle_start"] = formatTimestamp(periodUsage.BillingCycleStart)
		snap.Raw["billing_cycle_end"] = formatTimestamp(periodUsage.BillingCycleEnd)

		cycleStart := shared.FlexParseTime(periodUsage.BillingCycleStart)
		cycleEnd := shared.FlexParseTime(periodUsage.BillingCycleEnd)
		if !cycleEnd.IsZero() {
			snap.Resets["billing_cycle_end"] = cycleEnd
		}
		if !cycleStart.IsZero() && !cycleEnd.IsZero() && cycleEnd.After(cycleStart) {
			// Express elapsed cycle time as a 0-100% progress gauge,
			// clamped so clock skew cannot push it out of range.
			totalDuration := cycleEnd.Sub(cycleStart).Seconds()
			elapsed := time.Since(cycleStart).Seconds()
			if elapsed < 0 {
				elapsed = 0
			}
			if elapsed > totalDuration {
				elapsed = totalDuration
			}
			cyclePct := (elapsed / totalDuration) * 100
			remaining := 100.0 - cyclePct
			hundred := 100.0
			snap.Metrics["billing_cycle_progress"] = core.Metric{
				Used:      &cyclePct,
				Remaining: &remaining,
				Limit:     &hundred,
				Unit:      "%",
				Window:    "billing-cycle",
			}
			daysRemaining := cycleEnd.Sub(p.now()).Hours() / 24
			if daysRemaining < 0 {
				daysRemaining = 0
			}
			snap.Raw["billing_cycle_days_remaining"] = fmt.Sprintf("%.0f", daysRemaining)
			totalDays := totalDuration / 86400
			snap.Raw["billing_cycle_total_days"] = fmt.Sprintf("%.0f", totalDays)
		}

		// Derive status from pooled team spend when available, otherwise
		// from the overall plan percentage. Thresholds: 80% near, 100% hit.
		if su.PooledLimit > 0 && su.PooledRemaining > 0 {
			spendPctUsed := (su.PooledUsed / su.PooledLimit) * 100
			if spendPctUsed >= 100 {
				snap.Status = core.StatusLimited
			} else if spendPctUsed >= 80 {
				snap.Status = core.StatusNearLimit
			}
		} else if pu.TotalPercentUsed >= 100 {
			snap.Status = core.StatusLimited
		} else if pu.TotalPercentUsed >= 80 {
			snap.Status = core.StatusNearLimit
		}

		snap.Metrics["plan_total_spend_usd"] = core.Metric{Used: &totalSpendDollars, Limit: &limitDollars, Unit: "USD", Window: "billing-cycle"}
		if su.PooledLimit > 0 {
			pooledLimitDollars := su.PooledLimit / 100.0
			snap.Metrics["plan_limit_usd"] = core.Metric{Limit: &pooledLimitDollars, Unit: "USD", Window: "billing-cycle"}
		} else {
			snap.Metrics["plan_limit_usd"] = core.Metric{Limit: &limitDollars, Unit: "USD", Window: "billing-cycle"}
		}
	}

	var planInfo planInfoResp
	if err := p.callDashboardAPI(ctx, token, "GetPlanInfo", &planInfo); err == nil {
		snap.Raw["plan_name"] = planInfo.PlanInfo.PlanName
		snap.Raw["plan_price"] = planInfo.PlanInfo.Price
		snap.Raw["plan_billing_cycle_end"] = formatTimestamp(planInfo.PlanInfo.BillingCycleEnd)
		if planInfo.PlanInfo.IncludedAmountCents > 0 {
			snap.Raw["plan_included_amount_cents"] = strconv.FormatFloat(planInfo.PlanInfo.IncludedAmountCents, 'f', -1, 64)
			planIncludedAmountUSD := planInfo.PlanInfo.IncludedAmountCents / 100.0
			snap.Metrics["plan_included_amount"] = core.Metric{Used: &planIncludedAmountUSD, Unit: "USD", Window: "billing-cycle"}

			// With no explicit plan or pooled limit, fall back to the
			// included amount as the effective plan_spend limit.
			if hasPeriodUsage && limitDollars <= 0 && su.PooledLimit <= 0 {
				effectiveLimit := planIncludedAmountUSD
				snap.Metrics["plan_spend"] = core.Metric{Used: &totalSpendDollars, Limit: &effectiveLimit, Unit: "USD", Window: "billing-cycle"}
			}
		}
	}

	// Best available plan limit, in priority order: explicit plan limit,
	// pooled team limit, included plan amount.
	effectivePlanLimitUSD := limitDollars
	if effectivePlanLimitUSD <= 0 && su.PooledLimit > 0 {
		effectivePlanLimitUSD = su.PooledLimit / 100.0
	}
	if effectivePlanLimitUSD <= 0 && planInfo.PlanInfo.IncludedAmountCents > 0 {
		effectivePlanLimitUSD = planInfo.PlanInfo.IncludedAmountCents / 100.0
	}

	var aggUsage aggregatedUsageResp
	aggErr := p.callDashboardAPI(ctx, token, "GetAggregatedUsageEvents", &aggUsage)
	aggApplied := false
	if aggErr == nil {
		aggApplied = applyModelAggregations(snap, aggUsage.Aggregations)
		if aggApplied {
			// Cache fresh per-model data so it survives later API outages.
			p.storeModelAggregationCache(snap.AccountID, snap.Raw["billing_cycle_start"], snap.Raw["billing_cycle_end"], aggUsage.Aggregations, effectivePlanLimitUSD)
		}
		applyAggregationTotals(snap, &aggUsage)
	}
	// Fall back to the cached aggregation when the endpoint errored or
	// returned nothing usable.
	if !aggApplied && p.applyCachedModelAggregations(snap.AccountID, snap.Raw["billing_cycle_start"], snap.Raw["billing_cycle_end"], snap) {
		if aggErr != nil {
			log.Printf("[cursor] using cached model aggregation after API error: %v", aggErr)
		} else {
			log.Printf("[cursor] using cached model aggregation after empty API aggregation response")
		}
	}

	// If GetCurrentPeriodUsage failed but aggregation succeeded, build a
	// plan_spend gauge from billing_total_cost so credits stay visible.
	if !hasPeriodUsage {
		p.applyCachedBillingMetrics(snap.AccountID, snap)
		if _, ok := snap.Metrics["plan_spend"]; !ok {
			if m, ok := snap.Metrics["billing_total_cost"]; ok && m.Used != nil && *m.Used > 0 {
				costUSD := *m.Used
				if effectivePlanLimitUSD > 0 {
					snap.Metrics["plan_spend"] = core.Metric{
						Used:   &costUSD,
						Limit:  core.Float64Ptr(effectivePlanLimitUSD),
						Unit:   "USD",
						Window: "billing-cycle",
					}
				}
			}
		}
	}

	var hardLimit hardLimitResp
	if err := p.callDashboardAPI(ctx, token, "GetHardLimit", &hardLimit); err == nil {
		if hardLimit.NoUsageBasedAllowed {
			snap.Raw["usage_based_billing"] = "disabled"
		} else {
			snap.Raw["usage_based_billing"] = "enabled"
		}
	}

	var profile stripeProfileResp
	if err := p.callRESTAPI(ctx, token, "/auth/full_stripe_profile", &profile); err == nil {
		snap.Raw["membership_type"] = profile.MembershipType
		snap.Raw["is_team_member"] = strconv.FormatBool(profile.IsTeamMember)
		snap.Raw["team_membership"] = profile.TeamMembershipType
		snap.Raw["individual_membership"] = profile.IndividualMembershipType
		if profile.IsTeamMember {
			snap.Raw["team_id"] = fmt.Sprintf("%.0f", profile.TeamID)
		}
	}

	var limitPolicy usageLimitPolicyResp
	if err := p.callDashboardAPI(ctx, token, "GetUsageLimitPolicyStatus", &limitPolicy); err == nil {
		snap.Raw["can_configure_spend_limit"] = strconv.FormatBool(limitPolicy.CanConfigureSpendLimit)
		snap.Raw["limit_policy_type"] = limitPolicy.LimitType
	}

	// Fetch team-member details only when the Stripe profile says the
	// user belongs to a team.
	if profile.IsTeamMember && profile.TeamID > 0 {
		teamIDStr := fmt.Sprintf("%.0f", profile.TeamID)
		body := []byte(fmt.Sprintf(`{"teamId":"%s"}`, teamIDStr))
		var teamMembers teamMembersResp
		if err := p.callDashboardAPIWithBody(ctx, token, "GetTeamMembers", body, &teamMembers); err == nil {
			var activeCount int
			var memberNames []string
			var ownerCount int
			for _, member := range teamMembers.TeamMembers {
				if member.IsRemoved {
					continue
				}
				activeCount++
				memberNames = append(memberNames, member.Name)
				if strings.Contains(member.Role, "OWNER") {
					ownerCount++
				}
			}
			teamSize := float64(activeCount)
			snap.Metrics["team_size"] = core.Metric{Used: &teamSize, Unit: "members", Window: "current"}
			snap.Raw["team_members"] = strings.Join(memberNames, ", ")
			snap.Raw["team_size"] = strconv.Itoa(activeCount)
			if ownerCount > 0 {
				ownerV := float64(ownerCount)
				snap.Metrics["team_owners"] = core.Metric{Used: &ownerV, Unit: "owners", Window: "current"}
			}
		}
	}

	// Human-readable summary, most specific data first: pooled team
	// spend, then plan spend vs limit, then the API display message.
	planName := snap.Raw["plan_name"]
	if su.PooledLimit > 0 {
		pooledLimitDollars := su.PooledLimit / 100.0
		pooledUsedDollars := su.PooledUsed / 100.0
		pooledRemainingDollars := su.PooledRemaining / 100.0
		snap.Message = fmt.Sprintf("%s — $%.0f / $%.0f team spend ($%.0f remaining)", planName, pooledUsedDollars, pooledLimitDollars, pooledRemainingDollars)
	} else if limitDollars > 0 {
		snap.Message = fmt.Sprintf("%s — $%.2f / $%.0f plan spend", planName, totalSpendDollars, limitDollars)
	} else if planName != "" {
		snap.Message = fmt.Sprintf("%s — %s", planName, periodUsage.DisplayMessage)
	}

	// Cache billing metrics so credit gauges survive temporary API failures.
	p.storeBillingMetricsCache(snap.AccountID, snap)

	// If none of the billing/aggregation endpoints yielded useful data,
	// report an error so the caller knows API data is effectively absent.
	_, hasPlanSpend := snap.Metrics["plan_spend"]
	_, hasSpendLimit := snap.Metrics["spend_limit"]
	_, hasBillingTotal := snap.Metrics["billing_total_cost"]
	if !hasPlanSpend && !hasSpendLimit && !hasBillingTotal && !hasPeriodUsage && !aggApplied {
		return fmt.Errorf("all billing API endpoints failed")
	}

	return nil
}
snap.Raw["team_members"] = strings.Join(memberNames, ", ") + snap.Raw["team_size"] = strconv.Itoa(activeCount) + if ownerCount > 0 { + ownerV := float64(ownerCount) + snap.Metrics["team_owners"] = core.Metric{Used: &ownerV, Unit: "owners", Window: "current"} + } + } + } + + planName := snap.Raw["plan_name"] + if su.PooledLimit > 0 { + pooledLimitDollars := su.PooledLimit / 100.0 + pooledUsedDollars := su.PooledUsed / 100.0 + pooledRemainingDollars := su.PooledRemaining / 100.0 + snap.Message = fmt.Sprintf("%s — $%.0f / $%.0f team spend ($%.0f remaining)", planName, pooledUsedDollars, pooledLimitDollars, pooledRemainingDollars) + } else if limitDollars > 0 { + snap.Message = fmt.Sprintf("%s — $%.2f / $%.0f plan spend", planName, totalSpendDollars, limitDollars) + } else if planName != "" { + snap.Message = fmt.Sprintf("%s — %s", planName, periodUsage.DisplayMessage) + } + + p.storeBillingMetricsCache(snap.AccountID, snap) + + _, hasPlanSpend := snap.Metrics["plan_spend"] + _, hasSpendLimit := snap.Metrics["spend_limit"] + _, hasBillingTotal := snap.Metrics["billing_total_cost"] + if !hasPlanSpend && !hasSpendLimit && !hasBillingTotal && !hasPeriodUsage && !aggApplied { + return fmt.Errorf("all billing API endpoints failed") + } + + return nil +} + +func applyModelAggregations(snap *core.UsageSnapshot, aggregations []modelAggregation) bool { + if len(aggregations) == 0 { + return false + } + if snap.Metrics == nil { + snap.Metrics = make(map[string]core.Metric) + } + if snap.Raw == nil { + snap.Raw = make(map[string]string) + } + + var applied bool + for _, agg := range aggregations { + modelIntent := strings.TrimSpace(agg.ModelIntent) + if modelIntent == "" { + continue + } + rec := core.ModelUsageRecord{ + RawModelID: modelIntent, + RawSource: "api", + Window: "billing-cycle", + } + + inputTokens := strings.TrimSpace(agg.InputTokens) + outputTokens := strings.TrimSpace(agg.OutputTokens) + cacheWriteTokens := strings.TrimSpace(agg.CacheWriteTokens) + cacheReadTokens 
:= strings.TrimSpace(agg.CacheReadTokens) + + if agg.TotalCents > 0 { + costDollars := agg.TotalCents / 100.0 + snap.Metrics[fmt.Sprintf("model_%s_cost", modelIntent)] = core.Metric{Used: &costDollars, Unit: "USD", Window: "billing-cycle"} + rec.CostUSD = core.Float64Ptr(costDollars) + } + if inputTokens != "" { + snap.Raw[fmt.Sprintf("model_%s_input_tokens", modelIntent)] = inputTokens + } + if outputTokens != "" { + snap.Raw[fmt.Sprintf("model_%s_output_tokens", modelIntent)] = outputTokens + } + if cacheWriteTokens != "" { + snap.Raw[fmt.Sprintf("model_%s_cache_write_tokens", modelIntent)] = cacheWriteTokens + } + if cacheReadTokens != "" { + snap.Raw[fmt.Sprintf("model_%s_cache_read_tokens", modelIntent)] = cacheReadTokens + } + if agg.Tier > 0 { + snap.Raw[fmt.Sprintf("model_%s_tier", modelIntent)] = strconv.Itoa(agg.Tier) + } + + if parsed, ok := parseModelTokenCount(inputTokens); ok { + v := parsed + snap.Metrics[fmt.Sprintf("model_%s_input_tokens", modelIntent)] = core.Metric{Used: &v, Unit: "tokens", Window: "billing-cycle"} + rec.InputTokens = core.Float64Ptr(parsed) + } + if parsed, ok := parseModelTokenCount(outputTokens); ok { + v := parsed + snap.Metrics[fmt.Sprintf("model_%s_output_tokens", modelIntent)] = core.Metric{Used: &v, Unit: "tokens", Window: "billing-cycle"} + rec.OutputTokens = core.Float64Ptr(parsed) + } + cacheWrite := float64(0) + cacheRead := float64(0) + hasCacheWrite := false + hasCacheRead := false + if parsed, ok := parseModelTokenCount(cacheWriteTokens); ok { + cacheWrite = parsed + hasCacheWrite = true + v := parsed + snap.Metrics[fmt.Sprintf("model_%s_cache_write_tokens", modelIntent)] = core.Metric{Used: &v, Unit: "tokens", Window: "billing-cycle"} + } + if parsed, ok := parseModelTokenCount(cacheReadTokens); ok { + cacheRead = parsed + hasCacheRead = true + v := parsed + snap.Metrics[fmt.Sprintf("model_%s_cache_read_tokens", modelIntent)] = core.Metric{Used: &v, Unit: "tokens", Window: "billing-cycle"} + } + if hasCacheWrite 
|| hasCacheRead { + cached := cacheWrite + cacheRead + snap.Metrics[fmt.Sprintf("model_%s_cached_tokens", modelIntent)] = core.Metric{Used: &cached, Unit: "tokens", Window: "billing-cycle"} + rec.CachedTokens = core.Float64Ptr(cached) + } + + if agg.TotalCents > 0 || inputTokens != "" || outputTokens != "" || cacheWriteTokens != "" || cacheReadTokens != "" { + applied = true + snap.AppendModelUsage(rec) + } + } + return applied +} + +func applyAggregationTotals(snap *core.UsageSnapshot, agg *aggregatedUsageResp) { + if agg.TotalCostCents > 0 { + totalCostUSD := agg.TotalCostCents / 100.0 + snap.Metrics["billing_total_cost"] = core.Metric{Used: &totalCostUSD, Unit: "USD", Window: "billing-cycle"} + } + if v, ok := parseModelTokenCount(agg.TotalInputTokens); ok { + snap.Metrics["billing_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "billing-cycle"} + } + if v, ok := parseModelTokenCount(agg.TotalOutputTokens); ok { + snap.Metrics["billing_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "billing-cycle"} + } + if cwv, cwOK := parseModelTokenCount(agg.TotalCacheWriteTokens); cwOK { + if crv, crOK := parseModelTokenCount(agg.TotalCacheReadTokens); crOK { + total := cwv + crv + snap.Metrics["billing_cached_tokens"] = core.Metric{Used: &total, Unit: "tokens", Window: "billing-cycle"} + } + } +} + +func parseModelTokenCount(raw string) (float64, bool) { + cleaned := strings.TrimSpace(raw) + if cleaned == "" { + return 0, false + } + cleaned = strings.ReplaceAll(cleaned, ",", "") + cleaned = strings.ReplaceAll(cleaned, "_", "") + v, err := strconv.ParseFloat(cleaned, 64) + if err != nil { + return 0, false + } + return v, true +} diff --git a/internal/providers/cursor/cursor.go b/internal/providers/cursor/cursor.go index d93179b..e0831a6 100644 --- a/internal/providers/cursor/cursor.go +++ b/internal/providers/cursor/cursor.go @@ -1,14 +1,6 @@ package cursor import ( - "context" - "database/sql" - "fmt" - "log" - "math" - "sort" - "strconv" - 
"strings" "sync" "time" @@ -16,8 +8,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" - "github.com/janekbaraniewski/openusage/internal/providers/shared" - "github.com/samber/lo" ) var cursorAPIBase = "https://api2.cursor.sh" @@ -172,1754 +162,3 @@ func (p *Provider) now() time.Time { } return time.Now() } - -func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.UsageSnapshot, error) { - if strings.TrimSpace(acct.Provider) == "" { - acct.Provider = p.ID() - } - snap := core.UsageSnapshot{ - ProviderID: p.ID(), - AccountID: acct.ID, - Timestamp: p.now(), - Status: core.StatusOK, - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - DailySeries: make(map[string][]core.TimePoint), - } - if acct.ExtraData != nil { - if email := strings.TrimSpace(acct.ExtraData["email"]); email != "" { - snap.Raw["account_email"] = email - } - if membership := strings.TrimSpace(acct.ExtraData["membership"]); membership != "" { - snap.Raw["membership_type"] = membership - } - } - - acct.NormalizeRuntimePaths() - trackingDBPath := acct.Path("tracking_db", "") - stateDBPath := acct.Path("state_db", "") - - // If the token was not persisted (json:"-"), try to extract it fresh - // from the Cursor state DB so daemon polls can access the API. - token := acct.Token - if token == "" && stateDBPath != "" { - token = extractTokenFromStateDB(stateDBPath) - } - - // Run API calls concurrently with local DB reads so heavy local queries - // don't consume the context timeout needed by the API. 
- type apiResult struct { - snap *core.UsageSnapshot - err error - } - apiCh := make(chan apiResult, 1) - if token != "" { - go func() { - apiSnap := core.UsageSnapshot{ - AccountID: acct.ID, - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - DailySeries: make(map[string][]core.TimePoint), - } - err := p.fetchFromAPI(ctx, token, &apiSnap) - apiCh <- apiResult{snap: &apiSnap, err: err} - }() - } else { - apiCh <- apiResult{err: fmt.Errorf("no token")} - } - - // Also resolve ExtraData from persisted fields if not present. - if acct.ExtraData == nil { - acct.ExtraData = make(map[string]string) - } - if acct.ExtraData["tracking_db"] == "" && trackingDBPath != "" { - acct.ExtraData["tracking_db"] = trackingDBPath - } - if acct.ExtraData["state_db"] == "" && stateDBPath != "" { - acct.ExtraData["state_db"] = stateDBPath - } - - var hasLocalData bool - if trackingDBPath != "" { - before := cursorSnapshotDataSignature(&snap) - if err := p.readTrackingDB(ctx, trackingDBPath, &snap); err != nil { - log.Printf("[cursor] tracking DB error: %v", err) - snap.Raw["tracking_db_error"] = err.Error() - } else if cursorSnapshotDataSignature(&snap) != before { - hasLocalData = true - } - } - if stateDBPath != "" { - before := cursorSnapshotDataSignature(&snap) - if err := p.readStateDB(ctx, stateDBPath, &snap); err != nil { - log.Printf("[cursor] state DB error: %v", err) - snap.Raw["state_db_error"] = err.Error() - } else if cursorSnapshotDataSignature(&snap) != before { - hasLocalData = true - } - } - - // Collect API results. 
- ar := <-apiCh - hasAPIData := false - if ar.err == nil && ar.snap != nil { - mergeAPIIntoSnapshot(&snap, ar.snap) - hasAPIData = true - } else if ar.err != nil && token != "" { - log.Printf("[cursor] API fetch failed, falling back to local data: %v", ar.err) - snap.Raw["api_error"] = ar.err.Error() - } - - if !hasAPIData && !hasLocalData { - snap.Status = core.StatusError - snap.Message = "No Cursor tracking data accessible (no API token and no local DBs)" - return snap, nil - } - - if !hasAPIData { - p.applyCachedModelAggregations(acct.ID, "", "", &snap) - p.applyCachedBillingMetrics(acct.ID, &snap) - p.buildLocalOnlyMessage(&snap) - } - - // Final safety net: ensure credit gauges exist from local data when - // API didn't provide them (or API is completely unavailable). - p.ensureCreditGauges(acct.ID, &snap) - - return snap, nil -} - -func mergeAPIIntoSnapshot(dst, src *core.UsageSnapshot) { - for k, v := range src.Metrics { - dst.Metrics[k] = v - } - for k, v := range src.Resets { - dst.Resets[k] = v - } - for k, v := range src.Raw { - dst.Raw[k] = v - } - for k, v := range src.DailySeries { - dst.DailySeries[k] = v - } - dst.ModelUsage = append(dst.ModelUsage, src.ModelUsage...) 
- if src.Status != "" { - dst.Status = src.Status - } - if src.Message != "" { - dst.Message = src.Message - } -} - -type cursorSnapshotSignature struct { - metrics int - resets int - raw int - dailySeries int - modelUsage int -} - -func cursorSnapshotDataSignature(snap *core.UsageSnapshot) cursorSnapshotSignature { - if snap == nil { - return cursorSnapshotSignature{} - } - return cursorSnapshotSignature{ - metrics: len(snap.Metrics), - resets: len(snap.Resets), - raw: len(snap.Raw), - dailySeries: len(snap.DailySeries), - modelUsage: len(snap.ModelUsage), - } -} - -func (p *Provider) buildLocalOnlyMessage(snap *core.UsageSnapshot) { - var parts []string - - if m, ok := snap.Metrics["composer_cost"]; ok && m.Used != nil && *m.Used > 0 { - parts = append(parts, fmt.Sprintf("$%.2f session cost", *m.Used)) - } - if m, ok := snap.Metrics["total_ai_requests"]; ok && m.Used != nil && *m.Used > 0 { - parts = append(parts, fmt.Sprintf("%.0f requests", *m.Used)) - } - if m, ok := snap.Metrics["composer_sessions"]; ok && m.Used != nil && *m.Used > 0 { - parts = append(parts, fmt.Sprintf("%.0f sessions", *m.Used)) - } - - if len(parts) > 0 { - snap.Message = strings.Join(parts, " · ") + " (API unavailable)" - } else { - snap.Message = "Local Cursor IDE usage tracking (API unavailable)" - } -} - -func (p *Provider) fetchFromAPI(ctx context.Context, token string, snap *core.UsageSnapshot) error { - // All API endpoints are called independently so a single endpoint failure - // doesn't lose data from the others. 
- var ( - hasPeriodUsage bool - periodUsage currentPeriodUsageResp - pu planUsage - su spendLimitUsage - totalSpendDollars, limitDollars float64 - ) - if err := p.callDashboardAPI(ctx, token, "GetCurrentPeriodUsage", &periodUsage); err != nil { - log.Printf("[cursor] GetCurrentPeriodUsage failed (continuing with other endpoints): %v", err) - snap.Raw["period_usage_error"] = err.Error() - } else { - hasPeriodUsage = true - pu = periodUsage.PlanUsage - su = periodUsage.SpendLimitUsage - totalSpendDollars = pu.TotalSpend / 100.0 - includedDollars := pu.IncludedSpend / 100.0 - limitDollars = pu.Limit / 100.0 - bonusDollars := pu.BonusSpend / 100.0 - - snap.Metrics["plan_spend"] = core.Metric{ - Used: &totalSpendDollars, - Limit: &limitDollars, - Unit: "USD", - Window: "billing-cycle", - } - snap.Metrics["plan_included"] = core.Metric{ - Used: &includedDollars, - Unit: "USD", - Window: "billing-cycle", - } - snap.Metrics["plan_bonus"] = core.Metric{ - Used: &bonusDollars, - Unit: "USD", - Window: "billing-cycle", - } - - totalPctUsed := pu.TotalPercentUsed - totalPctRemaining := 100.0 - totalPctUsed - hundredPct := 100.0 - snap.Metrics["plan_percent_used"] = core.Metric{ - Used: &totalPctUsed, - Remaining: &totalPctRemaining, - Limit: &hundredPct, - Unit: "%", - Window: "billing-cycle", - } - autoPctUsed := pu.AutoPercentUsed - autoPctRemaining := 100.0 - autoPctUsed - snap.Metrics["plan_auto_percent_used"] = core.Metric{ - Used: &autoPctUsed, - Remaining: &autoPctRemaining, - Limit: &hundredPct, - Unit: "%", - Window: "billing-cycle", - } - apiPctUsed := pu.APIPercentUsed - apiPctRemaining := 100.0 - apiPctUsed - snap.Metrics["plan_api_percent_used"] = core.Metric{ - Used: &apiPctUsed, - Remaining: &apiPctRemaining, - Limit: &hundredPct, - Unit: "%", - Window: "billing-cycle", - } - - if su.PooledLimit > 0 { - pooledLimitDollars := su.PooledLimit / 100.0 - pooledUsedDollars := su.PooledUsed / 100.0 - pooledRemainingDollars := su.PooledRemaining / 100.0 - 
individualDollars := su.IndividualUsed / 100.0 - - snap.Metrics["spend_limit"] = core.Metric{ - Limit: &pooledLimitDollars, - Used: &pooledUsedDollars, - Remaining: &pooledRemainingDollars, - Unit: "USD", - Window: "billing-cycle", - } - snap.Metrics["individual_spend"] = core.Metric{ - Used: &individualDollars, - Unit: "USD", - Window: "billing-cycle", - } - - // Stacked gauge: team_budget shows self vs others within the pooled limit. - teamTotalUsedDollars := pooledUsedDollars - snap.Metrics["team_budget"] = core.Metric{ - Limit: &pooledLimitDollars, - Used: &teamTotalUsedDollars, - Unit: "USD", - Window: "billing-cycle", - } - selfSpend := individualDollars - snap.Metrics["team_budget_self"] = core.Metric{ - Used: &selfSpend, - Unit: "USD", - Window: "billing-cycle", - } - othersSpend := pooledUsedDollars - individualDollars - if othersSpend < 0 { - othersSpend = 0 - } - snap.Metrics["team_budget_others"] = core.Metric{ - Used: &othersSpend, - Unit: "USD", - Window: "billing-cycle", - } - - snap.Raw["spend_limit_type"] = su.LimitType - } - - snap.Raw["display_message"] = periodUsage.DisplayMessage - snap.Raw["display_threshold"] = strconv.FormatFloat(periodUsage.DisplayThreshold, 'f', -1, 64) - snap.Raw["billing_cycle_start"] = formatTimestamp(periodUsage.BillingCycleStart) - snap.Raw["billing_cycle_end"] = formatTimestamp(periodUsage.BillingCycleEnd) - - cycleStart := shared.FlexParseTime(periodUsage.BillingCycleStart) - cycleEnd := shared.FlexParseTime(periodUsage.BillingCycleEnd) - if !cycleEnd.IsZero() { - snap.Resets["billing_cycle_end"] = cycleEnd - } - if !cycleStart.IsZero() && !cycleEnd.IsZero() && cycleEnd.After(cycleStart) { - totalDuration := cycleEnd.Sub(cycleStart).Seconds() - elapsed := time.Since(cycleStart).Seconds() - if elapsed < 0 { - elapsed = 0 - } - if elapsed > totalDuration { - elapsed = totalDuration - } - cyclePct := (elapsed / totalDuration) * 100 - remaining := 100.0 - cyclePct - hundred := 100.0 - 
snap.Metrics["billing_cycle_progress"] = core.Metric{ - Used: &cyclePct, - Remaining: &remaining, - Limit: &hundred, - Unit: "%", - Window: "billing-cycle", - } - daysRemaining := cycleEnd.Sub(p.now()).Hours() / 24 - if daysRemaining < 0 { - daysRemaining = 0 - } - snap.Raw["billing_cycle_days_remaining"] = fmt.Sprintf("%.0f", daysRemaining) - totalDays := totalDuration / 86400 - snap.Raw["billing_cycle_total_days"] = fmt.Sprintf("%.0f", totalDays) - } - - if su.PooledLimit > 0 && su.PooledRemaining > 0 { - spendPctUsed := (su.PooledUsed / su.PooledLimit) * 100 - if spendPctUsed >= 100 { - snap.Status = core.StatusLimited - } else if spendPctUsed >= 80 { - snap.Status = core.StatusNearLimit - } - } else if pu.TotalPercentUsed >= 100 { - snap.Status = core.StatusLimited - } else if pu.TotalPercentUsed >= 80 { - snap.Status = core.StatusNearLimit - } - - snap.Metrics["plan_total_spend_usd"] = core.Metric{ - Used: &totalSpendDollars, - Limit: &limitDollars, - Unit: "USD", - Window: "billing-cycle", - } - if su.PooledLimit > 0 { - pooledLimitDollars := su.PooledLimit / 100.0 - snap.Metrics["plan_limit_usd"] = core.Metric{ - Limit: &pooledLimitDollars, - Unit: "USD", - Window: "billing-cycle", - } - } else { - snap.Metrics["plan_limit_usd"] = core.Metric{ - Limit: &limitDollars, - Unit: "USD", - Window: "billing-cycle", - } - } - } - - var planInfo planInfoResp - if err := p.callDashboardAPI(ctx, token, "GetPlanInfo", &planInfo); err == nil { - snap.Raw["plan_name"] = planInfo.PlanInfo.PlanName - snap.Raw["plan_price"] = planInfo.PlanInfo.Price - snap.Raw["plan_billing_cycle_end"] = formatTimestamp(planInfo.PlanInfo.BillingCycleEnd) - if planInfo.PlanInfo.IncludedAmountCents > 0 { - snap.Raw["plan_included_amount_cents"] = strconv.FormatFloat(planInfo.PlanInfo.IncludedAmountCents, 'f', -1, 64) - planIncludedAmountUSD := planInfo.PlanInfo.IncludedAmountCents / 100.0 - snap.Metrics["plan_included_amount"] = core.Metric{ - Used: &planIncludedAmountUSD, - Unit: "USD", - 
Window: "billing-cycle", - } - - if hasPeriodUsage && limitDollars <= 0 && su.PooledLimit <= 0 { - effectiveLimit := planIncludedAmountUSD - snap.Metrics["plan_spend"] = core.Metric{ - Used: &totalSpendDollars, - Limit: &effectiveLimit, - Unit: "USD", - Window: "billing-cycle", - } - } - } - } - - effectivePlanLimitUSD := limitDollars - if effectivePlanLimitUSD <= 0 && su.PooledLimit > 0 { - effectivePlanLimitUSD = su.PooledLimit / 100.0 - } - if effectivePlanLimitUSD <= 0 && planInfo.PlanInfo.IncludedAmountCents > 0 { - effectivePlanLimitUSD = planInfo.PlanInfo.IncludedAmountCents / 100.0 - } - - var aggUsage aggregatedUsageResp - aggErr := p.callDashboardAPI(ctx, token, "GetAggregatedUsageEvents", &aggUsage) - aggApplied := false - if aggErr == nil { - aggApplied = applyModelAggregations(snap, aggUsage.Aggregations) - if aggApplied { - p.storeModelAggregationCache(snap.AccountID, snap.Raw["billing_cycle_start"], snap.Raw["billing_cycle_end"], aggUsage.Aggregations, effectivePlanLimitUSD) - } - applyAggregationTotals(snap, &aggUsage) - } - if !aggApplied && p.applyCachedModelAggregations(snap.AccountID, snap.Raw["billing_cycle_start"], snap.Raw["billing_cycle_end"], snap) { - if aggErr != nil { - log.Printf("[cursor] using cached model aggregation after API error: %v", aggErr) - } else { - log.Printf("[cursor] using cached model aggregation after empty API aggregation response") - } - } - - // If GetCurrentPeriodUsage failed but aggregation succeeded, build a - // plan_spend gauge from billing_total_cost so credits are visible. 
- if !hasPeriodUsage { - p.applyCachedBillingMetrics(snap.AccountID, snap) - if _, ok := snap.Metrics["plan_spend"]; !ok { - if m, ok := snap.Metrics["billing_total_cost"]; ok && m.Used != nil && *m.Used > 0 { - costUSD := *m.Used - if effectivePlanLimitUSD > 0 { - snap.Metrics["plan_spend"] = core.Metric{ - Used: &costUSD, - Limit: core.Float64Ptr(effectivePlanLimitUSD), - Unit: "USD", - Window: "billing-cycle", - } - } - } - } - } - - var hardLimit hardLimitResp - if err := p.callDashboardAPI(ctx, token, "GetHardLimit", &hardLimit); err == nil { - if hardLimit.NoUsageBasedAllowed { - snap.Raw["usage_based_billing"] = "disabled" - } else { - snap.Raw["usage_based_billing"] = "enabled" - } - } - - var profile stripeProfileResp - if err := p.callRESTAPI(ctx, token, "/auth/full_stripe_profile", &profile); err == nil { - snap.Raw["membership_type"] = profile.MembershipType - snap.Raw["is_team_member"] = strconv.FormatBool(profile.IsTeamMember) - snap.Raw["team_membership"] = profile.TeamMembershipType - snap.Raw["individual_membership"] = profile.IndividualMembershipType - if profile.IsTeamMember { - snap.Raw["team_id"] = fmt.Sprintf("%.0f", profile.TeamID) - } - } - - var limitPolicy usageLimitPolicyResp - if err := p.callDashboardAPI(ctx, token, "GetUsageLimitPolicyStatus", &limitPolicy); err == nil { - snap.Raw["can_configure_spend_limit"] = strconv.FormatBool(limitPolicy.CanConfigureSpendLimit) - snap.Raw["limit_policy_type"] = limitPolicy.LimitType - } - - // Fetch team members if user is on a team. 
- if profile.IsTeamMember && profile.TeamID > 0 { - teamIDStr := fmt.Sprintf("%.0f", profile.TeamID) - body := []byte(fmt.Sprintf(`{"teamId":"%s"}`, teamIDStr)) - var teamMembers teamMembersResp - if err := p.callDashboardAPIWithBody(ctx, token, "GetTeamMembers", body, &teamMembers); err == nil { - var activeCount int - var memberNames []string - var ownerCount int - for _, m := range teamMembers.TeamMembers { - if m.IsRemoved { - continue - } - activeCount++ - memberNames = append(memberNames, m.Name) - if strings.Contains(m.Role, "OWNER") { - ownerCount++ - } - } - teamSize := float64(activeCount) - snap.Metrics["team_size"] = core.Metric{Used: &teamSize, Unit: "members", Window: "current"} - snap.Raw["team_members"] = strings.Join(memberNames, ", ") - snap.Raw["team_size"] = strconv.Itoa(activeCount) - if ownerCount > 0 { - ownerV := float64(ownerCount) - snap.Metrics["team_owners"] = core.Metric{Used: &ownerV, Unit: "owners", Window: "current"} - } - } - } - - planName := snap.Raw["plan_name"] - if su.PooledLimit > 0 { - pooledLimitDollars := su.PooledLimit / 100.0 - pooledUsedDollars := su.PooledUsed / 100.0 - pooledRemainingDollars := su.PooledRemaining / 100.0 - snap.Message = fmt.Sprintf("%s — $%.0f / $%.0f team spend ($%.0f remaining)", - planName, pooledUsedDollars, pooledLimitDollars, pooledRemainingDollars) - } else if limitDollars > 0 { - snap.Message = fmt.Sprintf("%s — $%.2f / $%.0f plan spend", - planName, totalSpendDollars, limitDollars) - } else if planName != "" { - snap.Message = fmt.Sprintf("%s — %s", planName, periodUsage.DisplayMessage) - } - - // Cache billing metrics so credit gauges survive temporary API failures. - p.storeBillingMetricsCache(snap.AccountID, snap) - - // If none of the billing/aggregation endpoints yielded useful data, - // report an error so the caller knows API data is effectively absent. 
- _, hasPlanSpend := snap.Metrics["plan_spend"] - _, hasSpendLimit := snap.Metrics["spend_limit"] - _, hasBillingTotal := snap.Metrics["billing_total_cost"] - if !hasPlanSpend && !hasSpendLimit && !hasBillingTotal && !hasPeriodUsage && !aggApplied { - return fmt.Errorf("all billing API endpoints failed") - } - - return nil -} - -func applyModelAggregations(snap *core.UsageSnapshot, aggregations []modelAggregation) bool { - if len(aggregations) == 0 { - return false - } - if snap.Metrics == nil { - snap.Metrics = make(map[string]core.Metric) - } - if snap.Raw == nil { - snap.Raw = make(map[string]string) - } - - var applied bool - for _, agg := range aggregations { - modelIntent := strings.TrimSpace(agg.ModelIntent) - if modelIntent == "" { - continue - } - rec := core.ModelUsageRecord{ - RawModelID: modelIntent, - RawSource: "api", - Window: "billing-cycle", - } - - inputTokens := strings.TrimSpace(agg.InputTokens) - outputTokens := strings.TrimSpace(agg.OutputTokens) - cacheWriteTokens := strings.TrimSpace(agg.CacheWriteTokens) - cacheReadTokens := strings.TrimSpace(agg.CacheReadTokens) - - if agg.TotalCents > 0 { - costDollars := agg.TotalCents / 100.0 - snap.Metrics[fmt.Sprintf("model_%s_cost", modelIntent)] = core.Metric{ - Used: &costDollars, - Unit: "USD", - Window: "billing-cycle", - } - rec.CostUSD = core.Float64Ptr(costDollars) - } - if inputTokens != "" { - snap.Raw[fmt.Sprintf("model_%s_input_tokens", modelIntent)] = inputTokens - } - if outputTokens != "" { - snap.Raw[fmt.Sprintf("model_%s_output_tokens", modelIntent)] = outputTokens - } - if cacheWriteTokens != "" { - snap.Raw[fmt.Sprintf("model_%s_cache_write_tokens", modelIntent)] = cacheWriteTokens - } - if cacheReadTokens != "" { - snap.Raw[fmt.Sprintf("model_%s_cache_read_tokens", modelIntent)] = cacheReadTokens - } - if agg.Tier > 0 { - snap.Raw[fmt.Sprintf("model_%s_tier", modelIntent)] = strconv.Itoa(agg.Tier) - } - - if parsed, ok := parseModelTokenCount(inputTokens); ok { - v := parsed - 
snap.Metrics[fmt.Sprintf("model_%s_input_tokens", modelIntent)] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "billing-cycle", - } - rec.InputTokens = core.Float64Ptr(parsed) - } - if parsed, ok := parseModelTokenCount(outputTokens); ok { - v := parsed - snap.Metrics[fmt.Sprintf("model_%s_output_tokens", modelIntent)] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "billing-cycle", - } - rec.OutputTokens = core.Float64Ptr(parsed) - } - cacheWrite := float64(0) - cacheRead := float64(0) - hasCacheWrite := false - hasCacheRead := false - if parsed, ok := parseModelTokenCount(cacheWriteTokens); ok { - cacheWrite = parsed - hasCacheWrite = true - v := parsed - snap.Metrics[fmt.Sprintf("model_%s_cache_write_tokens", modelIntent)] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "billing-cycle", - } - } - if parsed, ok := parseModelTokenCount(cacheReadTokens); ok { - cacheRead = parsed - hasCacheRead = true - v := parsed - snap.Metrics[fmt.Sprintf("model_%s_cache_read_tokens", modelIntent)] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "billing-cycle", - } - } - if hasCacheWrite || hasCacheRead { - cached := cacheWrite + cacheRead - snap.Metrics[fmt.Sprintf("model_%s_cached_tokens", modelIntent)] = core.Metric{ - Used: &cached, - Unit: "tokens", - Window: "billing-cycle", - } - rec.CachedTokens = core.Float64Ptr(cached) - } - - if agg.TotalCents > 0 || inputTokens != "" || outputTokens != "" || cacheWriteTokens != "" || cacheReadTokens != "" { - applied = true - snap.AppendModelUsage(rec) - } - } - return applied -} - -func applyAggregationTotals(snap *core.UsageSnapshot, agg *aggregatedUsageResp) { - if agg.TotalCostCents > 0 { - totalCostUSD := agg.TotalCostCents / 100.0 - snap.Metrics["billing_total_cost"] = core.Metric{ - Used: &totalCostUSD, - Unit: "USD", - Window: "billing-cycle", - } - } - if v, ok := parseModelTokenCount(agg.TotalInputTokens); ok { - snap.Metrics["billing_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", 
Window: "billing-cycle"} - } - if v, ok := parseModelTokenCount(agg.TotalOutputTokens); ok { - snap.Metrics["billing_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "billing-cycle"} - } - if cwv, cwOK := parseModelTokenCount(agg.TotalCacheWriteTokens); cwOK { - if crv, crOK := parseModelTokenCount(agg.TotalCacheReadTokens); crOK { - total := cwv + crv - snap.Metrics["billing_cached_tokens"] = core.Metric{Used: &total, Unit: "tokens", Window: "billing-cycle"} - } - } -} - -func parseModelTokenCount(raw string) (float64, bool) { - cleaned := strings.TrimSpace(raw) - if cleaned == "" { - return 0, false - } - cleaned = strings.ReplaceAll(cleaned, ",", "") - cleaned = strings.ReplaceAll(cleaned, "_", "") - v, err := strconv.ParseFloat(cleaned, 64) - if err != nil { - return 0, false - } - return v, true -} - -func (p *Provider) readTrackingDB(ctx context.Context, dbPath string, snap *core.UsageSnapshot) error { - db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", dbPath)) - if err != nil { - return fmt.Errorf("opening tracking DB: %w", err) - } - defer db.Close() - - if !cursorTableExists(ctx, db, "ai_code_hashes") { - return nil - } - - trackingRecords, err := loadTrackingRecords(ctx, db, p.clock) - if err != nil { - return err - } - totalRequests := len(trackingRecords) - if totalRequests > 0 { - total := float64(totalRequests) - snap.Metrics["total_ai_requests"] = core.Metric{ - Used: &total, - Unit: "requests", - Window: "all-time", - } - } - - today := p.now().Format("2006-01-02") - todayCount := 0 - for _, record := range trackingRecords { - if record.OccurredDay == today { - todayCount++ - } - } - if todayCount > 0 { - tc := float64(todayCount) - snap.Metrics["requests_today"] = core.Metric{ - Used: &tc, - Unit: "requests", - Window: "1d", - } - } - - p.readTrackingSourceBreakdown(trackingRecords, snap, today) - p.readTrackingDailyRequests(trackingRecords, snap) - p.readTrackingModelBreakdown(trackingRecords, snap, today) - 
p.readTrackingLanguageBreakdown(trackingRecords, snap) - p.readScoredCommits(ctx, db, snap) - p.readDeletedFiles(ctx, db, snap) - p.readTrackedFileContent(ctx, db, snap) - - return nil -} - -func (p *Provider) readScoredCommits(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - var totalCommits int - if db.QueryRowContext(ctx, `SELECT COUNT(*) FROM scored_commits WHERE linesAdded IS NOT NULL AND linesAdded > 0`).Scan(&totalCommits) != nil || totalCommits == 0 { - return - } - - rows, err := db.QueryContext(ctx, ` - SELECT v2AiPercentage, linesAdded, linesDeleted, - tabLinesAdded, tabLinesDeleted, - composerLinesAdded, composerLinesDeleted, - humanLinesAdded, humanLinesDeleted, - blankLinesAdded, blankLinesDeleted - FROM scored_commits - WHERE linesAdded IS NOT NULL AND linesAdded > 0 - ORDER BY scoredAt DESC`) - if err != nil { - return - } - defer rows.Close() - - var ( - sumAIPct float64 - countWithPct int - totalTabAdd int - totalTabDel int - totalCompAdd int - totalCompDel int - totalHumanAdd int - totalHumanDel int - totalBlankAdd int - totalBlankDel int - totalLinesAdd int - totalLinesDel int - ) - - for rows.Next() { - var pctStr sql.NullString - var linesAdded, linesDeleted sql.NullInt64 - var tabAdd, tabDel, compAdd, compDel, humanAdd, humanDel sql.NullInt64 - var blankAdd, blankDel sql.NullInt64 - if rows.Scan(&pctStr, &linesAdded, &linesDeleted, &tabAdd, &tabDel, &compAdd, &compDel, &humanAdd, &humanDel, &blankAdd, &blankDel) != nil { - continue - } - if pctStr.Valid && pctStr.String != "" { - if v, err := strconv.ParseFloat(pctStr.String, 64); err == nil { - sumAIPct += v - countWithPct++ - } - } - if linesAdded.Valid { - totalLinesAdd += int(linesAdded.Int64) - } - if linesDeleted.Valid { - totalLinesDel += int(linesDeleted.Int64) - } - if tabAdd.Valid { - totalTabAdd += int(tabAdd.Int64) - } - if tabDel.Valid { - totalTabDel += int(tabDel.Int64) - } - if compAdd.Valid { - totalCompAdd += int(compAdd.Int64) - } - if compDel.Valid { - 
totalCompDel += int(compDel.Int64) - } - if humanAdd.Valid { - totalHumanAdd += int(humanAdd.Int64) - } - if humanDel.Valid { - totalHumanDel += int(humanDel.Int64) - } - if blankAdd.Valid { - totalBlankAdd += int(blankAdd.Int64) - } - if blankDel.Valid { - totalBlankDel += int(blankDel.Int64) - } - } - - tc := float64(totalCommits) - snap.Metrics["scored_commits"] = core.Metric{Used: &tc, Unit: "commits", Window: "all-time"} - snap.Raw["scored_commits_total"] = strconv.Itoa(totalCommits) - - if countWithPct > 0 { - avgPct := sumAIPct / float64(countWithPct) - avgPct = math.Round(avgPct*10) / 10 - hundred := 100.0 - remaining := hundred - avgPct - snap.Metrics["ai_code_percentage"] = core.Metric{ - Used: &avgPct, - Remaining: &remaining, - Limit: &hundred, - Unit: "%", - Window: "all-commits", - } - snap.Raw["ai_code_pct_avg"] = fmt.Sprintf("%.1f%%", avgPct) - snap.Raw["ai_code_pct_sample"] = strconv.Itoa(countWithPct) - } - - if totalLinesAdd > 0 || totalLinesDel > 0 { - snap.Raw["commit_total_lines_added"] = strconv.Itoa(totalLinesAdd) - snap.Raw["commit_total_lines_deleted"] = strconv.Itoa(totalLinesDel) - } - if totalTabAdd > 0 || totalCompAdd > 0 || totalHumanAdd > 0 { - snap.Raw["commit_tab_lines"] = strconv.Itoa(totalTabAdd) - snap.Raw["commit_composer_lines"] = strconv.Itoa(totalCompAdd) - snap.Raw["commit_human_lines"] = strconv.Itoa(totalHumanAdd) - } - if totalTabDel > 0 || totalCompDel > 0 || totalHumanDel > 0 { - snap.Raw["commit_tab_lines_deleted"] = strconv.Itoa(totalTabDel) - snap.Raw["commit_composer_lines_deleted"] = strconv.Itoa(totalCompDel) - snap.Raw["commit_human_lines_deleted"] = strconv.Itoa(totalHumanDel) - } - if totalBlankAdd > 0 || totalBlankDel > 0 { - snap.Raw["commit_blank_lines_added"] = strconv.Itoa(totalBlankAdd) - snap.Raw["commit_blank_lines_deleted"] = strconv.Itoa(totalBlankDel) - } -} - -func (p *Provider) readDeletedFiles(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - var count int - if 
db.QueryRowContext(ctx, `SELECT COUNT(*) FROM ai_deleted_files`).Scan(&count) == nil && count > 0 { - v := float64(count) - snap.Metrics["ai_deleted_files"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} - } -} - -func (p *Provider) readTrackedFileContent(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - var count int - if db.QueryRowContext(ctx, `SELECT COUNT(*) FROM tracked_file_content`).Scan(&count) == nil && count > 0 { - v := float64(count) - snap.Metrics["ai_tracked_files"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} - } -} - -func chooseTrackingTimeExpr(ctx context.Context, db *sql.DB) string { - columns := cursorTableColumns(ctx, db, "ai_code_hashes") - hasCreatedAt := columns["createdat"] - hasTimestamp := columns["timestamp"] - - switch { - case hasCreatedAt && hasTimestamp: - return "COALESCE(createdAt, timestamp)" - case hasCreatedAt: - return "createdAt" - case hasTimestamp: - return "timestamp" - default: - return "0" - } -} - -func (p *Provider) readTrackingSourceBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot, today string) { - clientTotals := map[string]float64{ - "ide": 0, - "cli_agents": 0, - "other": 0, - } - sourceTotals := make(map[string]int) - todaySourceTotals := make(map[string]int) - var sourceSummary []string - for _, record := range records { - sourceTotals[record.Source]++ - if record.OccurredDay == today { - todaySourceTotals[record.Source]++ - } - } - for source, count := range sourceTotals { - value := float64(count) - sourceKey := sanitizeCursorMetricName(source) - snap.Metrics["source_"+sourceKey+"_requests"] = core.Metric{ - Used: &value, - Unit: "requests", - Window: "all-time", - } - - // Emit interface-level metrics for the Interface breakdown composition. 
- ifaceValue := value - snap.Metrics["interface_"+sourceKey] = core.Metric{ - Used: &ifaceValue, - Unit: "calls", - Window: "all-time", - } - - clientKey := cursorClientBucket(source) - clientTotals[clientKey] += value - sourceSummary = append(sourceSummary, fmt.Sprintf("%s %d", sourceLabel(source), count)) - } - - if len(sourceSummary) > 0 { - snap.Raw["source_usage"] = strings.Join(sourceSummary, " · ") - } - - for bucket, value := range clientTotals { - if value <= 0 { - continue - } - v := value - snap.Metrics["client_"+bucket+"_sessions"] = core.Metric{ - Used: &v, - Unit: "sessions", - Window: "all-time", - } - } - - var todaySummary []string - for source, count := range todaySourceTotals { - if count <= 0 { - continue - } - value := float64(count) - sourceKey := sanitizeCursorMetricName(source) - snap.Metrics["source_"+sourceKey+"_requests_today"] = core.Metric{ - Used: &value, - Unit: "requests", - Window: "1d", - } - todaySummary = append(todaySummary, fmt.Sprintf("%s %d", sourceLabel(source), count)) - } - if len(todaySummary) > 0 { - snap.Raw["source_usage_today"] = strings.Join(todaySummary, " · ") - } -} - -func (p *Provider) readTrackingDailyRequests(records []cursorTrackingRecord, snap *core.UsageSnapshot) { - totalByDay := make(map[string]float64) - byClientDay := map[string]map[string]float64{ - "ide": make(map[string]float64), - "cli_agents": make(map[string]float64), - "other": make(map[string]float64), - } - bySourceDay := make(map[string]map[string]float64) - - for _, record := range records { - day := record.OccurredDay - if day == "" { - continue - } - v := 1.0 - totalByDay[day] += v - clientKey := cursorClientBucket(record.Source) - byClientDay[clientKey][day] += v - sourceKey := sanitizeCursorMetricName(record.Source) - if bySourceDay[sourceKey] == nil { - bySourceDay[sourceKey] = make(map[string]float64) - } - bySourceDay[sourceKey][day] += v - } - - if len(totalByDay) > 1 { - snap.DailySeries["analytics_requests"] = 
mapToSortedDailyPoints(totalByDay) - } - for clientKey, pointsByDay := range byClientDay { - if len(pointsByDay) < 2 { - continue - } - snap.DailySeries["usage_client_"+clientKey] = mapToSortedDailyPoints(pointsByDay) - } - for sourceKey, pointsByDay := range bySourceDay { - if len(pointsByDay) < 2 { - continue - } - snap.DailySeries["usage_source_"+sourceKey] = mapToSortedDailyPoints(pointsByDay) - } -} - -func (p *Provider) readTrackingModelBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot, today string) { - modelTotals := make(map[string]int) - todayModelTotals := make(map[string]int) - byModelDay := make(map[string]map[string]float64) - var modelSummary []string - for _, record := range records { - modelTotals[record.Model]++ - if record.OccurredDay == today { - todayModelTotals[record.Model]++ - } - modelKey := sanitizeCursorMetricName(record.Model) - if byModelDay[modelKey] == nil { - byModelDay[modelKey] = make(map[string]float64) - } - if record.OccurredDay != "" { - byModelDay[modelKey][record.OccurredDay]++ - } - } - for model, count := range modelTotals { - if count <= 0 { - continue - } - - value := float64(count) - modelKey := sanitizeCursorMetricName(model) - snap.Metrics["model_"+modelKey+"_requests"] = core.Metric{ - Used: &value, - Unit: "requests", - Window: "all-time", - } - modelSummary = append(modelSummary, fmt.Sprintf("%s %d", sourceLabel(model), count)) - } - if len(modelSummary) > 0 { - snap.Raw["model_usage"] = strings.Join(modelSummary, " · ") - } - - for model, count := range todayModelTotals { - if count <= 0 { - continue - } - modelKey := sanitizeCursorMetricName(model) - value := float64(count) - snap.Metrics["model_"+modelKey+"_requests_today"] = core.Metric{ - Used: &value, - Unit: "requests", - Window: "1d", - } - } - for modelKey, pointsByDay := range byModelDay { - if len(pointsByDay) < 2 { - continue - } - snap.DailySeries["usage_model_"+modelKey] = mapToSortedDailyPoints(pointsByDay) - } -} - -func (p 
*Provider) readTrackingLanguageBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot) { - langTotals := make(map[string]int) - var langSummary []string - for _, record := range records { - if strings.TrimSpace(record.FileExt) == "" { - continue - } - langTotals[record.FileExt]++ - } - for ext, count := range langTotals { - value := float64(count) - langName := extensionToLanguage(ext) - langKey := sanitizeCursorMetricName(langName) - snap.Metrics["lang_"+langKey] = core.Metric{ - Used: &value, - Unit: "requests", - Window: "all-time", - } - langSummary = append(langSummary, fmt.Sprintf("%s %d", langName, count)) - } - if len(langSummary) > 0 { - snap.Raw["language_usage"] = strings.Join(langSummary, " · ") - } -} - -var extToLang = map[string]string{ - ".ts": "TypeScript", ".tsx": "TypeScript", ".js": "JavaScript", ".jsx": "JavaScript", - ".py": "Python", ".go": "Go", ".rs": "Rust", ".rb": "Ruby", - ".java": "Java", ".kt": "Kotlin", ".kts": "Kotlin", - ".cs": "C#", ".fs": "F#", - ".cpp": "C++", ".cc": "C++", ".cxx": "C++", ".hpp": "C++", - ".c": "C", ".h": "C/C++", - ".swift": "Swift", ".m": "Obj-C", - ".php": "PHP", ".lua": "Lua", ".r": "R", - ".scala": "Scala", ".clj": "Clojure", ".ex": "Elixir", ".exs": "Elixir", - ".hs": "Haskell", ".erl": "Erlang", - ".html": "HTML", ".htm": "HTML", ".css": "CSS", ".scss": "SCSS", ".less": "LESS", - ".json": "JSON", ".yaml": "YAML", ".yml": "YAML", ".toml": "TOML", ".xml": "XML", - ".md": "Markdown", ".mdx": "Markdown", - ".sql": "SQL", ".graphql": "GraphQL", ".gql": "GraphQL", - ".sh": "Shell", ".bash": "Shell", ".zsh": "Shell", ".fish": "Shell", - ".dockerfile": "Docker", ".tf": "Terraform", ".hcl": "HCL", - ".vue": "Vue", ".svelte": "Svelte", ".astro": "Astro", - ".dart": "Dart", ".zig": "Zig", ".nim": "Nim", ".v": "V", - ".proto": "Protobuf", ".wasm": "WASM", -} - -func extensionToLanguage(ext string) string { - ext = strings.ToLower(strings.TrimSpace(ext)) - if !strings.HasPrefix(ext, ".") { - ext = "." 
+ ext - } - if lang, ok := extToLang[ext]; ok { - return lang - } - return strings.TrimPrefix(ext, ".") -} - -func mapToSortedDailyPoints(byDay map[string]float64) []core.TimePoint { - if len(byDay) == 0 { - return nil - } - days := lo.Keys(byDay) - sort.Strings(days) - - points := make([]core.TimePoint, 0, len(days)) - for _, day := range days { - points = append(points, core.TimePoint{Date: day, Value: byDay[day]}) - } - return points -} - -func cursorClientBucket(source string) string { - s := strings.ToLower(strings.TrimSpace(source)) - switch { - case s == "": - return "other" - case strings.Contains(s, "cloud"), strings.Contains(s, "web"), s == "background-agent", s == "background_agent": - return "cloud_agents" - case strings.Contains(s, "cli"), strings.Contains(s, "agent"), strings.Contains(s, "terminal"), strings.Contains(s, "cmd"): - return "cli_agents" - case s == "composer", s == "tab", s == "human", strings.Contains(s, "ide"), strings.Contains(s, "editor"): - return "ide" - default: - return "other" - } -} - -func sanitizeCursorMetricName(source string) string { - s := strings.ToLower(strings.TrimSpace(source)) - if s == "" { - return "unknown" - } - var b strings.Builder - lastUnderscore := false - for _, r := range s { - switch { - case r >= 'a' && r <= 'z': - b.WriteRune(r) - lastUnderscore = false - case r >= '0' && r <= '9': - b.WriteRune(r) - lastUnderscore = false - default: - if !lastUnderscore { - b.WriteByte('_') - lastUnderscore = true - } - } - } - out := strings.Trim(b.String(), "_") - if out == "" { - return "unknown" - } - return out -} - -func sourceLabel(source string) string { - trimmed := strings.TrimSpace(source) - if trimmed == "" { - return "unknown" - } - return trimmed -} - -func (p *Provider) readStateDB(ctx context.Context, dbPath string, snap *core.UsageSnapshot) error { - db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", dbPath)) - if err != nil { - return fmt.Errorf("opening state DB: %w", err) - } - defer 
db.Close() - - if err := db.PingContext(ctx); err != nil { - return fmt.Errorf("state DB not accessible: %w", err) - } - - dailyStatsRecords, err := loadDailyStatsRecords(ctx, db) - if err != nil { - dailyStatsRecords = nil - } - composerRecords, err := loadComposerSessionRecords(ctx, db) - if err != nil { - log.Printf("[cursor] composerData query error: %v", err) - } - bubbleRecords, err := loadBubbleRecords(ctx, db) - if err != nil { - log.Printf("[cursor] bubbleId query error: %v", err) - } - - p.readDailyStatsToday(dailyStatsRecords, snap) - p.readDailyStatsSeries(dailyStatsRecords, snap) - p.readComposerSessions(composerRecords, snap) - p.readStateMetadata(ctx, db, snap) - p.readToolUsage(bubbleRecords, snap) - - return nil -} - -func (p *Provider) readDailyStatsToday(records []cursorDailyStatsRecord, snap *core.UsageSnapshot) { - today := p.now().Format("2006-01-02") - yesterday := p.now().AddDate(0, 0, -1).Format("2006-01-02") - var stats *dailyStats - for i := range records { - switch records[i].Date { - case today: - stats = &records[i].Stats - case yesterday: - if stats == nil { - stats = &records[i].Stats - } - } - } - if stats == nil { - return - } - - if stats.TabSuggestedLines > 0 { - suggested := float64(stats.TabSuggestedLines) - accepted := float64(stats.TabAcceptedLines) - snap.Metrics["tab_suggested_lines"] = core.Metric{Used: &suggested, Unit: "lines", Window: "1d"} - snap.Metrics["tab_accepted_lines"] = core.Metric{Used: &accepted, Unit: "lines", Window: "1d"} - } - if stats.ComposerSuggestedLines > 0 { - suggested := float64(stats.ComposerSuggestedLines) - accepted := float64(stats.ComposerAcceptedLines) - snap.Metrics["composer_suggested_lines"] = core.Metric{Used: &suggested, Unit: "lines", Window: "1d"} - snap.Metrics["composer_accepted_lines"] = core.Metric{Used: &accepted, Unit: "lines", Window: "1d"} - } -} - -func (p *Provider) readComposerSessions(records []cursorComposerSessionRecord, snap *core.UsageSnapshot) { - var ( - 
totalCostCents float64 - totalRequests int - totalSessions int - totalLinesAdded int - totalLinesRemoved int - totalFilesChanged int - totalFilesCreated int - totalFilesRemoved int - agenticSessions int - nonAgenticSessions int - totalContextUsed float64 - totalContextLimit float64 - contextSampleCount int - subagentTypes = make(map[string]int) - modelCosts = make(map[string]float64) - modelRequests = make(map[string]int) - modeSessions = make(map[string]int) - forceModes = make(map[string]int) - statusCounts = make(map[string]int) - dailyCost = make(map[string]float64) - dailyRequests = make(map[string]float64) - todayCostCents float64 - todayRequests int - ) - - now := p.now() - todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) - - for _, record := range records { - totalSessions++ - if record.Mode != "" { - modeSessions[record.Mode]++ - } - if record.IsAgentic != nil { - if *record.IsAgentic { - agenticSessions++ - } else { - nonAgenticSessions++ - } - } - if record.ForceMode != "" { - forceModes[record.ForceMode]++ - } - if record.Status != "" { - statusCounts[record.Status]++ - } - totalLinesAdded += record.LinesAdded - totalLinesRemoved += record.LinesRemoved - if record.FilesChanged > 0 { - totalFilesChanged += record.FilesChanged - } - if record.AddedFiles > 0 { - totalFilesCreated += record.AddedFiles - } - if record.RemovedFiles > 0 { - totalFilesRemoved += record.RemovedFiles - } - if record.ContextTokensUsed > 0 && record.ContextTokenLimit > 0 { - totalContextUsed += record.ContextTokensUsed - totalContextLimit += record.ContextTokenLimit - contextSampleCount++ - } - if record.SubagentType != "" { - subagentTypes[record.SubagentType]++ - } - - var sessionDay string - if !record.OccurredAt.IsZero() { - sessionDay = record.OccurredAt.In(now.Location()).Format("2006-01-02") - } - - for model, mu := range record.Usage { - totalCostCents += mu.CostInCents - totalRequests += mu.Amount - modelCosts[model] += mu.CostInCents 
- modelRequests[model] += mu.Amount - - if sessionDay != "" { - dailyCost[sessionDay] += mu.CostInCents - dailyRequests[sessionDay] += float64(mu.Amount) - } - if !record.OccurredAt.IsZero() && record.OccurredAt.After(todayStart) { - todayCostCents += mu.CostInCents - todayRequests += mu.Amount - } - } - } - - if totalSessions == 0 { - return - } - - totalCostUSD := totalCostCents / 100.0 - snap.Metrics["composer_cost"] = core.Metric{ - Used: &totalCostUSD, - Unit: "USD", - Window: "all-time", - } - - if todayCostCents > 0 { - todayCostUSD := todayCostCents / 100.0 - snap.Metrics["today_cost"] = core.Metric{ - Used: &todayCostUSD, - Unit: "USD", - Window: "1d", - } - } - if todayRequests > 0 { - tr := float64(todayRequests) - snap.Metrics["today_composer_requests"] = core.Metric{ - Used: &tr, - Unit: "requests", - Window: "1d", - } - } - - sessions := float64(totalSessions) - snap.Metrics["composer_sessions"] = core.Metric{ - Used: &sessions, - Unit: "sessions", - Window: "all-time", - } - reqs := float64(totalRequests) - snap.Metrics["composer_requests"] = core.Metric{ - Used: &reqs, - Unit: "requests", - Window: "all-time", - } - - if totalLinesAdded > 0 { - la := float64(totalLinesAdded) - snap.Metrics["composer_lines_added"] = core.Metric{Used: &la, Unit: "lines", Window: "all-time"} - } - if totalLinesRemoved > 0 { - lr := float64(totalLinesRemoved) - snap.Metrics["composer_lines_removed"] = core.Metric{Used: &lr, Unit: "lines", Window: "all-time"} - } - - for model, costCents := range modelCosts { - costUSD := costCents / 100.0 - modelKey := sanitizeCursorMetricName(model) - snap.Metrics["model_"+modelKey+"_cost"] = core.Metric{ - Used: &costUSD, - Unit: "USD", - Window: "all-time", - } - if reqs, ok := modelRequests[model]; ok { - r := float64(reqs) - if existing, exists := snap.Metrics["model_"+modelKey+"_requests"]; exists && existing.Used != nil { - combined := *existing.Used + r - snap.Metrics["model_"+modelKey+"_requests"] = core.Metric{ - Used: 
&combined, - Unit: "requests", - Window: "all-time", - } - } else { - snap.Metrics["model_"+modelKey+"_requests"] = core.Metric{ - Used: &r, - Unit: "requests", - Window: "all-time", - } - } - } - - rec := core.ModelUsageRecord{ - RawModelID: model, - RawSource: "composer", - Window: "all-time", - CostUSD: core.Float64Ptr(costUSD), - } - if r, ok := modelRequests[model]; ok { - rec.Requests = core.Float64Ptr(float64(r)) - } - snap.AppendModelUsage(rec) - } - - for mode, count := range modeSessions { - v := float64(count) - modeKey := sanitizeCursorMetricName(mode) - snap.Metrics["mode_"+modeKey+"_sessions"] = core.Metric{ - Used: &v, - Unit: "sessions", - Window: "all-time", - } - } - - if totalFilesChanged > 0 { - fc := float64(totalFilesChanged) - snap.Metrics["composer_files_changed"] = core.Metric{Used: &fc, Unit: "files", Window: "all-time"} - } - if totalFilesCreated > 0 { - v := float64(totalFilesCreated) - snap.Metrics["composer_files_created"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} - } - if totalFilesRemoved > 0 { - v := float64(totalFilesRemoved) - snap.Metrics["composer_files_removed"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} - } - - if agenticSessions > 0 { - v := float64(agenticSessions) - snap.Metrics["agentic_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: "all-time"} - } - if nonAgenticSessions > 0 { - v := float64(nonAgenticSessions) - snap.Metrics["non_agentic_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: "all-time"} - } - - for fm, count := range forceModes { - v := float64(count) - fmKey := sanitizeCursorMetricName(fm) - snap.Metrics["mode_"+fmKey+"_sessions"] = core.Metric{ - Used: &v, - Unit: "sessions", - Window: "all-time", - } - } - - if contextSampleCount > 0 { - avgPct := (totalContextUsed / totalContextLimit) * 100 - avgPct = math.Round(avgPct*10) / 10 - hundred := 100.0 - remaining := hundred - avgPct - snap.Metrics["composer_context_pct"] = core.Metric{ - Used: &avgPct, 
- Remaining: &remaining, - Limit: &hundred, - Unit: "%", - Window: "avg", - } - } - - for saType, count := range subagentTypes { - v := float64(count) - saKey := sanitizeCursorMetricName(saType) - snap.Metrics["subagent_"+saKey+"_sessions"] = core.Metric{ - Used: &v, - Unit: "sessions", - Window: "all-time", - } - } - - snap.Raw["composer_total_cost"] = fmt.Sprintf("$%.2f", totalCostUSD) - snap.Raw["composer_total_sessions"] = strconv.Itoa(totalSessions) - snap.Raw["composer_total_requests"] = strconv.Itoa(totalRequests) - if totalLinesAdded > 0 { - snap.Raw["composer_lines_added"] = strconv.Itoa(totalLinesAdded) - snap.Raw["composer_lines_removed"] = strconv.Itoa(totalLinesRemoved) - } - - if len(dailyCost) > 1 { - points := make([]core.TimePoint, 0, len(dailyCost)) - for day, cents := range dailyCost { - points = append(points, core.TimePoint{Date: day, Value: cents / 100.0}) - } - sort.Slice(points, func(i, j int) bool { return points[i].Date < points[j].Date }) - snap.DailySeries["analytics_cost"] = points - } - if len(dailyRequests) > 1 { - points := mapToSortedDailyPoints(dailyRequests) - if existing, ok := snap.DailySeries["analytics_requests"]; ok && len(existing) > 0 { - snap.DailySeries["analytics_requests"] = mergeDailyPoints(existing, points) - } else { - snap.DailySeries["composer_requests_daily"] = points - } - } -} - -func mergeDailyPoints(a, b []core.TimePoint) []core.TimePoint { - byDay := make(map[string]float64) - for _, p := range a { - byDay[p.Date] += p.Value - } - for _, p := range b { - if byDay[p.Date] < p.Value { - byDay[p.Date] = p.Value - } - } - return mapToSortedDailyPoints(byDay) -} - -// extractTokenFromStateDB reads the Cursor access token directly from the -// state.vscdb SQLite database. This is needed because the Token field has -// json:"-" and is not persisted to the config file, so daemon polls that -// load accounts from config would otherwise have no API token. 
-func extractTokenFromStateDB(dbPath string) string { - db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", dbPath)) - if err != nil { - return "" - } - defer db.Close() - - var token string - if db.QueryRow(`SELECT value FROM ItemTable WHERE key = 'cursorAuth/accessToken'`).Scan(&token) != nil { - return "" - } - return strings.TrimSpace(token) -} - -func (p *Provider) readStateMetadata(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { - var email string - if db.QueryRowContext(ctx, - `SELECT value FROM ItemTable WHERE key = 'cursorAuth/cachedEmail'`).Scan(&email) == nil && email != "" { - snap.Raw["account_email"] = email - } - - var promptCount string - if db.QueryRowContext(ctx, - `SELECT value FROM ItemTable WHERE key = 'freeBestOfN.promptCount'`).Scan(&promptCount) == nil && promptCount != "" { - if v, err := strconv.ParseFloat(promptCount, 64); err == nil && v > 0 { - snap.Metrics["total_prompts"] = core.Metric{Used: &v, Unit: "prompts", Window: "all-time"} - snap.Raw["total_prompts"] = promptCount - } - } - - var membership string - if db.QueryRowContext(ctx, - `SELECT value FROM ItemTable WHERE key = 'cursorAuth/stripeMembershipType'`).Scan(&membership) == nil && membership != "" { - if snap.Raw["membership_type"] == "" { - snap.Raw["membership_type"] = membership - } - } -} - -func (p *Provider) readToolUsage(records []cursorBubbleRecord, snap *core.UsageSnapshot) { - toolCounts := make(map[string]int) - statusCounts := make(map[string]int) - var totalCalls int - - for _, record := range records { - if strings.TrimSpace(record.ToolName) == "" { - continue - } - - name := normalizeToolName(record.ToolName) - toolCounts[name]++ - totalCalls++ - - if strings.TrimSpace(record.ToolStatus) != "" { - statusCounts[record.ToolStatus]++ - } - } - - if totalCalls == 0 { - return - } - - tc := float64(totalCalls) - snap.Metrics["tool_calls_total"] = core.Metric{Used: &tc, Unit: "calls", Window: "all-time"} - - for name, count := range toolCounts 
{ - v := float64(count) - toolKey := sanitizeCursorMetricName(name) - snap.Metrics["tool_"+toolKey] = core.Metric{ - Used: &v, - Unit: "calls", - Window: "all-time", - } - } - - if completed, ok := statusCounts["completed"]; ok && completed > 0 { - v := float64(completed) - snap.Metrics["tool_completed"] = core.Metric{Used: &v, Unit: "calls", Window: "all-time"} - } - if errored, ok := statusCounts["error"]; ok && errored > 0 { - v := float64(errored) - snap.Metrics["tool_errored"] = core.Metric{Used: &v, Unit: "calls", Window: "all-time"} - } - if cancelled, ok := statusCounts["cancelled"]; ok && cancelled > 0 { - v := float64(cancelled) - snap.Metrics["tool_cancelled"] = core.Metric{Used: &v, Unit: "calls", Window: "all-time"} - } - - if totalCalls > 0 { - completed := float64(statusCounts["completed"]) - successPct := (completed / float64(totalCalls)) * 100 - successPct = math.Round(successPct*10) / 10 - hundred := 100.0 - remaining := hundred - successPct - snap.Metrics["tool_success_rate"] = core.Metric{ - Used: &successPct, - Remaining: &remaining, - Limit: &hundred, - Unit: "%", - Window: "all-time", - } - } - - snap.Raw["tool_calls_total"] = strconv.Itoa(totalCalls) - snap.Raw["tool_completed"] = strconv.Itoa(statusCounts["completed"]) - snap.Raw["tool_errored"] = strconv.Itoa(statusCounts["error"]) - snap.Raw["tool_cancelled"] = strconv.Itoa(statusCounts["cancelled"]) -} - -// normalizeToolName cleans up raw tool names from Cursor bubble data. -// MCP tools come in formats like: -// - "mcp-kubernetes-user-kubernetes-pods_list" (Cursor's internal format) -// - Hyphen-prefixed with "user-" for user-installed MCP servers -// -// We normalize MCP tools to the canonical "mcp__server__function" format -// so the telemetry pipeline handles all providers uniformly. -func normalizeToolName(raw string) string { - name := strings.TrimSpace(raw) - if name == "" { - return "unknown" - } - - // Detect MCP tools by prefix. 
- if strings.HasPrefix(name, "mcp-") || strings.HasPrefix(name, "mcp_") { - return normalizeCursorMCPName(name) - } - - // Strip version suffixes: "read_file_v2" → "read_file" - name = strings.TrimSuffix(name, "_v2") - name = strings.TrimSuffix(name, "_v3") - - return name -} - -// normalizeCursorMCPName converts Cursor's MCP tool name format to the -// canonical "mcp__server__function" format used by the telemetry pipeline. -// -// Input formats: -// -// "mcp-kubernetes-user-kubernetes-pods_list" → "mcp__kubernetes__pods_list" -// "mcp-notion-workspace-notion-notion-fetch" → "mcp__notion__fetch" -// "mcp_something_else" → "mcp__something__else" (fallback) -func normalizeCursorMCPName(name string) string { - // Primary format: "mcp-SERVER-user-SERVER-FUNCTION" (hyphen-separated). - if strings.HasPrefix(name, "mcp-") { - rest := name[4:] // strip "mcp-" - parts := strings.SplitN(rest, "-user-", 2) - if len(parts) == 2 { - server := parts[0] - // After "user-", the server name is repeated then the function follows. - // e.g., "kubernetes-pods_list" where "kubernetes" is the repeated server. - afterUser := parts[1] - // Strip the repeated server prefix if present. - serverDash := server + "-" - if strings.HasPrefix(afterUser, serverDash) { - function := afterUser[len(serverDash):] - return "mcp__" + server + "__" + function - } - // Server not repeated — the whole remainder is server-function. - // Try to split on first hyphen: "notion-fetch" → server=notion, function=fetch. - if idx := strings.LastIndex(afterUser, "-"); idx > 0 { - return "mcp__" + server + "__" + afterUser[idx+1:] - } - return "mcp__" + server + "__" + afterUser - } - - // Simpler format: "mcp-server-function" (no "user" segment). 
- // e.g., "mcp-kubernetes-pods_log" - if idx := strings.Index(rest, "-"); idx > 0 { - server := rest[:idx] - function := rest[idx+1:] - return "mcp__" + server + "__" + function - } - return "mcp__" + rest + "__" - } - - // Underscore format: "mcp_server_function" (less common). - if strings.HasPrefix(name, "mcp_") { - rest := name[4:] - if idx := strings.Index(rest, "_"); idx > 0 { - server := rest[:idx] - function := rest[idx+1:] - return "mcp__" + server + "__" + function - } - return "mcp__" + rest + "__" - } - - return name -} - -func (p *Provider) readDailyStatsSeries(records []cursorDailyStatsRecord, snap *core.UsageSnapshot) { - for _, record := range records { - ds := record.Stats - dateStr := record.Date - if ds.TabSuggestedLines > 0 || ds.TabAcceptedLines > 0 { - snap.DailySeries["tab_suggested"] = append(snap.DailySeries["tab_suggested"], - core.TimePoint{Date: dateStr, Value: float64(ds.TabSuggestedLines)}) - snap.DailySeries["tab_accepted"] = append(snap.DailySeries["tab_accepted"], - core.TimePoint{Date: dateStr, Value: float64(ds.TabAcceptedLines)}) - } - - if ds.ComposerSuggestedLines > 0 || ds.ComposerAcceptedLines > 0 { - snap.DailySeries["composer_suggested"] = append(snap.DailySeries["composer_suggested"], - core.TimePoint{Date: dateStr, Value: float64(ds.ComposerSuggestedLines)}) - snap.DailySeries["composer_accepted"] = append(snap.DailySeries["composer_accepted"], - core.TimePoint{Date: dateStr, Value: float64(ds.ComposerAcceptedLines)}) - } - - totalLines := float64(ds.TabSuggestedLines + ds.ComposerSuggestedLines) - if totalLines > 0 { - snap.DailySeries["total_lines"] = append(snap.DailySeries["total_lines"], - core.TimePoint{Date: dateStr, Value: totalLines}) - } - } -} - -func formatTimestamp(s string) string { - t := shared.FlexParseTime(s) - if t.IsZero() { - return s // return as-is if we can't parse - } - return t.Format("Jan 02, 2006 15:04 MST") -} diff --git a/internal/providers/cursor/fetch.go 
b/internal/providers/cursor/fetch.go new file mode 100644 index 0000000..ba7849b --- /dev/null +++ b/internal/providers/cursor/fetch.go @@ -0,0 +1,121 @@ +package cursor + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.UsageSnapshot, error) { + if strings.TrimSpace(acct.Provider) == "" { + acct.Provider = p.ID() + } + snap := core.UsageSnapshot{ + ProviderID: p.ID(), + AccountID: acct.ID, + Timestamp: p.now(), + Status: core.StatusOK, + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + DailySeries: make(map[string][]core.TimePoint), + } + if acct.ExtraData != nil { + if email := strings.TrimSpace(acct.ExtraData["email"]); email != "" { + snap.Raw["account_email"] = email + } + if membership := strings.TrimSpace(acct.ExtraData["membership"]); membership != "" { + snap.Raw["membership_type"] = membership + } + } + + acct.NormalizeRuntimePaths() + trackingDBPath := acct.Path("tracking_db", "") + stateDBPath := acct.Path("state_db", "") + + token := acct.Token + if token == "" && stateDBPath != "" { + token = extractTokenFromStateDB(stateDBPath) + } + + type apiResult struct { + snap *core.UsageSnapshot + err error + } + apiCh := make(chan apiResult, 1) + if token != "" { + go func() { + apiSnap := core.UsageSnapshot{ + AccountID: acct.ID, + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + DailySeries: make(map[string][]core.TimePoint), + } + err := p.fetchFromAPI(ctx, token, &apiSnap) + apiCh <- apiResult{snap: &apiSnap, err: err} + }() + } else { + apiCh <- apiResult{err: fmt.Errorf("no token")} + } + + if acct.ExtraData == nil { + acct.ExtraData = make(map[string]string) + } + if acct.ExtraData["tracking_db"] == "" && trackingDBPath != "" { + acct.ExtraData["tracking_db"] = trackingDBPath + } + 
if acct.ExtraData["state_db"] == "" && stateDBPath != "" { + acct.ExtraData["state_db"] = stateDBPath + } + + var hasLocalData bool + if trackingDBPath != "" { + before := cursorSnapshotDataSignature(&snap) + if err := p.readTrackingDB(ctx, trackingDBPath, &snap); err != nil { + log.Printf("[cursor] tracking DB error: %v", err) + snap.Raw["tracking_db_error"] = err.Error() + } else if cursorSnapshotDataSignature(&snap) != before { + hasLocalData = true + } + } + if stateDBPath != "" { + before := cursorSnapshotDataSignature(&snap) + if err := p.readStateDB(ctx, stateDBPath, &snap); err != nil { + log.Printf("[cursor] state DB error: %v", err) + snap.Raw["state_db_error"] = err.Error() + } else if cursorSnapshotDataSignature(&snap) != before { + hasLocalData = true + } + } + + ar := <-apiCh + hasAPIData := false + if ar.err == nil && ar.snap != nil { + mergeAPIIntoSnapshot(&snap, ar.snap) + hasAPIData = true + } else if ar.err != nil && token != "" { + log.Printf("[cursor] API fetch failed, falling back to local data: %v", ar.err) + snap.Raw["api_error"] = ar.err.Error() + } + + if !hasAPIData && !hasLocalData { + snap.Status = core.StatusError + snap.Message = "No Cursor tracking data accessible (no API token and no local DBs)" + return snap, nil + } + + if !hasAPIData { + p.applyCachedModelAggregations(acct.ID, "", "", &snap) + p.applyCachedBillingMetrics(acct.ID, &snap) + p.buildLocalOnlyMessage(&snap) + } + + p.ensureCreditGauges(acct.ID, &snap) + + return snap, nil +} diff --git a/internal/providers/cursor/runtime.go b/internal/providers/cursor/runtime.go new file mode 100644 index 0000000..936eeda --- /dev/null +++ b/internal/providers/cursor/runtime.go @@ -0,0 +1,86 @@ +package cursor + +import ( + "database/sql" + "fmt" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func mergeAPIIntoSnapshot(dst, src *core.UsageSnapshot) { + for key, metric := range src.Metrics { + dst.Metrics[key] = metric + } + for key, reset := range 
src.Resets { + dst.Resets[key] = reset + } + for key, raw := range src.Raw { + dst.Raw[key] = raw + } + for key, series := range src.DailySeries { + dst.DailySeries[key] = series + } + dst.ModelUsage = append(dst.ModelUsage, src.ModelUsage...) + if src.Status != "" { + dst.Status = src.Status + } + if src.Message != "" { + dst.Message = src.Message + } +} + +type cursorSnapshotSignature struct { + metrics int + resets int + raw int + dailySeries int + modelUsage int +} + +func cursorSnapshotDataSignature(snap *core.UsageSnapshot) cursorSnapshotSignature { + if snap == nil { + return cursorSnapshotSignature{} + } + return cursorSnapshotSignature{ + metrics: len(snap.Metrics), + resets: len(snap.Resets), + raw: len(snap.Raw), + dailySeries: len(snap.DailySeries), + modelUsage: len(snap.ModelUsage), + } +} + +func (p *Provider) buildLocalOnlyMessage(snap *core.UsageSnapshot) { + var parts []string + + if metric, ok := snap.Metrics["composer_cost"]; ok && metric.Used != nil && *metric.Used > 0 { + parts = append(parts, fmt.Sprintf("$%.2f session cost", *metric.Used)) + } + if metric, ok := snap.Metrics["total_ai_requests"]; ok && metric.Used != nil && *metric.Used > 0 { + parts = append(parts, fmt.Sprintf("%.0f requests", *metric.Used)) + } + if metric, ok := snap.Metrics["composer_sessions"]; ok && metric.Used != nil && *metric.Used > 0 { + parts = append(parts, fmt.Sprintf("%.0f sessions", *metric.Used)) + } + + if len(parts) > 0 { + snap.Message = strings.Join(parts, " · ") + " (API unavailable)" + return + } + snap.Message = "Local Cursor IDE usage tracking (API unavailable)" +} + +func extractTokenFromStateDB(dbPath string) string { + db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", dbPath)) + if err != nil { + return "" + } + defer db.Close() + + var token string + if db.QueryRow(`SELECT value FROM ItemTable WHERE key = 'cursorAuth/accessToken'`).Scan(&token) != nil { + return "" + } + return strings.TrimSpace(token) +} diff --git 
a/internal/providers/cursor/state_projection.go b/internal/providers/cursor/state_projection.go new file mode 100644 index 0000000..9b9dd31 --- /dev/null +++ b/internal/providers/cursor/state_projection.go @@ -0,0 +1,452 @@ +package cursor + +import ( + "context" + "database/sql" + "fmt" + "log" + "math" + "sort" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +func (p *Provider) readStateDB(ctx context.Context, dbPath string, snap *core.UsageSnapshot) error { + db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", dbPath)) + if err != nil { + return fmt.Errorf("opening state DB: %w", err) + } + defer db.Close() + + if err := db.PingContext(ctx); err != nil { + return fmt.Errorf("state DB not accessible: %w", err) + } + + dailyStatsRecords, err := loadDailyStatsRecords(ctx, db) + if err != nil { + dailyStatsRecords = nil + } + composerRecords, err := loadComposerSessionRecords(ctx, db) + if err != nil { + log.Printf("[cursor] composerData query error: %v", err) + } + bubbleRecords, err := loadBubbleRecords(ctx, db) + if err != nil { + log.Printf("[cursor] bubbleId query error: %v", err) + } + + p.readDailyStatsToday(dailyStatsRecords, snap) + p.readDailyStatsSeries(dailyStatsRecords, snap) + p.readComposerSessions(composerRecords, snap) + p.readStateMetadata(ctx, db, snap) + p.readToolUsage(bubbleRecords, snap) + return nil +} + +func (p *Provider) readDailyStatsToday(records []cursorDailyStatsRecord, snap *core.UsageSnapshot) { + today := p.now().Format("2006-01-02") + yesterday := p.now().AddDate(0, 0, -1).Format("2006-01-02") + var stats *dailyStats + for i := range records { + switch records[i].Date { + case today: + stats = &records[i].Stats + case yesterday: + if stats == nil { + stats = &records[i].Stats + } + } + } + if stats == nil { + return + } + + if stats.TabSuggestedLines > 0 { + suggested := float64(stats.TabSuggestedLines) + 
accepted := float64(stats.TabAcceptedLines) + snap.Metrics["tab_suggested_lines"] = core.Metric{Used: &suggested, Unit: "lines", Window: "1d"} + snap.Metrics["tab_accepted_lines"] = core.Metric{Used: &accepted, Unit: "lines", Window: "1d"} + } + if stats.ComposerSuggestedLines > 0 { + suggested := float64(stats.ComposerSuggestedLines) + accepted := float64(stats.ComposerAcceptedLines) + snap.Metrics["composer_suggested_lines"] = core.Metric{Used: &suggested, Unit: "lines", Window: "1d"} + snap.Metrics["composer_accepted_lines"] = core.Metric{Used: &accepted, Unit: "lines", Window: "1d"} + } +} + +func (p *Provider) readComposerSessions(records []cursorComposerSessionRecord, snap *core.UsageSnapshot) { + var ( + totalCostCents float64 + totalRequests int + totalSessions int + totalLinesAdded int + totalLinesRemoved int + totalFilesChanged int + totalFilesCreated int + totalFilesRemoved int + agenticSessions int + nonAgenticSessions int + totalContextUsed float64 + totalContextLimit float64 + contextSampleCount int + subagentTypes = make(map[string]int) + modelCosts = make(map[string]float64) + modelRequests = make(map[string]int) + modeSessions = make(map[string]int) + forceModes = make(map[string]int) + statusCounts = make(map[string]int) + dailyCost = make(map[string]float64) + dailyRequests = make(map[string]float64) + todayCostCents float64 + todayRequests int + ) + + now := p.now() + todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + + for _, record := range records { + totalSessions++ + if record.Mode != "" { + modeSessions[record.Mode]++ + } + if record.IsAgentic != nil { + if *record.IsAgentic { + agenticSessions++ + } else { + nonAgenticSessions++ + } + } + if record.ForceMode != "" { + forceModes[record.ForceMode]++ + } + if record.Status != "" { + statusCounts[record.Status]++ + } + totalLinesAdded += record.LinesAdded + totalLinesRemoved += record.LinesRemoved + if record.FilesChanged > 0 { + totalFilesChanged += 
record.FilesChanged + } + if record.AddedFiles > 0 { + totalFilesCreated += record.AddedFiles + } + if record.RemovedFiles > 0 { + totalFilesRemoved += record.RemovedFiles + } + if record.ContextTokensUsed > 0 && record.ContextTokenLimit > 0 { + totalContextUsed += record.ContextTokensUsed + totalContextLimit += record.ContextTokenLimit + contextSampleCount++ + } + if record.SubagentType != "" { + subagentTypes[record.SubagentType]++ + } + + var sessionDay string + if !record.OccurredAt.IsZero() { + sessionDay = record.OccurredAt.In(now.Location()).Format("2006-01-02") + } + for model, usage := range record.Usage { + totalCostCents += usage.CostInCents + totalRequests += usage.Amount + modelCosts[model] += usage.CostInCents + modelRequests[model] += usage.Amount + if sessionDay != "" { + dailyCost[sessionDay] += usage.CostInCents + dailyRequests[sessionDay] += float64(usage.Amount) + } + if !record.OccurredAt.IsZero() && record.OccurredAt.After(todayStart) { + todayCostCents += usage.CostInCents + todayRequests += usage.Amount + } + } + } + + if totalSessions == 0 { + return + } + + totalCostUSD := totalCostCents / 100.0 + snap.Metrics["composer_cost"] = core.Metric{Used: &totalCostUSD, Unit: "USD", Window: "all-time"} + if todayCostCents > 0 { + todayCostUSD := todayCostCents / 100.0 + snap.Metrics["today_cost"] = core.Metric{Used: &todayCostUSD, Unit: "USD", Window: "1d"} + } + if todayRequests > 0 { + tr := float64(todayRequests) + snap.Metrics["today_composer_requests"] = core.Metric{Used: &tr, Unit: "requests", Window: "1d"} + } + + sessions := float64(totalSessions) + snap.Metrics["composer_sessions"] = core.Metric{Used: &sessions, Unit: "sessions", Window: "all-time"} + reqs := float64(totalRequests) + snap.Metrics["composer_requests"] = core.Metric{Used: &reqs, Unit: "requests", Window: "all-time"} + + if totalLinesAdded > 0 { + la := float64(totalLinesAdded) + snap.Metrics["composer_lines_added"] = core.Metric{Used: &la, Unit: "lines", Window: "all-time"} 
+ } + if totalLinesRemoved > 0 { + lr := float64(totalLinesRemoved) + snap.Metrics["composer_lines_removed"] = core.Metric{Used: &lr, Unit: "lines", Window: "all-time"} + } + + for model, costCents := range modelCosts { + costUSD := costCents / 100.0 + modelKey := sanitizeCursorMetricName(model) + snap.Metrics["model_"+modelKey+"_cost"] = core.Metric{Used: &costUSD, Unit: "USD", Window: "all-time"} + if reqCount, ok := modelRequests[model]; ok { + r := float64(reqCount) + if existing, exists := snap.Metrics["model_"+modelKey+"_requests"]; exists && existing.Used != nil { + combined := *existing.Used + r + snap.Metrics["model_"+modelKey+"_requests"] = core.Metric{Used: &combined, Unit: "requests", Window: "all-time"} + } else { + snap.Metrics["model_"+modelKey+"_requests"] = core.Metric{Used: &r, Unit: "requests", Window: "all-time"} + } + } + rec := core.ModelUsageRecord{RawModelID: model, RawSource: "composer", Window: "all-time", CostUSD: core.Float64Ptr(costUSD)} + if reqCount, ok := modelRequests[model]; ok { + rec.Requests = core.Float64Ptr(float64(reqCount)) + } + snap.AppendModelUsage(rec) + } + + for mode, count := range modeSessions { + v := float64(count) + snap.Metrics["mode_"+sanitizeCursorMetricName(mode)+"_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: "all-time"} + } + if totalFilesChanged > 0 { + v := float64(totalFilesChanged) + snap.Metrics["composer_files_changed"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} + } + if totalFilesCreated > 0 { + v := float64(totalFilesCreated) + snap.Metrics["composer_files_created"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} + } + if totalFilesRemoved > 0 { + v := float64(totalFilesRemoved) + snap.Metrics["composer_files_removed"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} + } + if agenticSessions > 0 { + v := float64(agenticSessions) + snap.Metrics["agentic_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: "all-time"} + } + if 
nonAgenticSessions > 0 {
		v := float64(nonAgenticSessions)
		snap.Metrics["non_agentic_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: "all-time"}
	}
	// NOTE(review): force-mode counts share the "mode_<name>_sessions"
	// namespace with the modeSessions loop above; a name present in both maps
	// makes this loop overwrite the earlier metric — confirm that is intended.
	for forceMode, count := range forceModes {
		v := float64(count)
		snap.Metrics["mode_"+sanitizeCursorMetricName(forceMode)+"_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: "all-time"}
	}
	if contextSampleCount > 0 {
		// Average context utilisation across all sampled sessions, rounded
		// to one decimal place.
		avgPct := math.Round(((totalContextUsed/totalContextLimit)*100)*10) / 10
		hundred := 100.0
		remaining := hundred - avgPct
		snap.Metrics["composer_context_pct"] = core.Metric{
			Used:      &avgPct,
			Remaining: &remaining,
			Limit:     &hundred,
			Unit:      "%",
			Window:    "avg",
		}
	}
	for subagentType, count := range subagentTypes {
		v := float64(count)
		snap.Metrics["subagent_"+sanitizeCursorMetricName(subagentType)+"_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: "all-time"}
	}

	snap.Raw["composer_total_cost"] = fmt.Sprintf("$%.2f", totalCostUSD)
	snap.Raw["composer_total_sessions"] = strconv.Itoa(totalSessions)
	snap.Raw["composer_total_requests"] = strconv.Itoa(totalRequests)
	if totalLinesAdded > 0 {
		snap.Raw["composer_lines_added"] = strconv.Itoa(totalLinesAdded)
		snap.Raw["composer_lines_removed"] = strconv.Itoa(totalLinesRemoved)
	}

	// Only emit daily series when more than one day of data exists;
	// a single point is not a useful trend line.
	if len(dailyCost) > 1 {
		points := make([]core.TimePoint, 0, len(dailyCost))
		for day, cents := range dailyCost {
			points = append(points, core.TimePoint{Date: day, Value: cents / 100.0})
		}
		sort.Slice(points, func(i, j int) bool { return points[i].Date < points[j].Date })
		snap.DailySeries["analytics_cost"] = points
	}
	if len(dailyRequests) > 1 {
		points := mapToSortedDailyPoints(dailyRequests)
		if existing, ok := snap.DailySeries["analytics_requests"]; ok && len(existing) > 0 {
			snap.DailySeries["analytics_requests"] = mergeDailyPoints(existing, points)
		} else {
			// Fix: the fallback previously stored the series under
			// "composer_requests_daily", orphaning composer data from every
			// consumer of "analytics_requests" whenever the tracking DB
			// produced no baseline series. The cost series above always uses
			// the single "analytics_cost" key; requests now mirror that.
			snap.DailySeries["analytics_requests"] = points
		}
	}
}

func mergeDailyPoints(a, b []core.TimePoint) []core.TimePoint {
byDay := make(map[string]float64) + for _, point := range a { + byDay[point.Date] += point.Value + } + for _, point := range b { + if byDay[point.Date] < point.Value { + byDay[point.Date] = point.Value + } + } + return mapToSortedDailyPoints(byDay) +} + +func (p *Provider) readStateMetadata(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { + var email string + if db.QueryRowContext(ctx, `SELECT value FROM ItemTable WHERE key = 'cursorAuth/cachedEmail'`).Scan(&email) == nil && email != "" { + snap.Raw["account_email"] = email + } + + var promptCount string + if db.QueryRowContext(ctx, `SELECT value FROM ItemTable WHERE key = 'freeBestOfN.promptCount'`).Scan(&promptCount) == nil && promptCount != "" { + if v, err := strconv.ParseFloat(promptCount, 64); err == nil && v > 0 { + snap.Metrics["total_prompts"] = core.Metric{Used: &v, Unit: "prompts", Window: "all-time"} + snap.Raw["total_prompts"] = promptCount + } + } + + var membership string + if db.QueryRowContext(ctx, `SELECT value FROM ItemTable WHERE key = 'cursorAuth/stripeMembershipType'`).Scan(&membership) == nil && membership != "" { + if snap.Raw["membership_type"] == "" { + snap.Raw["membership_type"] = membership + } + } +} + +func (p *Provider) readToolUsage(records []cursorBubbleRecord, snap *core.UsageSnapshot) { + toolCounts := make(map[string]int) + statusCounts := make(map[string]int) + totalCalls := 0 + + for _, record := range records { + if strings.TrimSpace(record.ToolName) == "" { + continue + } + name := normalizeToolName(record.ToolName) + toolCounts[name]++ + totalCalls++ + if strings.TrimSpace(record.ToolStatus) != "" { + statusCounts[record.ToolStatus]++ + } + } + + if totalCalls == 0 { + return + } + + tc := float64(totalCalls) + snap.Metrics["tool_calls_total"] = core.Metric{Used: &tc, Unit: "calls", Window: "all-time"} + for name, count := range toolCounts { + v := float64(count) + snap.Metrics["tool_"+sanitizeCursorMetricName(name)] = core.Metric{Used: &v, Unit: "calls", 
Window: "all-time"} + } + if completed, ok := statusCounts["completed"]; ok && completed > 0 { + v := float64(completed) + snap.Metrics["tool_completed"] = core.Metric{Used: &v, Unit: "calls", Window: "all-time"} + } + if errored, ok := statusCounts["error"]; ok && errored > 0 { + v := float64(errored) + snap.Metrics["tool_errored"] = core.Metric{Used: &v, Unit: "calls", Window: "all-time"} + } + if cancelled, ok := statusCounts["cancelled"]; ok && cancelled > 0 { + v := float64(cancelled) + snap.Metrics["tool_cancelled"] = core.Metric{Used: &v, Unit: "calls", Window: "all-time"} + } + + completed := float64(statusCounts["completed"]) + successPct := math.Round((completed/float64(totalCalls))*1000) / 10 + hundred := 100.0 + remaining := hundred - successPct + snap.Metrics["tool_success_rate"] = core.Metric{ + Used: &successPct, + Remaining: &remaining, + Limit: &hundred, + Unit: "%", + Window: "all-time", + } + snap.Raw["tool_calls_total"] = strconv.Itoa(totalCalls) + snap.Raw["tool_completed"] = strconv.Itoa(statusCounts["completed"]) + snap.Raw["tool_errored"] = strconv.Itoa(statusCounts["error"]) + snap.Raw["tool_cancelled"] = strconv.Itoa(statusCounts["cancelled"]) +} + +func normalizeToolName(raw string) string { + name := strings.TrimSpace(raw) + if name == "" { + return "unknown" + } + if strings.HasPrefix(name, "mcp-") || strings.HasPrefix(name, "mcp_") { + return normalizeCursorMCPName(name) + } + name = strings.TrimSuffix(name, "_v2") + name = strings.TrimSuffix(name, "_v3") + return name +} + +func normalizeCursorMCPName(name string) string { + if strings.HasPrefix(name, "mcp-") { + rest := name[4:] + parts := strings.SplitN(rest, "-user-", 2) + if len(parts) == 2 { + server := parts[0] + afterUser := parts[1] + serverDash := server + "-" + if strings.HasPrefix(afterUser, serverDash) { + return "mcp__" + server + "__" + afterUser[len(serverDash):] + } + if idx := strings.LastIndex(afterUser, "-"); idx > 0 { + return "mcp__" + server + "__" + 
afterUser[idx+1:] + } + return "mcp__" + server + "__" + afterUser + } + if idx := strings.Index(rest, "-"); idx > 0 { + return "mcp__" + rest[:idx] + "__" + rest[idx+1:] + } + return "mcp__" + rest + "__" + } + + if strings.HasPrefix(name, "mcp_") { + rest := name[4:] + if idx := strings.Index(rest, "_"); idx > 0 { + return "mcp__" + rest[:idx] + "__" + rest[idx+1:] + } + return "mcp__" + rest + "__" + } + return name +} + +func (p *Provider) readDailyStatsSeries(records []cursorDailyStatsRecord, snap *core.UsageSnapshot) { + for _, record := range records { + stats := record.Stats + dateStr := record.Date + if stats.TabSuggestedLines > 0 || stats.TabAcceptedLines > 0 { + snap.DailySeries["tab_suggested"] = append(snap.DailySeries["tab_suggested"], core.TimePoint{Date: dateStr, Value: float64(stats.TabSuggestedLines)}) + snap.DailySeries["tab_accepted"] = append(snap.DailySeries["tab_accepted"], core.TimePoint{Date: dateStr, Value: float64(stats.TabAcceptedLines)}) + } + if stats.ComposerSuggestedLines > 0 || stats.ComposerAcceptedLines > 0 { + snap.DailySeries["composer_suggested"] = append(snap.DailySeries["composer_suggested"], core.TimePoint{Date: dateStr, Value: float64(stats.ComposerSuggestedLines)}) + snap.DailySeries["composer_accepted"] = append(snap.DailySeries["composer_accepted"], core.TimePoint{Date: dateStr, Value: float64(stats.ComposerAcceptedLines)}) + } + totalLines := float64(stats.TabSuggestedLines + stats.ComposerSuggestedLines) + if totalLines > 0 { + snap.DailySeries["total_lines"] = append(snap.DailySeries["total_lines"], core.TimePoint{Date: dateStr, Value: totalLines}) + } + } +} + +func formatTimestamp(s string) string { + t := shared.FlexParseTime(s) + if t.IsZero() { + return s + } + return t.Format("Jan 02, 2006 15:04 MST") +} diff --git a/internal/providers/cursor/tracking_projection.go b/internal/providers/cursor/tracking_projection.go new file mode 100644 index 0000000..97e9735 --- /dev/null +++ 
b/internal/providers/cursor/tracking_projection.go @@ -0,0 +1,461 @@ +package cursor + +import ( + "context" + "database/sql" + "fmt" + "math" + "sort" + "strconv" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/samber/lo" +) + +func (p *Provider) readTrackingDB(ctx context.Context, dbPath string, snap *core.UsageSnapshot) error { + db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", dbPath)) + if err != nil { + return fmt.Errorf("opening tracking DB: %w", err) + } + defer db.Close() + + if !cursorTableExists(ctx, db, "ai_code_hashes") { + return nil + } + + trackingRecords, err := loadTrackingRecords(ctx, db, p.clock) + if err != nil { + return err + } + totalRequests := len(trackingRecords) + if totalRequests > 0 { + total := float64(totalRequests) + snap.Metrics["total_ai_requests"] = core.Metric{Used: &total, Unit: "requests", Window: "all-time"} + } + + today := p.now().Format("2006-01-02") + todayCount := 0 + for _, record := range trackingRecords { + if record.OccurredDay == today { + todayCount++ + } + } + if todayCount > 0 { + tc := float64(todayCount) + snap.Metrics["requests_today"] = core.Metric{Used: &tc, Unit: "requests", Window: "1d"} + } + + p.readTrackingSourceBreakdown(trackingRecords, snap, today) + p.readTrackingDailyRequests(trackingRecords, snap) + p.readTrackingModelBreakdown(trackingRecords, snap, today) + p.readTrackingLanguageBreakdown(trackingRecords, snap) + p.readScoredCommits(ctx, db, snap) + p.readDeletedFiles(ctx, db, snap) + p.readTrackedFileContent(ctx, db, snap) + return nil +} + +func (p *Provider) readScoredCommits(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { + var totalCommits int + if db.QueryRowContext(ctx, `SELECT COUNT(*) FROM scored_commits WHERE linesAdded IS NOT NULL AND linesAdded > 0`).Scan(&totalCommits) != nil || totalCommits == 0 { + return + } + + rows, err := db.QueryContext(ctx, ` + SELECT v2AiPercentage, linesAdded, linesDeleted, + tabLinesAdded, 
tabLinesDeleted, + composerLinesAdded, composerLinesDeleted, + humanLinesAdded, humanLinesDeleted, + blankLinesAdded, blankLinesDeleted + FROM scored_commits + WHERE linesAdded IS NOT NULL AND linesAdded > 0 + ORDER BY scoredAt DESC`) + if err != nil { + return + } + defer rows.Close() + + var ( + sumAIPct float64 + countWithPct int + totalTabAdd int + totalTabDel int + totalCompAdd int + totalCompDel int + totalHumanAdd int + totalHumanDel int + totalBlankAdd int + totalBlankDel int + totalLinesAdd int + totalLinesDel int + ) + + for rows.Next() { + var pctStr sql.NullString + var linesAdded, linesDeleted sql.NullInt64 + var tabAdd, tabDel, compAdd, compDel, humanAdd, humanDel sql.NullInt64 + var blankAdd, blankDel sql.NullInt64 + if rows.Scan(&pctStr, &linesAdded, &linesDeleted, &tabAdd, &tabDel, &compAdd, &compDel, &humanAdd, &humanDel, &blankAdd, &blankDel) != nil { + continue + } + if pctStr.Valid && pctStr.String != "" { + if v, err := strconv.ParseFloat(pctStr.String, 64); err == nil { + sumAIPct += v + countWithPct++ + } + } + if linesAdded.Valid { + totalLinesAdd += int(linesAdded.Int64) + } + if linesDeleted.Valid { + totalLinesDel += int(linesDeleted.Int64) + } + if tabAdd.Valid { + totalTabAdd += int(tabAdd.Int64) + } + if tabDel.Valid { + totalTabDel += int(tabDel.Int64) + } + if compAdd.Valid { + totalCompAdd += int(compAdd.Int64) + } + if compDel.Valid { + totalCompDel += int(compDel.Int64) + } + if humanAdd.Valid { + totalHumanAdd += int(humanAdd.Int64) + } + if humanDel.Valid { + totalHumanDel += int(humanDel.Int64) + } + if blankAdd.Valid { + totalBlankAdd += int(blankAdd.Int64) + } + if blankDel.Valid { + totalBlankDel += int(blankDel.Int64) + } + } + + tc := float64(totalCommits) + snap.Metrics["scored_commits"] = core.Metric{Used: &tc, Unit: "commits", Window: "all-time"} + snap.Raw["scored_commits_total"] = strconv.Itoa(totalCommits) + + if countWithPct > 0 { + avgPct := math.Round((sumAIPct/float64(countWithPct))*10) / 10 + hundred := 100.0 + 
remaining := hundred - avgPct + snap.Metrics["ai_code_percentage"] = core.Metric{ + Used: &avgPct, + Remaining: &remaining, + Limit: &hundred, + Unit: "%", + Window: "all-commits", + } + snap.Raw["ai_code_pct_avg"] = fmt.Sprintf("%.1f%%", avgPct) + snap.Raw["ai_code_pct_sample"] = strconv.Itoa(countWithPct) + } + + if totalLinesAdd > 0 || totalLinesDel > 0 { + snap.Raw["commit_total_lines_added"] = strconv.Itoa(totalLinesAdd) + snap.Raw["commit_total_lines_deleted"] = strconv.Itoa(totalLinesDel) + } + if totalTabAdd > 0 || totalCompAdd > 0 || totalHumanAdd > 0 { + snap.Raw["commit_tab_lines"] = strconv.Itoa(totalTabAdd) + snap.Raw["commit_composer_lines"] = strconv.Itoa(totalCompAdd) + snap.Raw["commit_human_lines"] = strconv.Itoa(totalHumanAdd) + } + if totalTabDel > 0 || totalCompDel > 0 || totalHumanDel > 0 { + snap.Raw["commit_tab_lines_deleted"] = strconv.Itoa(totalTabDel) + snap.Raw["commit_composer_lines_deleted"] = strconv.Itoa(totalCompDel) + snap.Raw["commit_human_lines_deleted"] = strconv.Itoa(totalHumanDel) + } + if totalBlankAdd > 0 || totalBlankDel > 0 { + snap.Raw["commit_blank_lines_added"] = strconv.Itoa(totalBlankAdd) + snap.Raw["commit_blank_lines_deleted"] = strconv.Itoa(totalBlankDel) + } +} + +func (p *Provider) readDeletedFiles(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { + var count int + if db.QueryRowContext(ctx, `SELECT COUNT(*) FROM ai_deleted_files`).Scan(&count) == nil && count > 0 { + v := float64(count) + snap.Metrics["ai_deleted_files"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} + } +} + +func (p *Provider) readTrackedFileContent(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) { + var count int + if db.QueryRowContext(ctx, `SELECT COUNT(*) FROM tracked_file_content`).Scan(&count) == nil && count > 0 { + v := float64(count) + snap.Metrics["ai_tracked_files"] = core.Metric{Used: &v, Unit: "files", Window: "all-time"} + } +} + +func chooseTrackingTimeExpr(ctx context.Context, db *sql.DB) 
string { + columns := cursorTableColumns(ctx, db, "ai_code_hashes") + hasCreatedAt := columns["createdat"] + hasTimestamp := columns["timestamp"] + switch { + case hasCreatedAt && hasTimestamp: + return "COALESCE(createdAt, timestamp)" + case hasCreatedAt: + return "createdAt" + case hasTimestamp: + return "timestamp" + default: + return "0" + } +} + +func (p *Provider) readTrackingSourceBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot, today string) { + clientTotals := map[string]float64{"ide": 0, "cli_agents": 0, "other": 0} + sourceTotals := make(map[string]int) + todaySourceTotals := make(map[string]int) + var sourceSummary []string + for _, record := range records { + sourceTotals[record.Source]++ + if record.OccurredDay == today { + todaySourceTotals[record.Source]++ + } + } + for source, count := range sourceTotals { + value := float64(count) + sourceKey := sanitizeCursorMetricName(source) + snap.Metrics["source_"+sourceKey+"_requests"] = core.Metric{Used: &value, Unit: "requests", Window: "all-time"} + ifaceValue := value + snap.Metrics["interface_"+sourceKey] = core.Metric{Used: &ifaceValue, Unit: "calls", Window: "all-time"} + clientTotals[cursorClientBucket(source)] += value + sourceSummary = append(sourceSummary, fmt.Sprintf("%s %d", sourceLabel(source), count)) + } + if len(sourceSummary) > 0 { + snap.Raw["source_usage"] = strings.Join(sourceSummary, " · ") + } + for bucket, value := range clientTotals { + if value <= 0 { + continue + } + v := value + snap.Metrics["client_"+bucket+"_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: "all-time"} + } + + var todaySummary []string + for source, count := range todaySourceTotals { + if count <= 0 { + continue + } + value := float64(count) + sourceKey := sanitizeCursorMetricName(source) + snap.Metrics["source_"+sourceKey+"_requests_today"] = core.Metric{Used: &value, Unit: "requests", Window: "1d"} + todaySummary = append(todaySummary, fmt.Sprintf("%s %d", sourceLabel(source), 
count)) + } + if len(todaySummary) > 0 { + snap.Raw["source_usage_today"] = strings.Join(todaySummary, " · ") + } +} + +func (p *Provider) readTrackingDailyRequests(records []cursorTrackingRecord, snap *core.UsageSnapshot) { + totalByDay := make(map[string]float64) + byClientDay := map[string]map[string]float64{ + "ide": make(map[string]float64), + "cli_agents": make(map[string]float64), + "other": make(map[string]float64), + } + bySourceDay := make(map[string]map[string]float64) + + for _, record := range records { + if record.OccurredDay == "" { + continue + } + totalByDay[record.OccurredDay] += 1 + clientKey := cursorClientBucket(record.Source) + if byClientDay[clientKey] == nil { byClientDay[clientKey] = make(map[string]float64) }; byClientDay[clientKey][record.OccurredDay] += 1 + sourceKey := sanitizeCursorMetricName(record.Source) + if bySourceDay[sourceKey] == nil { + bySourceDay[sourceKey] = make(map[string]float64) + } + bySourceDay[sourceKey][record.OccurredDay] += 1 + } + + if len(totalByDay) > 1 { + snap.DailySeries["analytics_requests"] = mapToSortedDailyPoints(totalByDay) + } + for clientKey, pointsByDay := range byClientDay { + if len(pointsByDay) < 2 { + continue + } + snap.DailySeries["usage_client_"+clientKey] = mapToSortedDailyPoints(pointsByDay) + } + for sourceKey, pointsByDay := range bySourceDay { + if len(pointsByDay) < 2 { + continue + } + snap.DailySeries["usage_source_"+sourceKey] = mapToSortedDailyPoints(pointsByDay) + } +} + +func (p *Provider) readTrackingModelBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot, today string) { + modelTotals := make(map[string]int) + todayModelTotals := make(map[string]int) + byModelDay := make(map[string]map[string]float64) + var modelSummary []string + for _, record := range records { + modelTotals[record.Model]++ + if record.OccurredDay == today { + todayModelTotals[record.Model]++ + } + modelKey := sanitizeCursorMetricName(record.Model) + if byModelDay[modelKey] == nil { + byModelDay[modelKey] = make(map[string]float64) + } + if record.OccurredDay != "" { + 
byModelDay[modelKey][record.OccurredDay]++ + } + } + for model, count := range modelTotals { + if count <= 0 { + continue + } + value := float64(count) + modelKey := sanitizeCursorMetricName(model) + snap.Metrics["model_"+modelKey+"_requests"] = core.Metric{Used: &value, Unit: "requests", Window: "all-time"} + modelSummary = append(modelSummary, fmt.Sprintf("%s %d", sourceLabel(model), count)) + } + if len(modelSummary) > 0 { + snap.Raw["model_usage"] = strings.Join(modelSummary, " · ") + } + for model, count := range todayModelTotals { + if count <= 0 { + continue + } + value := float64(count) + modelKey := sanitizeCursorMetricName(model) + snap.Metrics["model_"+modelKey+"_requests_today"] = core.Metric{Used: &value, Unit: "requests", Window: "1d"} + } + for modelKey, pointsByDay := range byModelDay { + if len(pointsByDay) < 2 { + continue + } + snap.DailySeries["usage_model_"+modelKey] = mapToSortedDailyPoints(pointsByDay) + } +} + +func (p *Provider) readTrackingLanguageBreakdown(records []cursorTrackingRecord, snap *core.UsageSnapshot) { + langTotals := make(map[string]int) + var langSummary []string + for _, record := range records { + if strings.TrimSpace(record.FileExt) == "" { + continue + } + langTotals[record.FileExt]++ + } + for ext, count := range langTotals { + value := float64(count) + langName := extensionToLanguage(ext) + langKey := sanitizeCursorMetricName(langName) + snap.Metrics["lang_"+langKey] = core.Metric{Used: &value, Unit: "requests", Window: "all-time"} + langSummary = append(langSummary, fmt.Sprintf("%s %d", langName, count)) + } + if len(langSummary) > 0 { + snap.Raw["language_usage"] = strings.Join(langSummary, " · ") + } +} + +var extToLang = map[string]string{ + ".ts": "TypeScript", ".tsx": "TypeScript", ".js": "JavaScript", ".jsx": "JavaScript", + ".py": "Python", ".go": "Go", ".rs": "Rust", ".rb": "Ruby", + ".java": "Java", ".kt": "Kotlin", ".kts": "Kotlin", + ".cs": "C#", ".fs": "F#", + ".cpp": "C++", ".cc": "C++", ".cxx": "C++", 
".hpp": "C++", + ".c": "C", ".h": "C/C++", + ".swift": "Swift", ".m": "Obj-C", + ".php": "PHP", ".lua": "Lua", ".r": "R", + ".scala": "Scala", ".clj": "Clojure", ".ex": "Elixir", ".exs": "Elixir", + ".hs": "Haskell", ".erl": "Erlang", + ".html": "HTML", ".htm": "HTML", ".css": "CSS", ".scss": "SCSS", ".less": "LESS", + ".json": "JSON", ".yaml": "YAML", ".yml": "YAML", ".toml": "TOML", ".xml": "XML", + ".md": "Markdown", ".mdx": "Markdown", + ".sql": "SQL", ".graphql": "GraphQL", ".gql": "GraphQL", + ".sh": "Shell", ".bash": "Shell", ".zsh": "Shell", ".fish": "Shell", + ".dockerfile": "Docker", ".tf": "Terraform", ".hcl": "HCL", + ".vue": "Vue", ".svelte": "Svelte", ".astro": "Astro", + ".dart": "Dart", ".zig": "Zig", ".nim": "Nim", ".v": "V", + ".proto": "Protobuf", ".wasm": "WASM", +} + +func extensionToLanguage(ext string) string { + ext = strings.ToLower(strings.TrimSpace(ext)) + if !strings.HasPrefix(ext, ".") { + ext = "." + ext + } + if lang, ok := extToLang[ext]; ok { + return lang + } + return strings.TrimPrefix(ext, ".") +} + +func mapToSortedDailyPoints(byDay map[string]float64) []core.TimePoint { + if len(byDay) == 0 { + return nil + } + days := lo.Keys(byDay) + sort.Strings(days) + points := make([]core.TimePoint, 0, len(days)) + for _, day := range days { + points = append(points, core.TimePoint{Date: day, Value: byDay[day]}) + } + return points +} + +func cursorClientBucket(source string) string { + s := strings.ToLower(strings.TrimSpace(source)) + switch { + case s == "": + return "other" + case strings.Contains(s, "cloud"), strings.Contains(s, "web"), s == "background-agent", s == "background_agent": + return "cloud_agents" + case strings.Contains(s, "cli"), strings.Contains(s, "agent"), strings.Contains(s, "terminal"), strings.Contains(s, "cmd"): + return "cli_agents" + case s == "composer", s == "tab", s == "human", strings.Contains(s, "ide"), strings.Contains(s, "editor"): + return "ide" + default: + return "other" + } +} + +func 
sanitizeCursorMetricName(source string) string { + s := strings.ToLower(strings.TrimSpace(source)) + if s == "" { + return "unknown" + } + var b strings.Builder + lastUnderscore := false + for _, r := range s { + switch { + case r >= 'a' && r <= 'z': + b.WriteRune(r) + lastUnderscore = false + case r >= '0' && r <= '9': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore { + b.WriteByte('_') + lastUnderscore = true + } + } + } + out := strings.Trim(b.String(), "_") + if out == "" { + return "unknown" + } + return out +} + +func sourceLabel(source string) string { + trimmed := strings.TrimSpace(source) + if trimmed == "" { + return "unknown" + } + return trimmed +} diff --git a/internal/providers/openrouter/analytics.go b/internal/providers/openrouter/analytics.go new file mode 100644 index 0000000..9d202d0 --- /dev/null +++ b/internal/providers/openrouter/analytics.go @@ -0,0 +1,729 @@ +package openrouter + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (p *Provider) fetchAnalytics(ctx context.Context, baseURL, apiKey string, snap *core.UsageSnapshot) error { + var analytics analyticsResponse + var activityEndpoint string + var activityCachedAt string + forbiddenMsg := "" + yesterdayUTC := p.now().UTC().AddDate(0, 0, -1).Format("2006-01-02") + + for _, endpoint := range []string{ + "/activity", + "/activity?date=" + yesterdayUTC, + "/analytics/user-activity", + "/api/internal/v1/transaction-analytics?window=1mo", + } { + url := analyticsEndpointURL(baseURL, endpoint) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+apiKey) + req.Header.Set("Accept", "application/json") + req.Header.Set("Cache-Control", "no-cache, no-store, max-age=0") + req.Header.Set("Pragma", "no-cache") + + resp, err := p.Client().Do(req) + if err != nil { 
+ return err + } + + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return err + } + + if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusNotFound { + if endpoint == "/activity" && resp.StatusCode == http.StatusForbidden { + msg := parseAPIErrorMessage(body) + if msg == "" { + msg = "activity endpoint requires management key" + } + forbiddenMsg = msg + } + continue + } + if resp.StatusCode != http.StatusOK { + continue + } + + parsed, cachedAt, ok, err := parseAnalyticsBody(body) + if err != nil || !ok { + continue + } + analytics = parsed + activityEndpoint = endpoint + activityCachedAt = cachedAt + break + } + + if activityEndpoint == "" { + if forbiddenMsg != "" { + return fmt.Errorf("%s (HTTP 403)", forbiddenMsg) + } + return fmt.Errorf("analytics endpoint not available (HTTP 404)") + } + + snap.Raw["activity_endpoint"] = activityEndpoint + if activityCachedAt != "" { + snap.Raw["activity_cached_at"] = activityCachedAt + } + + costByDate := make(map[string]float64) + tokensByDate := make(map[string]float64) + requestsByDate := make(map[string]float64) + byokCostByDate := make(map[string]float64) + reasoningTokensByDate := make(map[string]float64) + cachedTokensByDate := make(map[string]float64) + providerTokensByDate := make(map[string]map[string]float64) + providerRequestsByDate := make(map[string]map[string]float64) + modelCost := make(map[string]float64) + modelByokCost := make(map[string]float64) + modelInputTokens := make(map[string]float64) + modelOutputTokens := make(map[string]float64) + modelReasoningTokens := make(map[string]float64) + modelCachedTokens := make(map[string]float64) + modelTotalTokens := make(map[string]float64) + modelRequests := make(map[string]float64) + modelByokRequests := make(map[string]float64) + providerCost := make(map[string]float64) + providerByokCost := make(map[string]float64) + providerInputTokens := make(map[string]float64) 
+ providerOutputTokens := make(map[string]float64) + providerReasoningTokens := make(map[string]float64) + providerRequests := make(map[string]float64) + endpointStatsMap := make(map[string]*endpointStats) + models := make(map[string]struct{}) + providers := make(map[string]struct{}) + endpoints := make(map[string]struct{}) + activeDays := make(map[string]struct{}) + + now := p.now().UTC() + todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC) + sevenDaysAgo := now.AddDate(0, 0, -7) + thirtyDaysAgo := now.AddDate(0, 0, -30) + + var totalCost, totalByok, totalRequests float64 + var totalInput, totalOutput, totalReasoning, totalCached, totalTokens float64 + var cost7d, byok7d, requests7d float64 + var input7d, output7d, reasoning7d, cached7d, tokens7d float64 + var todayByok, cost7dByok, cost30dByok float64 + var minDate, maxDate string + + for _, entry := range analytics.Data { + if entry.Date == "" { + continue + } + date, entryDate, hasParsedDate := normalizeActivityDate(entry.Date) + + cost := entry.Usage + if cost == 0 { + cost = entry.TotalCost + } + tokens := float64(entry.TotalTokens) + if tokens == 0 { + tokens = float64(entry.PromptTokens + entry.CompletionTokens + entry.ReasoningTokens) + } + inputTokens := float64(entry.PromptTokens) + outputTokens := float64(entry.CompletionTokens) + requests := float64(entry.Requests) + byokCost := entry.ByokUsageInference + byokRequests := float64(entry.ByokRequests) + reasoningTokens := float64(entry.ReasoningTokens) + cachedTokens := float64(entry.CachedTokens) + modelName := normalizeModelName(entry.Model) + if modelName == "" { + modelName = normalizeModelName(entry.ModelPermaslug) + } + if modelName == "" { + modelName = "unknown" + } + providerName := entry.ProviderName + if providerName == "" { + providerName = "unknown" + } + endpointID := strings.TrimSpace(entry.EndpointID) + if endpointID == "" { + endpointID = "unknown" + } + + costByDate[date] += cost + tokensByDate[date] += 
tokens + requestsByDate[date] += requests + byokCostByDate[date] += byokCost + reasoningTokensByDate[date] += reasoningTokens + cachedTokensByDate[date] += cachedTokens + modelCost[modelName] += cost + modelByokCost[modelName] += byokCost + modelInputTokens[modelName] += inputTokens + modelOutputTokens[modelName] += outputTokens + modelReasoningTokens[modelName] += reasoningTokens + modelCachedTokens[modelName] += cachedTokens + modelTotalTokens[modelName] += tokens + modelRequests[modelName] += requests + modelByokRequests[modelName] += byokRequests + providerCost[providerName] += cost + providerByokCost[providerName] += byokCost + providerInputTokens[providerName] += inputTokens + providerOutputTokens[providerName] += outputTokens + providerReasoningTokens[providerName] += reasoningTokens + providerRequests[providerName] += requests + providerClientKey := sanitizeName(strings.ToLower(providerName)) + if providerTokensByDate[providerClientKey] == nil { + providerTokensByDate[providerClientKey] = make(map[string]float64) + } + providerTokensByDate[providerClientKey][date] += inputTokens + outputTokens + reasoningTokens + if providerRequestsByDate[providerClientKey] == nil { + providerRequestsByDate[providerClientKey] = make(map[string]float64) + } + providerRequestsByDate[providerClientKey][date] += requests + + stats := endpointStatsMap[endpointID] + if stats == nil { + stats = &endpointStats{Model: modelName, Provider: providerName} + endpointStatsMap[endpointID] = stats + } + stats.Requests += entry.Requests + stats.TotalCost += cost + stats.ByokCost += byokCost + stats.PromptTokens += entry.PromptTokens + stats.CompletionTokens += entry.CompletionTokens + stats.ReasoningTokens += entry.ReasoningTokens + + models[modelName] = struct{}{} + providers[providerName] = struct{}{} + if endpointID != "unknown" { + endpoints[endpointID] = struct{}{} + } + activeDays[date] = struct{}{} + + if minDate == "" || date < minDate { + minDate = date + } + if maxDate == "" || 
date > maxDate { + maxDate = date + } + + totalCost += cost + totalByok += byokCost + totalRequests += requests + totalInput += inputTokens + totalOutput += outputTokens + totalReasoning += reasoningTokens + totalCached += cachedTokens + totalTokens += tokens + + if !hasParsedDate { + continue + } + if !entryDate.Before(todayStart) { + todayByok += byokCost + } + if entryDate.After(sevenDaysAgo) { + cost7dByok += byokCost + } + if entryDate.After(thirtyDaysAgo) { + cost30dByok += byokCost + } + if entryDate.After(sevenDaysAgo) { + cost7d += cost + byok7d += byokCost + requests7d += requests + input7d += inputTokens + output7d += outputTokens + reasoning7d += reasoningTokens + cached7d += cachedTokens + tokens7d += tokens + } + } + + snap.Raw["activity_rows"] = fmt.Sprintf("%d", len(analytics.Data)) + if minDate != "" && maxDate != "" { + snap.Raw["activity_date_range"] = minDate + " .. " + maxDate + } + if minDate != "" { + snap.Raw["activity_min_date"] = minDate + } + if maxDate != "" { + snap.Raw["activity_max_date"] = maxDate + } + if len(models) > 0 { + snap.Raw["activity_models"] = fmt.Sprintf("%d", len(models)) + } + if len(providers) > 0 { + snap.Raw["activity_providers"] = fmt.Sprintf("%d", len(providers)) + } + if len(endpoints) > 0 { + snap.Raw["activity_endpoints"] = fmt.Sprintf("%d", len(endpoints)) + } + if len(activeDays) > 0 { + snap.Raw["activity_days"] = fmt.Sprintf("%d", len(activeDays)) + } + + if len(costByDate) > 0 { + snap.DailySeries["analytics_cost"] = mapToSortedTimePoints(costByDate) + } + if len(tokensByDate) > 0 { + snap.DailySeries["analytics_tokens"] = mapToSortedTimePoints(tokensByDate) + } + if len(requestsByDate) > 0 { + snap.DailySeries["analytics_requests"] = mapToSortedTimePoints(requestsByDate) + } + if len(byokCostByDate) > 0 { + snap.DailySeries["analytics_byok_cost"] = mapToSortedTimePoints(byokCostByDate) + } + if len(reasoningTokensByDate) > 0 { + snap.DailySeries["analytics_reasoning_tokens"] = 
mapToSortedTimePoints(reasoningTokensByDate) + } + if len(cachedTokensByDate) > 0 { + snap.DailySeries["analytics_cached_tokens"] = mapToSortedTimePoints(cachedTokensByDate) + } + + if totalCost > 0 { + snap.Metrics["analytics_30d_cost"] = core.Metric{Used: &totalCost, Unit: "USD", Window: "30d"} + } + if totalByok > 0 { + snap.Metrics["analytics_30d_byok_cost"] = core.Metric{Used: &totalByok, Unit: "USD", Window: "30d"} + snap.Raw["byok_in_use"] = "true" + } + if totalRequests > 0 { + snap.Metrics["analytics_30d_requests"] = core.Metric{Used: &totalRequests, Unit: "requests", Window: "30d"} + } + if totalInput > 0 { + snap.Metrics["analytics_30d_input_tokens"] = core.Metric{Used: &totalInput, Unit: "tokens", Window: "30d"} + } + if totalOutput > 0 { + snap.Metrics["analytics_30d_output_tokens"] = core.Metric{Used: &totalOutput, Unit: "tokens", Window: "30d"} + } + if totalReasoning > 0 { + snap.Metrics["analytics_30d_reasoning_tokens"] = core.Metric{Used: &totalReasoning, Unit: "tokens", Window: "30d"} + } + if totalCached > 0 { + snap.Metrics["analytics_30d_cached_tokens"] = core.Metric{Used: &totalCached, Unit: "tokens", Window: "30d"} + } + if totalTokens > 0 { + snap.Metrics["analytics_30d_tokens"] = core.Metric{Used: &totalTokens, Unit: "tokens", Window: "30d"} + } + + if cost7d > 0 { + snap.Metrics["analytics_7d_cost"] = core.Metric{Used: &cost7d, Unit: "USD", Window: "7d"} + } + if byok7d > 0 { + snap.Metrics["analytics_7d_byok_cost"] = core.Metric{Used: &byok7d, Unit: "USD", Window: "7d"} + snap.Raw["byok_in_use"] = "true" + } + if requests7d > 0 { + snap.Metrics["analytics_7d_requests"] = core.Metric{Used: &requests7d, Unit: "requests", Window: "7d"} + } + if input7d > 0 { + snap.Metrics["analytics_7d_input_tokens"] = core.Metric{Used: &input7d, Unit: "tokens", Window: "7d"} + } + if output7d > 0 { + snap.Metrics["analytics_7d_output_tokens"] = core.Metric{Used: &output7d, Unit: "tokens", Window: "7d"} + } + if reasoning7d > 0 { + 
snap.Metrics["analytics_7d_reasoning_tokens"] = core.Metric{Used: &reasoning7d, Unit: "tokens", Window: "7d"} + } + if cached7d > 0 { + snap.Metrics["analytics_7d_cached_tokens"] = core.Metric{Used: &cached7d, Unit: "tokens", Window: "7d"} + } + if tokens7d > 0 { + snap.Metrics["analytics_7d_tokens"] = core.Metric{Used: &tokens7d, Unit: "tokens", Window: "7d"} + } + + if days := len(activeDays); days > 0 { + v := float64(days) + snap.Metrics["analytics_active_days"] = core.Metric{Used: &v, Unit: "days", Window: "30d"} + } + if count := len(models); count > 0 { + v := float64(count) + snap.Metrics["analytics_models"] = core.Metric{Used: &v, Unit: "models", Window: "30d"} + } + if count := len(providers); count > 0 { + v := float64(count) + snap.Metrics["analytics_providers"] = core.Metric{Used: &v, Unit: "providers", Window: "30d"} + } + if count := len(endpoints); count > 0 { + v := float64(count) + snap.Metrics["analytics_endpoints"] = core.Metric{Used: &v, Unit: "endpoints", Window: "30d"} + } + + emitAnalyticsPerModelMetrics(snap, modelCost, modelByokCost, modelInputTokens, modelOutputTokens, modelReasoningTokens, modelCachedTokens, modelTotalTokens, modelRequests, modelByokRequests) + filterRouterClientProviders(providerCost, providerByokCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests) + emitAnalyticsPerProviderMetrics(snap, providerCost, providerByokCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests) + emitUpstreamProviderMetrics(snap, providerCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests) + emitAnalyticsEndpointMetrics(snap, endpointStatsMap) + for name := range providerTokensByDate { + if isLikelyRouterClientProviderName(name) { + delete(providerTokensByDate, name) + } + } + for name := range providerRequestsByDate { + if isLikelyRouterClientProviderName(name) { + delete(providerRequestsByDate, name) + } + } + 
emitClientDailySeries(snap, providerTokensByDate, providerRequestsByDate) + emitModelDerivedToolUsageMetrics(snap, modelRequests, "30d inferred", "inferred_from_model_requests") + + if todayByok > 0 { + snap.Metrics["today_byok_cost"] = core.Metric{Used: &todayByok, Unit: "USD", Window: "1d"} + snap.Raw["byok_in_use"] = "true" + } + if cost7dByok > 0 { + snap.Metrics["7d_byok_cost"] = core.Metric{Used: &cost7dByok, Unit: "USD", Window: "7d"} + snap.Raw["byok_in_use"] = "true" + } + if cost30dByok > 0 { + snap.Metrics["30d_byok_cost"] = core.Metric{Used: &cost30dByok, Unit: "USD", Window: "30d"} + snap.Raw["byok_in_use"] = "true" + } + + return nil +} + +func analyticsEndpointURL(baseURL, endpoint string) string { + base := strings.TrimRight(baseURL, "/") + if strings.HasPrefix(endpoint, "/api/internal/") && strings.HasSuffix(base, "/api/v1") { + base = strings.TrimSuffix(base, "/api/v1") + } + return base + endpoint +} + +func parseAnalyticsBody(body []byte) (analyticsResponse, string, bool, error) { + var direct analyticsResponse + if err := json.Unmarshal(body, &direct); err == nil && direct.Data != nil { + return direct, "", true, nil + } + + var wrapped analyticsEnvelopeResponse + if err := json.Unmarshal(body, &wrapped); err == nil && wrapped.Data.Data != nil { + return analyticsResponse{Data: wrapped.Data.Data}, parseAnalyticsCachedAt(wrapped.Data.CachedAt), true, nil + } + + return analyticsResponse{}, "", false, fmt.Errorf("unrecognized analytics payload") +} + +func parseAnalyticsCachedAt(raw json.RawMessage) string { + s := strings.TrimSpace(string(raw)) + if s == "" || s == "null" { + return "" + } + + var str string + if err := json.Unmarshal(raw, &str); err == nil { + return strings.TrimSpace(str) + } + + var n float64 + if err := json.Unmarshal(raw, &n); err != nil { + return s + } + + sec := int64(n) + if sec > 1_000_000_000_000 { + sec /= 1000 + } + if sec <= 0 { + return fmt.Sprintf("%.0f", n) + } + return time.Unix(sec, 
0).UTC().Format(time.RFC3339) +} + +func normalizeActivityDate(raw string) (string, time.Time, bool) { + raw = strings.TrimSpace(raw) + for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05", "2006-01-02"} { + if t, err := time.Parse(layout, raw); err == nil { + date := t.UTC().Format("2006-01-02") + return date, t.UTC(), true + } + } + if len(raw) >= 10 && raw[4] == '-' && raw[7] == '-' { + date := raw[:10] + if t, err := time.Parse("2006-01-02", date); err == nil { + return date, t.UTC(), true + } + return date, time.Time{}, false + } + return raw, time.Time{}, false +} + +func emitAnalyticsPerModelMetrics( + snap *core.UsageSnapshot, + modelCost, modelByokCost, modelInputTokens, modelOutputTokens, modelReasoningTokens, modelCachedTokens, modelTotalTokens, modelRequests, modelByokRequests map[string]float64, +) { + modelSet := make(map[string]struct{}) + for model := range modelCost { + modelSet[model] = struct{}{} + } + for model := range modelByokCost { + modelSet[model] = struct{}{} + } + for model := range modelInputTokens { + modelSet[model] = struct{}{} + } + for model := range modelOutputTokens { + modelSet[model] = struct{}{} + } + for model := range modelReasoningTokens { + modelSet[model] = struct{}{} + } + for model := range modelCachedTokens { + modelSet[model] = struct{}{} + } + for model := range modelTotalTokens { + modelSet[model] = struct{}{} + } + for model := range modelRequests { + modelSet[model] = struct{}{} + } + for model := range modelByokRequests { + modelSet[model] = struct{}{} + } + + for model := range modelSet { + safe := sanitizeName(model) + prefix := "model_" + safe + rec := core.ModelUsageRecord{RawModelID: model, RawSource: "api", Window: "activity"} + + if v := modelCost[model]; v > 0 { + snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} + rec.CostUSD = core.Float64Ptr(v) + } + if v := modelByokCost[model]; v > 0 { + snap.Metrics[prefix+"_byok_cost"] = 
core.Metric{Used: &v, Unit: "USD", Window: "activity"} + } + if v := modelInputTokens[model]; v > 0 { + snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + rec.InputTokens = core.Float64Ptr(v) + } + if v := modelOutputTokens[model]; v > 0 { + snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + rec.OutputTokens = core.Float64Ptr(v) + } + if v := modelReasoningTokens[model]; v > 0 { + snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + rec.ReasoningTokens = core.Float64Ptr(v) + } + if v := modelCachedTokens[model]; v > 0 { + snap.Metrics[prefix+"_cached_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + rec.CachedTokens = core.Float64Ptr(v) + } + if v := modelTotalTokens[model]; v > 0 { + snap.Metrics[prefix+"_total_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + rec.TotalTokens = core.Float64Ptr(v) + } + if v := modelRequests[model]; v > 0 { + snap.Metrics[prefix+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "activity"} + snap.Raw[prefix+"_requests"] = fmt.Sprintf("%.0f", v) + rec.Requests = core.Float64Ptr(v) + } + if v := modelByokRequests[model]; v > 0 { + snap.Metrics[prefix+"_byok_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "activity"} + } + if rec.InputTokens != nil || rec.OutputTokens != nil || rec.CostUSD != nil || rec.Requests != nil || rec.ReasoningTokens != nil || rec.CachedTokens != nil || rec.TotalTokens != nil { + snap.AppendModelUsage(rec) + } + } +} + +func filterRouterClientProviders(maps ...map[string]float64) { + for _, metrics := range maps { + for name := range metrics { + if isLikelyRouterClientProviderName(name) { + delete(metrics, name) + } + } + } +} + +func emitAnalyticsPerProviderMetrics( + snap *core.UsageSnapshot, + providerCost, providerByokCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, 
providerRequests map[string]float64, +) { + providerSet := make(map[string]struct{}) + for provider := range providerCost { + providerSet[provider] = struct{}{} + } + for provider := range providerByokCost { + providerSet[provider] = struct{}{} + } + for provider := range providerInputTokens { + providerSet[provider] = struct{}{} + } + for provider := range providerOutputTokens { + providerSet[provider] = struct{}{} + } + for provider := range providerReasoningTokens { + providerSet[provider] = struct{}{} + } + for provider := range providerRequests { + providerSet[provider] = struct{}{} + } + + for provider := range providerSet { + prefix := "provider_" + sanitizeName(strings.ToLower(provider)) + if v := providerCost[provider]; v > 0 { + snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} + } + if v := providerByokCost[provider]; v > 0 { + snap.Metrics[prefix+"_byok_cost"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} + } + if v := providerInputTokens[provider]; v > 0 { + snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if v := providerOutputTokens[provider]; v > 0 { + snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if v := providerReasoningTokens[provider]; v > 0 { + snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if v := providerRequests[provider]; v > 0 { + snap.Metrics[prefix+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "activity"} + } + + snap.Raw[prefix+"_requests"] = fmt.Sprintf("%.0f", providerRequests[provider]) + snap.Raw[prefix+"_cost"] = fmt.Sprintf("$%.6f", providerCost[provider]) + if providerByokCost[provider] > 0 { + snap.Raw[prefix+"_byok_cost"] = fmt.Sprintf("$%.6f", providerByokCost[provider]) + } + snap.Raw[prefix+"_prompt_tokens"] = fmt.Sprintf("%.0f", providerInputTokens[provider]) + 
snap.Raw[prefix+"_completion_tokens"] = fmt.Sprintf("%.0f", providerOutputTokens[provider]) + if providerReasoningTokens[provider] > 0 { + snap.Raw[prefix+"_reasoning_tokens"] = fmt.Sprintf("%.0f", providerReasoningTokens[provider]) + } + } +} + +func emitUpstreamProviderMetrics( + snap *core.UsageSnapshot, + providerCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests map[string]float64, +) { + providerSet := make(map[string]struct{}) + for provider := range providerCost { + providerSet[provider] = struct{}{} + } + for provider := range providerInputTokens { + providerSet[provider] = struct{}{} + } + for provider := range providerOutputTokens { + providerSet[provider] = struct{}{} + } + for provider := range providerReasoningTokens { + providerSet[provider] = struct{}{} + } + for provider := range providerRequests { + providerSet[provider] = struct{}{} + } + + for provider := range providerSet { + prefix := "upstream_" + sanitizeName(strings.ToLower(provider)) + if v := providerCost[provider]; v > 0 { + snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} + } + if v := providerInputTokens[provider]; v > 0 { + snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if v := providerOutputTokens[provider]; v > 0 { + snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if v := providerReasoningTokens[provider]; v > 0 { + snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if v := providerRequests[provider]; v > 0 { + snap.Metrics[prefix+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "activity"} + } + } +} + +func emitAnalyticsEndpointMetrics(snap *core.UsageSnapshot, endpointStatsMap map[string]*endpointStats) { + type endpointEntry struct { + id string + stats *endpointStats + } + + var entries []endpointEntry + for id, stats 
:= range endpointStatsMap { + if id == "unknown" { + continue + } + entries = append(entries, endpointEntry{id: id, stats: stats}) + } + sort.Slice(entries, func(i, j int) bool { + if entries[i].stats.TotalCost != entries[j].stats.TotalCost { + return entries[i].stats.TotalCost > entries[j].stats.TotalCost + } + return entries[i].stats.Requests > entries[j].stats.Requests + }) + + const maxEndpointMetrics = 8 + if len(entries) > maxEndpointMetrics { + entries = entries[:maxEndpointMetrics] + } + for _, entry := range entries { + prefix := "endpoint_" + sanitizeName(entry.id) + + if req := float64(entry.stats.Requests); req > 0 { + snap.Metrics[prefix+"_requests"] = core.Metric{Used: &req, Unit: "requests", Window: "activity"} + } + if entry.stats.TotalCost > 0 { + v := entry.stats.TotalCost + snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} + } + if entry.stats.ByokCost > 0 { + v := entry.stats.ByokCost + snap.Metrics[prefix+"_byok_cost"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} + } + if entry.stats.PromptTokens > 0 { + v := float64(entry.stats.PromptTokens) + snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if entry.stats.CompletionTokens > 0 { + v := float64(entry.stats.CompletionTokens) + snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if entry.stats.ReasoningTokens > 0 { + v := float64(entry.stats.ReasoningTokens) + snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} + } + if entry.stats.Provider != "" { + snap.Raw[prefix+"_provider"] = entry.stats.Provider + } + if entry.stats.Model != "" { + snap.Raw[prefix+"_model"] = entry.stats.Model + } + } +} + +func mapToSortedTimePoints(m map[string]float64) []core.TimePoint { + points := make([]core.TimePoint, 0, len(m)) + for date, val := range m { + points = append(points, core.TimePoint{Date: date, 
Value: val}) + } + sort.Slice(points, func(i, j int) bool { + return points[i].Date < points[j].Date + }) + return points +} + +func parseAPIErrorMessage(body []byte) string { + var apiErr apiErrorResponse + if err := json.Unmarshal(body, &apiErr); err != nil { + return "" + } + return strings.TrimSpace(apiErr.Error.Message) +} diff --git a/internal/providers/openrouter/openrouter.go b/internal/providers/openrouter/openrouter.go index adc80ac..8f652b5 100644 --- a/internal/providers/openrouter/openrouter.go +++ b/internal/providers/openrouter/openrouter.go @@ -6,10 +6,7 @@ import ( "errors" "fmt" "io" - "math" "net/http" - "sort" - "strings" "time" "github.com/janekbaraniewski/openusage/internal/core" @@ -670,1374 +667,3 @@ func (p *Provider) fetchKeysMeta(ctx context.Context, baseURL, apiKey string, sn return nil } - -func (p *Provider) fetchAnalytics(ctx context.Context, baseURL, apiKey string, snap *core.UsageSnapshot) error { - var analytics analyticsResponse - var activityEndpoint string - var activityCachedAt string - forbiddenMsg := "" - yesterdayUTC := p.now().UTC().AddDate(0, 0, -1).Format("2006-01-02") - - for _, endpoint := range []string{ - "/activity", - "/activity?date=" + yesterdayUTC, - "/analytics/user-activity", - // Internal endpoint is web-dashboard oriented and may require session auth; - // keep it as a last-resort fallback only. 
- "/api/internal/v1/transaction-analytics?window=1mo", - } { - url := analyticsEndpointURL(baseURL, endpoint) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return err - } - req.Header.Set("Authorization", "Bearer "+apiKey) - req.Header.Set("Accept", "application/json") - req.Header.Set("Cache-Control", "no-cache, no-store, max-age=0") - req.Header.Set("Pragma", "no-cache") - - resp, err := p.Client().Do(req) - if err != nil { - return err - } - - body, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return err - } - - if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusNotFound { - if endpoint == "/activity" && resp.StatusCode == http.StatusForbidden { - msg := parseAPIErrorMessage(body) - if msg == "" { - msg = "activity endpoint requires management key" - } - forbiddenMsg = msg - } - continue - } - if resp.StatusCode != http.StatusOK { - continue - } - - parsed, cachedAt, ok, err := parseAnalyticsBody(body) - if err != nil { - continue - } - if !ok { - continue - } - analytics = parsed - activityEndpoint = endpoint - activityCachedAt = cachedAt - break - } - - if activityEndpoint == "" { - if forbiddenMsg != "" { - return fmt.Errorf("%s (HTTP 403)", forbiddenMsg) - } - return fmt.Errorf("analytics endpoint not available (HTTP 404)") - } - - snap.Raw["activity_endpoint"] = activityEndpoint - if activityCachedAt != "" { - snap.Raw["activity_cached_at"] = activityCachedAt - } - - costByDate := make(map[string]float64) - tokensByDate := make(map[string]float64) - requestsByDate := make(map[string]float64) - byokCostByDate := make(map[string]float64) - reasoningTokensByDate := make(map[string]float64) - cachedTokensByDate := make(map[string]float64) - providerTokensByDate := make(map[string]map[string]float64) - providerRequestsByDate := make(map[string]map[string]float64) - modelCost := make(map[string]float64) - modelByokCost := 
make(map[string]float64) - modelInputTokens := make(map[string]float64) - modelOutputTokens := make(map[string]float64) - modelReasoningTokens := make(map[string]float64) - modelCachedTokens := make(map[string]float64) - modelTotalTokens := make(map[string]float64) - modelRequests := make(map[string]float64) - modelByokRequests := make(map[string]float64) - providerCost := make(map[string]float64) - providerByokCost := make(map[string]float64) - providerInputTokens := make(map[string]float64) - providerOutputTokens := make(map[string]float64) - providerReasoningTokens := make(map[string]float64) - providerRequests := make(map[string]float64) - endpointStatsMap := make(map[string]*endpointStats) - models := make(map[string]struct{}) - providers := make(map[string]struct{}) - endpoints := make(map[string]struct{}) - activeDays := make(map[string]struct{}) - - now := p.now().UTC() - todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC) - sevenDaysAgo := now.AddDate(0, 0, -7) - thirtyDaysAgo := now.AddDate(0, 0, -30) - - var totalCost, totalByok, totalRequests float64 - var totalInput, totalOutput, totalReasoning, totalCached, totalTokens float64 - var cost7d, byok7d, requests7d float64 - var input7d, output7d, reasoning7d, cached7d, tokens7d float64 - var todayByok, cost7dByok, cost30dByok float64 - var minDate, maxDate string - - for _, entry := range analytics.Data { - if entry.Date == "" { - continue - } - date, entryDate, hasParsedDate := normalizeActivityDate(entry.Date) - - cost := entry.Usage - if cost == 0 { - cost = entry.TotalCost - } - tokens := float64(entry.TotalTokens) - if tokens == 0 { - tokens = float64(entry.PromptTokens + entry.CompletionTokens + entry.ReasoningTokens) - } - inputTokens := float64(entry.PromptTokens) - outputTokens := float64(entry.CompletionTokens) - requests := float64(entry.Requests) - byokCost := entry.ByokUsageInference - byokRequests := float64(entry.ByokRequests) - reasoningTokens := 
float64(entry.ReasoningTokens) - cachedTokens := float64(entry.CachedTokens) - modelName := normalizeModelName(entry.Model) - if modelName == "" { - modelName = normalizeModelName(entry.ModelPermaslug) - } - if modelName == "" { - modelName = "unknown" - } - providerName := entry.ProviderName - if providerName == "" { - providerName = "unknown" - } - endpointID := strings.TrimSpace(entry.EndpointID) - if endpointID == "" { - endpointID = "unknown" - } - - costByDate[date] += cost - tokensByDate[date] += tokens - requestsByDate[date] += requests - byokCostByDate[date] += byokCost - reasoningTokensByDate[date] += reasoningTokens - cachedTokensByDate[date] += cachedTokens - modelCost[modelName] += cost - modelByokCost[modelName] += byokCost - modelInputTokens[modelName] += inputTokens - modelOutputTokens[modelName] += outputTokens - modelReasoningTokens[modelName] += reasoningTokens - modelCachedTokens[modelName] += cachedTokens - modelTotalTokens[modelName] += tokens - modelRequests[modelName] += requests - modelByokRequests[modelName] += byokRequests - providerCost[providerName] += cost - providerByokCost[providerName] += byokCost - providerInputTokens[providerName] += inputTokens - providerOutputTokens[providerName] += outputTokens - providerReasoningTokens[providerName] += reasoningTokens - providerRequests[providerName] += requests - providerClientKey := sanitizeName(strings.ToLower(providerName)) - if providerTokensByDate[providerClientKey] == nil { - providerTokensByDate[providerClientKey] = make(map[string]float64) - } - providerTokensByDate[providerClientKey][date] += inputTokens + outputTokens + reasoningTokens - if providerRequestsByDate[providerClientKey] == nil { - providerRequestsByDate[providerClientKey] = make(map[string]float64) - } - providerRequestsByDate[providerClientKey][date] += requests - - es, ok := endpointStatsMap[endpointID] - if !ok { - es = &endpointStats{Model: modelName, Provider: providerName} - endpointStatsMap[endpointID] = es - } - 
es.Requests += entry.Requests - es.TotalCost += cost - es.ByokCost += byokCost - es.PromptTokens += entry.PromptTokens - es.CompletionTokens += entry.CompletionTokens - es.ReasoningTokens += entry.ReasoningTokens - - models[modelName] = struct{}{} - providers[providerName] = struct{}{} - if endpointID != "unknown" { - endpoints[endpointID] = struct{}{} - } - activeDays[date] = struct{}{} - - if minDate == "" || date < minDate { - minDate = date - } - if maxDate == "" || date > maxDate { - maxDate = date - } - - totalCost += cost - totalByok += byokCost - totalRequests += requests - totalInput += inputTokens - totalOutput += outputTokens - totalReasoning += reasoningTokens - totalCached += cachedTokens - totalTokens += tokens - - if !hasParsedDate { - continue - } - - if !entryDate.Before(todayStart) { - todayByok += byokCost - } - if entryDate.After(sevenDaysAgo) { - cost7dByok += byokCost - } - if entryDate.After(thirtyDaysAgo) { - cost30dByok += byokCost - } - if entryDate.After(sevenDaysAgo) { - cost7d += cost - byok7d += byokCost - requests7d += requests - input7d += inputTokens - output7d += outputTokens - reasoning7d += reasoningTokens - cached7d += cachedTokens - tokens7d += tokens - } - } - - snap.Raw["activity_rows"] = fmt.Sprintf("%d", len(analytics.Data)) - if minDate != "" && maxDate != "" { - snap.Raw["activity_date_range"] = minDate + " .. 
" + maxDate - } - if minDate != "" { - snap.Raw["activity_min_date"] = minDate - } - if maxDate != "" { - snap.Raw["activity_max_date"] = maxDate - } - if len(models) > 0 { - snap.Raw["activity_models"] = fmt.Sprintf("%d", len(models)) - } - if len(providers) > 0 { - snap.Raw["activity_providers"] = fmt.Sprintf("%d", len(providers)) - } - if len(endpoints) > 0 { - snap.Raw["activity_endpoints"] = fmt.Sprintf("%d", len(endpoints)) - } - if len(activeDays) > 0 { - snap.Raw["activity_days"] = fmt.Sprintf("%d", len(activeDays)) - } - - if len(costByDate) > 0 { - snap.DailySeries["analytics_cost"] = mapToSortedTimePoints(costByDate) - } - if len(tokensByDate) > 0 { - snap.DailySeries["analytics_tokens"] = mapToSortedTimePoints(tokensByDate) - } - if len(requestsByDate) > 0 { - snap.DailySeries["analytics_requests"] = mapToSortedTimePoints(requestsByDate) - } - if len(byokCostByDate) > 0 { - snap.DailySeries["analytics_byok_cost"] = mapToSortedTimePoints(byokCostByDate) - } - if len(reasoningTokensByDate) > 0 { - snap.DailySeries["analytics_reasoning_tokens"] = mapToSortedTimePoints(reasoningTokensByDate) - } - if len(cachedTokensByDate) > 0 { - snap.DailySeries["analytics_cached_tokens"] = mapToSortedTimePoints(cachedTokensByDate) - } - - if totalCost > 0 { - snap.Metrics["analytics_30d_cost"] = core.Metric{Used: &totalCost, Unit: "USD", Window: "30d"} - } - if totalByok > 0 { - snap.Metrics["analytics_30d_byok_cost"] = core.Metric{Used: &totalByok, Unit: "USD", Window: "30d"} - snap.Raw["byok_in_use"] = "true" - } - if totalRequests > 0 { - snap.Metrics["analytics_30d_requests"] = core.Metric{Used: &totalRequests, Unit: "requests", Window: "30d"} - } - if totalInput > 0 { - snap.Metrics["analytics_30d_input_tokens"] = core.Metric{Used: &totalInput, Unit: "tokens", Window: "30d"} - } - if totalOutput > 0 { - snap.Metrics["analytics_30d_output_tokens"] = core.Metric{Used: &totalOutput, Unit: "tokens", Window: "30d"} - } - if totalReasoning > 0 { - 
snap.Metrics["analytics_30d_reasoning_tokens"] = core.Metric{Used: &totalReasoning, Unit: "tokens", Window: "30d"} - } - if totalCached > 0 { - snap.Metrics["analytics_30d_cached_tokens"] = core.Metric{Used: &totalCached, Unit: "tokens", Window: "30d"} - } - if totalTokens > 0 { - snap.Metrics["analytics_30d_tokens"] = core.Metric{Used: &totalTokens, Unit: "tokens", Window: "30d"} - } - - if cost7d > 0 { - snap.Metrics["analytics_7d_cost"] = core.Metric{Used: &cost7d, Unit: "USD", Window: "7d"} - } - if byok7d > 0 { - snap.Metrics["analytics_7d_byok_cost"] = core.Metric{Used: &byok7d, Unit: "USD", Window: "7d"} - snap.Raw["byok_in_use"] = "true" - } - if requests7d > 0 { - snap.Metrics["analytics_7d_requests"] = core.Metric{Used: &requests7d, Unit: "requests", Window: "7d"} - } - if input7d > 0 { - snap.Metrics["analytics_7d_input_tokens"] = core.Metric{Used: &input7d, Unit: "tokens", Window: "7d"} - } - if output7d > 0 { - snap.Metrics["analytics_7d_output_tokens"] = core.Metric{Used: &output7d, Unit: "tokens", Window: "7d"} - } - if reasoning7d > 0 { - snap.Metrics["analytics_7d_reasoning_tokens"] = core.Metric{Used: &reasoning7d, Unit: "tokens", Window: "7d"} - } - if cached7d > 0 { - snap.Metrics["analytics_7d_cached_tokens"] = core.Metric{Used: &cached7d, Unit: "tokens", Window: "7d"} - } - if tokens7d > 0 { - snap.Metrics["analytics_7d_tokens"] = core.Metric{Used: &tokens7d, Unit: "tokens", Window: "7d"} - } - - if days := len(activeDays); days > 0 { - v := float64(days) - snap.Metrics["analytics_active_days"] = core.Metric{Used: &v, Unit: "days", Window: "30d"} - } - if count := len(models); count > 0 { - v := float64(count) - snap.Metrics["analytics_models"] = core.Metric{Used: &v, Unit: "models", Window: "30d"} - } - if count := len(providers); count > 0 { - v := float64(count) - snap.Metrics["analytics_providers"] = core.Metric{Used: &v, Unit: "providers", Window: "30d"} - } - if count := len(endpoints); count > 0 { - v := float64(count) - 
snap.Metrics["analytics_endpoints"] = core.Metric{Used: &v, Unit: "endpoints", Window: "30d"} - } - - emitAnalyticsPerModelMetrics(snap, modelCost, modelByokCost, modelInputTokens, modelOutputTokens, modelReasoningTokens, modelCachedTokens, modelTotalTokens, modelRequests, modelByokRequests) - filterRouterClientProviders(providerCost, providerByokCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests) - emitAnalyticsPerProviderMetrics(snap, providerCost, providerByokCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests) - emitUpstreamProviderMetrics(snap, providerCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests) - emitAnalyticsEndpointMetrics(snap, endpointStatsMap) - for name := range providerTokensByDate { - if isLikelyRouterClientProviderName(name) { - delete(providerTokensByDate, name) - } - } - for name := range providerRequestsByDate { - if isLikelyRouterClientProviderName(name) { - delete(providerRequestsByDate, name) - } - } - emitClientDailySeries(snap, providerTokensByDate, providerRequestsByDate) - emitModelDerivedToolUsageMetrics(snap, modelRequests, "30d inferred", "inferred_from_model_requests") - - if todayByok > 0 { - snap.Metrics["today_byok_cost"] = core.Metric{Used: &todayByok, Unit: "USD", Window: "1d"} - snap.Raw["byok_in_use"] = "true" - } - if cost7dByok > 0 { - snap.Metrics["7d_byok_cost"] = core.Metric{Used: &cost7dByok, Unit: "USD", Window: "7d"} - snap.Raw["byok_in_use"] = "true" - } - if cost30dByok > 0 { - snap.Metrics["30d_byok_cost"] = core.Metric{Used: &cost30dByok, Unit: "USD", Window: "30d"} - snap.Raw["byok_in_use"] = "true" - } - - return nil -} - -func analyticsEndpointURL(baseURL, endpoint string) string { - base := strings.TrimRight(baseURL, "/") - if strings.HasPrefix(endpoint, "/api/internal/") { - if strings.HasSuffix(base, "/api/v1") { - base = strings.TrimSuffix(base, "/api/v1") - } - } - return base + 
endpoint -} - -func parseAnalyticsBody(body []byte) (analyticsResponse, string, bool, error) { - var direct analyticsResponse - if err := json.Unmarshal(body, &direct); err == nil && direct.Data != nil { - return direct, "", true, nil - } - - var wrapped analyticsEnvelopeResponse - if err := json.Unmarshal(body, &wrapped); err == nil && wrapped.Data.Data != nil { - return analyticsResponse{Data: wrapped.Data.Data}, parseAnalyticsCachedAt(wrapped.Data.CachedAt), true, nil - } - - return analyticsResponse{}, "", false, fmt.Errorf("unrecognized analytics payload") -} - -func parseAnalyticsCachedAt(raw json.RawMessage) string { - s := strings.TrimSpace(string(raw)) - if s == "" || s == "null" { - return "" - } - - var str string - if err := json.Unmarshal(raw, &str); err == nil { - return strings.TrimSpace(str) - } - - var n float64 - if err := json.Unmarshal(raw, &n); err != nil { - return s - } - - sec := int64(n) - // treat large numeric values as milliseconds since epoch - if sec > 1_000_000_000_000 { - sec = sec / 1000 - } - if sec <= 0 { - return fmt.Sprintf("%.0f", n) - } - return time.Unix(sec, 0).UTC().Format(time.RFC3339) -} - -func normalizeActivityDate(raw string) (string, time.Time, bool) { - raw = strings.TrimSpace(raw) - for _, layout := range []string{ - time.RFC3339Nano, - time.RFC3339, - "2006-01-02 15:04:05", - "2006-01-02", - } { - if t, err := time.Parse(layout, raw); err == nil { - date := t.UTC().Format("2006-01-02") - return date, t.UTC(), true - } - } - if len(raw) >= 10 && raw[4] == '-' && raw[7] == '-' { - date := raw[:10] - if t, err := time.Parse("2006-01-02", date); err == nil { - return date, t.UTC(), true - } - return date, time.Time{}, false - } - return raw, time.Time{}, false -} - -func emitAnalyticsPerModelMetrics( - snap *core.UsageSnapshot, - modelCost, modelByokCost, modelInputTokens, modelOutputTokens, modelReasoningTokens, modelCachedTokens, modelTotalTokens, modelRequests, modelByokRequests map[string]float64, -) { - modelSet 
:= make(map[string]struct{}) - for model := range modelCost { - modelSet[model] = struct{}{} - } - for model := range modelByokCost { - modelSet[model] = struct{}{} - } - for model := range modelInputTokens { - modelSet[model] = struct{}{} - } - for model := range modelOutputTokens { - modelSet[model] = struct{}{} - } - for model := range modelReasoningTokens { - modelSet[model] = struct{}{} - } - for model := range modelCachedTokens { - modelSet[model] = struct{}{} - } - for model := range modelTotalTokens { - modelSet[model] = struct{}{} - } - for model := range modelRequests { - modelSet[model] = struct{}{} - } - for model := range modelByokRequests { - modelSet[model] = struct{}{} - } - - for model := range modelSet { - safe := sanitizeName(model) - prefix := "model_" + safe - rec := core.ModelUsageRecord{ - RawModelID: model, - RawSource: "api", - Window: "activity", - } - - if v := modelCost[model]; v > 0 { - snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} - rec.CostUSD = core.Float64Ptr(v) - } - if v := modelByokCost[model]; v > 0 { - snap.Metrics[prefix+"_byok_cost"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} - } - if v := modelInputTokens[model]; v > 0 { - snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - rec.InputTokens = core.Float64Ptr(v) - } - if v := modelOutputTokens[model]; v > 0 { - snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - rec.OutputTokens = core.Float64Ptr(v) - } - if v := modelReasoningTokens[model]; v > 0 { - snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - rec.ReasoningTokens = core.Float64Ptr(v) - } - if v := modelCachedTokens[model]; v > 0 { - snap.Metrics[prefix+"_cached_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - rec.CachedTokens = core.Float64Ptr(v) - } - if v := modelTotalTokens[model]; v > 0 { - 
snap.Metrics[prefix+"_total_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - rec.TotalTokens = core.Float64Ptr(v) - } - if v := modelRequests[model]; v > 0 { - snap.Metrics[prefix+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "activity"} - snap.Raw[prefix+"_requests"] = fmt.Sprintf("%.0f", v) - rec.Requests = core.Float64Ptr(v) - } - if v := modelByokRequests[model]; v > 0 { - snap.Metrics[prefix+"_byok_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "activity"} - } - if rec.InputTokens != nil || rec.OutputTokens != nil || rec.CostUSD != nil || rec.Requests != nil || rec.ReasoningTokens != nil || rec.CachedTokens != nil || rec.TotalTokens != nil { - snap.AppendModelUsage(rec) - } - } -} - -// filterRouterClientProviders removes entries keyed by router/client app names -// (e.g. "Openusage", "OpenRouter") from analytics provider maps. The /activity -// endpoint sometimes returns the app/key name instead of the actual upstream -// hosting provider. Removing these avoids polluting the "Providers" breakdown -// with client names; real hosting provider data comes from /generations. 
-func filterRouterClientProviders(maps ...map[string]float64) { - for _, m := range maps { - for name := range m { - if isLikelyRouterClientProviderName(name) { - delete(m, name) - } - } - } -} - -func emitAnalyticsPerProviderMetrics( - snap *core.UsageSnapshot, - providerCost, providerByokCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests map[string]float64, -) { - providerSet := make(map[string]struct{}) - for provider := range providerCost { - providerSet[provider] = struct{}{} - } - for provider := range providerByokCost { - providerSet[provider] = struct{}{} - } - for provider := range providerInputTokens { - providerSet[provider] = struct{}{} - } - for provider := range providerOutputTokens { - providerSet[provider] = struct{}{} - } - for provider := range providerReasoningTokens { - providerSet[provider] = struct{}{} - } - for provider := range providerRequests { - providerSet[provider] = struct{}{} - } - - for provider := range providerSet { - prefix := "provider_" + sanitizeName(strings.ToLower(provider)) - if v := providerCost[provider]; v > 0 { - snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} - } - if v := providerByokCost[provider]; v > 0 { - snap.Metrics[prefix+"_byok_cost"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} - } - if v := providerInputTokens[provider]; v > 0 { - snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - if v := providerOutputTokens[provider]; v > 0 { - snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - if v := providerReasoningTokens[provider]; v > 0 { - snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - if v := providerRequests[provider]; v > 0 { - snap.Metrics[prefix+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "activity"} - } - - snap.Raw[prefix+"_requests"] = 
fmt.Sprintf("%.0f", providerRequests[provider]) - snap.Raw[prefix+"_cost"] = fmt.Sprintf("$%.6f", providerCost[provider]) - if providerByokCost[provider] > 0 { - snap.Raw[prefix+"_byok_cost"] = fmt.Sprintf("$%.6f", providerByokCost[provider]) - } - snap.Raw[prefix+"_prompt_tokens"] = fmt.Sprintf("%.0f", providerInputTokens[provider]) - snap.Raw[prefix+"_completion_tokens"] = fmt.Sprintf("%.0f", providerOutputTokens[provider]) - if providerReasoningTokens[provider] > 0 { - snap.Raw[prefix+"_reasoning_tokens"] = fmt.Sprintf("%.0f", providerReasoningTokens[provider]) - } - } -} - -func emitUpstreamProviderMetrics( - snap *core.UsageSnapshot, - providerCost, providerInputTokens, providerOutputTokens, providerReasoningTokens, providerRequests map[string]float64, -) { - providerSet := make(map[string]struct{}) - for p := range providerCost { - providerSet[p] = struct{}{} - } - for p := range providerInputTokens { - providerSet[p] = struct{}{} - } - for p := range providerOutputTokens { - providerSet[p] = struct{}{} - } - for p := range providerReasoningTokens { - providerSet[p] = struct{}{} - } - for p := range providerRequests { - providerSet[p] = struct{}{} - } - - for provider := range providerSet { - prefix := "upstream_" + sanitizeName(strings.ToLower(provider)) - if v := providerCost[provider]; v > 0 { - snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} - } - if v := providerInputTokens[provider]; v > 0 { - snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - if v := providerOutputTokens[provider]; v > 0 { - snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - if v := providerReasoningTokens[provider]; v > 0 { - snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - if v := providerRequests[provider]; v > 0 { - snap.Metrics[prefix+"_requests"] = core.Metric{Used: &v, Unit: 
"requests", Window: "activity"} - } - } -} - -func emitAnalyticsEndpointMetrics(snap *core.UsageSnapshot, endpointStatsMap map[string]*endpointStats) { - type endpointEntry struct { - id string - stats *endpointStats - } - - var entries []endpointEntry - for id, stats := range endpointStatsMap { - if id == "unknown" { - continue - } - entries = append(entries, endpointEntry{id: id, stats: stats}) - } - sort.Slice(entries, func(i, j int) bool { - if entries[i].stats.TotalCost != entries[j].stats.TotalCost { - return entries[i].stats.TotalCost > entries[j].stats.TotalCost - } - return entries[i].stats.Requests > entries[j].stats.Requests - }) - - const maxEndpointMetrics = 8 - limit := maxEndpointMetrics - if len(entries) < limit { - limit = len(entries) - } - for _, entry := range entries[:limit] { - safe := sanitizeName(entry.id) - prefix := "endpoint_" + safe - - req := float64(entry.stats.Requests) - if req > 0 { - snap.Metrics[prefix+"_requests"] = core.Metric{Used: &req, Unit: "requests", Window: "activity"} - } - if entry.stats.TotalCost > 0 { - v := entry.stats.TotalCost - snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} - } - if entry.stats.ByokCost > 0 { - v := entry.stats.ByokCost - snap.Metrics[prefix+"_byok_cost"] = core.Metric{Used: &v, Unit: "USD", Window: "activity"} - } - if entry.stats.PromptTokens > 0 { - v := float64(entry.stats.PromptTokens) - snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - if entry.stats.CompletionTokens > 0 { - v := float64(entry.stats.CompletionTokens) - snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - if entry.stats.ReasoningTokens > 0 { - v := float64(entry.stats.ReasoningTokens) - snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "activity"} - } - - if entry.stats.Provider != "" { - snap.Raw[prefix+"_provider"] = entry.stats.Provider - } 
- if entry.stats.Model != "" { - snap.Raw[prefix+"_model"] = entry.stats.Model - } - } -} - -func mapToSortedTimePoints(m map[string]float64) []core.TimePoint { - points := make([]core.TimePoint, 0, len(m)) - for date, val := range m { - points = append(points, core.TimePoint{Date: date, Value: val}) - } - sort.Slice(points, func(i, j int) bool { - return points[i].Date < points[j].Date - }) - return points -} - -func parseAPIErrorMessage(body []byte) string { - var apiErr apiErrorResponse - if err := json.Unmarshal(body, &apiErr); err != nil { - return "" - } - return strings.TrimSpace(apiErr.Error.Message) -} - -func emitPerModelMetrics(modelStatsMap map[string]*modelStats, snap *core.UsageSnapshot) { - type entry struct { - name string - stats *modelStats - } - sorted := make([]entry, 0, len(modelStatsMap)) - for name, stats := range modelStatsMap { - sorted = append(sorted, entry{name, stats}) - } - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].stats.TotalCost > sorted[j].stats.TotalCost - }) - - for _, e := range sorted { - safeName := sanitizeName(e.name) - prefix := "model_" + safeName - rec := core.ModelUsageRecord{ - RawModelID: e.name, - RawSource: "api", - Window: "30d", - } - - inputTokens := float64(e.stats.PromptTokens) - snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &inputTokens, Unit: "tokens", Window: "30d"} - rec.InputTokens = core.Float64Ptr(inputTokens) - - outputTokens := float64(e.stats.CompletionTokens) - snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &outputTokens, Unit: "tokens", Window: "30d"} - rec.OutputTokens = core.Float64Ptr(outputTokens) - - if e.stats.ReasoningTokens > 0 { - reasoningTokens := float64(e.stats.ReasoningTokens) - snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &reasoningTokens, Unit: "tokens", Window: "30d"} - rec.ReasoningTokens = core.Float64Ptr(reasoningTokens) - } - if e.stats.CachedTokens > 0 { - cachedTokens := float64(e.stats.CachedTokens) - 
snap.Metrics[prefix+"_cached_tokens"] = core.Metric{Used: &cachedTokens, Unit: "tokens", Window: "30d"} - rec.CachedTokens = core.Float64Ptr(cachedTokens) - } - totalTokens := float64(e.stats.PromptTokens + e.stats.CompletionTokens + e.stats.ReasoningTokens + e.stats.CachedTokens) - if totalTokens > 0 { - snap.Metrics[prefix+"_total_tokens"] = core.Metric{Used: &totalTokens, Unit: "tokens", Window: "30d"} - rec.TotalTokens = core.Float64Ptr(totalTokens) - } - if e.stats.ImageTokens > 0 { - imageTokens := float64(e.stats.ImageTokens) - snap.Metrics[prefix+"_image_tokens"] = core.Metric{Used: &imageTokens, Unit: "tokens", Window: "30d"} - } - - costUSD := e.stats.TotalCost - snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &costUSD, Unit: "USD", Window: "30d"} - rec.CostUSD = core.Float64Ptr(costUSD) - requests := float64(e.stats.Requests) - snap.Metrics[prefix+"_requests"] = core.Metric{Used: &requests, Unit: "requests", Window: "30d"} - rec.Requests = core.Float64Ptr(requests) - if e.stats.NativePrompt > 0 { - nativeInput := float64(e.stats.NativePrompt) - snap.Metrics[prefix+"_native_input_tokens"] = core.Metric{Used: &nativeInput, Unit: "tokens", Window: "30d"} - } - if e.stats.NativeCompletion > 0 { - nativeOutput := float64(e.stats.NativeCompletion) - snap.Metrics[prefix+"_native_output_tokens"] = core.Metric{Used: &nativeOutput, Unit: "tokens", Window: "30d"} - } - - snap.Raw[prefix+"_requests"] = fmt.Sprintf("%d", e.stats.Requests) - - if e.stats.LatencyCount > 0 { - avgMs := float64(e.stats.TotalLatencyMs) / float64(e.stats.LatencyCount) - snap.Raw[prefix+"_avg_latency_ms"] = fmt.Sprintf("%.0f", avgMs) - avgSeconds := avgMs / 1000.0 - snap.Metrics[prefix+"_avg_latency"] = core.Metric{Used: &avgSeconds, Unit: "seconds", Window: "30d"} - } - if e.stats.GenerationCount > 0 { - avgMs := float64(e.stats.TotalGenMs) / float64(e.stats.GenerationCount) - avgSeconds := avgMs / 1000.0 - snap.Metrics[prefix+"_avg_generation_time"] = core.Metric{Used: &avgSeconds, 
Unit: "seconds", Window: "30d"} - } - if e.stats.ModerationCount > 0 { - avgMs := float64(e.stats.TotalModeration) / float64(e.stats.ModerationCount) - avgSeconds := avgMs / 1000.0 - snap.Metrics[prefix+"_avg_moderation_latency"] = core.Metric{Used: &avgSeconds, Unit: "seconds", Window: "30d"} - } - - if e.stats.CacheDiscountUSD > 0 { - snap.Raw[prefix+"_cache_savings"] = fmt.Sprintf("$%.6f", e.stats.CacheDiscountUSD) - } - - if len(e.stats.Providers) > 0 { - var provList []string - for prov := range e.stats.Providers { - provList = append(provList, prov) - } - sort.Strings(provList) - snap.Raw[prefix+"_providers"] = strings.Join(provList, ", ") - if len(provList) > 0 { - rec.SetDimension("upstream_providers", strings.Join(provList, ",")) - } - } - if rec.InputTokens != nil || rec.OutputTokens != nil || rec.CostUSD != nil || rec.Requests != nil || rec.ReasoningTokens != nil || rec.CachedTokens != nil { - snap.AppendModelUsage(rec) - } - } -} - -func emitPerProviderMetrics(providerStatsMap map[string]*providerStats, snap *core.UsageSnapshot) { - type entry struct { - name string - stats *providerStats - } - sorted := make([]entry, 0, len(providerStatsMap)) - for name, stats := range providerStatsMap { - sorted = append(sorted, entry{name, stats}) - } - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].stats.TotalCost > sorted[j].stats.TotalCost - }) - - for _, e := range sorted { - prefix := "provider_" + sanitizeName(strings.ToLower(e.name)) - requests := float64(e.stats.Requests) - snap.Metrics[prefix+"_requests"] = core.Metric{Used: &requests, Unit: "requests", Window: "30d"} - if e.stats.TotalCost > 0 { - v := e.stats.TotalCost - snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "30d"} - } - if e.stats.ByokCost > 0 { - v := e.stats.ByokCost - snap.Metrics[prefix+"_byok_cost"] = core.Metric{Used: &v, Unit: "USD", Window: "30d"} - } - if e.stats.PromptTokens > 0 { - v := float64(e.stats.PromptTokens) - 
snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "30d"} - } - if e.stats.CompletionTokens > 0 { - v := float64(e.stats.CompletionTokens) - snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "30d"} - } - if e.stats.ReasoningTokens > 0 { - v := float64(e.stats.ReasoningTokens) - snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "30d"} - } - snap.Raw[prefix+"_requests"] = fmt.Sprintf("%d", e.stats.Requests) - snap.Raw[prefix+"_cost"] = fmt.Sprintf("$%.6f", e.stats.TotalCost) - if e.stats.ByokCost > 0 { - snap.Raw[prefix+"_byok_cost"] = fmt.Sprintf("$%.6f", e.stats.ByokCost) - } - snap.Raw[prefix+"_prompt_tokens"] = fmt.Sprintf("%d", e.stats.PromptTokens) - snap.Raw[prefix+"_completion_tokens"] = fmt.Sprintf("%d", e.stats.CompletionTokens) - if e.stats.ReasoningTokens > 0 { - snap.Raw[prefix+"_reasoning_tokens"] = fmt.Sprintf("%d", e.stats.ReasoningTokens) - } - } -} - -func emitClientDailySeries(snap *core.UsageSnapshot, tokensByClient, requestsByClient map[string]map[string]float64) { - if snap.DailySeries == nil { - snap.DailySeries = make(map[string][]core.TimePoint) - } - for client, byDate := range tokensByClient { - if client == "" || len(byDate) == 0 { - continue - } - snap.DailySeries["tokens_client_"+client] = mapToSortedTimePoints(byDate) - } - for client, byDate := range requestsByClient { - if client == "" || len(byDate) == 0 { - continue - } - snap.DailySeries["usage_client_"+client] = mapToSortedTimePoints(byDate) - } -} - -type providerClientAggregate struct { - InputTokens float64 - OutputTokens float64 - ReasoningTokens float64 - Requests float64 - CostUSD float64 - Window string -} - -type modelUsageCount struct { - name string - count float64 -} - -func enrichDashboardRepresentations(snap *core.UsageSnapshot) { - if snap == nil || len(snap.Metrics) == 0 { - return - } - synthesizeClientMetricsFromProviderMetrics(snap) - 
synthesizeLanguageMetricsFromModelRequests(snap) - synthesizeUsageSummaries(snap) -} - -func synthesizeClientMetricsFromProviderMetrics(snap *core.UsageSnapshot) { - byClient := make(map[string]*providerClientAggregate) - for key, metric := range snap.Metrics { - if metric.Used == nil { - continue - } - client, field, ok := parseProviderMetricKey(key) - if !ok || client == "" { - continue - } - agg, exists := byClient[client] - if !exists { - agg = &providerClientAggregate{} - byClient[client] = agg - } - if agg.Window == "" && metric.Window != "" { - agg.Window = metric.Window - } - switch field { - case "input_tokens": - agg.InputTokens = *metric.Used - case "output_tokens": - agg.OutputTokens = *metric.Used - case "reasoning_tokens": - agg.ReasoningTokens = *metric.Used - case "requests": - agg.Requests = *metric.Used - case "cost_usd": - agg.CostUSD = *metric.Used - } - } - - for client, agg := range byClient { - window := strings.TrimSpace(agg.Window) - if window == "" { - window = "30d" - } - clientPrefix := "client_" + client - - if agg.InputTokens > 0 { - v := agg.InputTokens - snap.Metrics[clientPrefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: window} - } - if agg.OutputTokens > 0 { - v := agg.OutputTokens - snap.Metrics[clientPrefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: window} - } - if agg.ReasoningTokens > 0 { - v := agg.ReasoningTokens - snap.Metrics[clientPrefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: window} - } - totalTokens := agg.InputTokens + agg.OutputTokens + agg.ReasoningTokens - if totalTokens > 0 { - v := totalTokens - snap.Metrics[clientPrefix+"_total_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: window} - } - if agg.Requests > 0 { - v := agg.Requests - snap.Metrics[clientPrefix+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: window} - } - if agg.CostUSD > 0 { - v := agg.CostUSD - snap.Metrics[clientPrefix+"_cost_usd"] = 
core.Metric{Used: &v, Unit: "USD", Window: window} - } - } -} - -func parseProviderMetricKey(key string) (name, field string, ok bool) { - const prefix = "provider_" - if !strings.HasPrefix(key, prefix) { - return "", "", false - } - rest := strings.TrimPrefix(key, prefix) - for _, suffix := range []string{ - "_input_tokens", - "_output_tokens", - "_reasoning_tokens", - "_requests", - "_cost_usd", - } { - if strings.HasSuffix(rest, suffix) { - return strings.TrimSuffix(rest, suffix), strings.TrimPrefix(suffix, "_"), true - } - } - return "", "", false -} - -func synthesizeLanguageMetricsFromModelRequests(snap *core.UsageSnapshot) { - byLanguage := make(map[string]float64) - window := "" - for key, metric := range snap.Metrics { - if metric.Used == nil { - continue - } - model, field, ok := parseModelMetricKey(key) - if !ok || field != "requests" { - continue - } - if window == "" && strings.TrimSpace(metric.Window) != "" { - window = strings.TrimSpace(metric.Window) - } - lang := inferModelWorkloadLanguage(model) - byLanguage[lang] += *metric.Used - } - if len(byLanguage) == 0 { - return - } - if window == "" { - window = "30d inferred" - } - for lang, count := range byLanguage { - if count <= 0 { - continue - } - v := count - snap.Metrics["lang_"+sanitizeName(lang)] = core.Metric{Used: &v, Unit: "requests", Window: window} - } - if summary := summarizeCountUsage(byLanguage, "req", 6); summary != "" { - snap.Raw["language_usage"] = summary - snap.Raw["language_usage_source"] = "inferred_from_model_ids" - } -} - -func parseModelMetricKey(key string) (name, field string, ok bool) { - const prefix = "model_" - if !strings.HasPrefix(key, prefix) { - return "", "", false - } - rest := strings.TrimPrefix(key, prefix) - for _, suffix := range []string{"_requests"} { - if strings.HasSuffix(rest, suffix) { - return strings.TrimSuffix(rest, suffix), strings.TrimPrefix(suffix, "_"), true - } - } - return "", "", false -} - -func inferModelWorkloadLanguage(model string) string 
{ - model = strings.ToLower(strings.TrimSpace(model)) - if model == "" { - return "general" - } - switch { - case strings.Contains(model, "coder"), strings.Contains(model, "codestral"), strings.Contains(model, "devstral"), strings.Contains(model, "code"): - return "code" - case strings.Contains(model, "vision"), strings.Contains(model, "image"), strings.Contains(model, "multimodal"), strings.Contains(model, "omni"), strings.Contains(model, "vl"): - return "multimodal" - case strings.Contains(model, "audio"), strings.Contains(model, "speech"), strings.Contains(model, "voice"), strings.Contains(model, "whisper"), strings.Contains(model, "tts"), strings.Contains(model, "stt"): - return "audio" - case strings.Contains(model, "reason"), strings.Contains(model, "thinking"): - return "reasoning" - default: - return "general" - } -} - -func synthesizeUsageSummaries(snap *core.UsageSnapshot) { - modelTotals := make(map[string]float64) - modelWindow := "" - modelUnit := "tok" - for key, metric := range snap.Metrics { - if metric.Used == nil || !strings.HasPrefix(key, "model_") { - continue - } - switch { - case strings.HasSuffix(key, "_total_tokens"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_total_tokens") - modelTotals[name] = *metric.Used - if modelWindow == "" && strings.TrimSpace(metric.Window) != "" { - modelWindow = strings.TrimSpace(metric.Window) - } - case strings.HasSuffix(key, "_cost_usd"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cost_usd") - if _, ok := modelTotals[name]; !ok { - modelTotals[name] = *metric.Used - modelUnit = "usd" - if modelWindow == "" && strings.TrimSpace(metric.Window) != "" { - modelWindow = strings.TrimSpace(metric.Window) - } - } - } - } - if summary := summarizeShareUsage(modelTotals, 6); summary != "" { - snap.Raw["model_usage"] = summary - if modelWindow != "" { - snap.Raw["model_usage_window"] = modelWindow - } - snap.Raw["model_usage_unit"] = modelUnit - } - - clientTotals := 
make(map[string]float64) - for key, metric := range snap.Metrics { - if metric.Used == nil || !strings.HasPrefix(key, "client_") { - continue - } - switch { - case strings.HasSuffix(key, "_total_tokens"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "client_"), "_total_tokens") - clientTotals[name] = *metric.Used - case strings.HasSuffix(key, "_requests"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "client_"), "_requests") - if _, ok := clientTotals[name]; !ok { - clientTotals[name] = *metric.Used - } - } - } - if summary := summarizeShareUsage(clientTotals, 6); summary != "" { - snap.Raw["client_usage"] = summary - } -} - -func summarizeShareUsage(values map[string]float64, maxItems int) string { - type item struct { - name string - value float64 - } - list := make([]item, 0, len(values)) - total := 0.0 - for name, value := range values { - if value <= 0 { - continue - } - list = append(list, item{name: name, value: value}) - total += value - } - if len(list) == 0 || total <= 0 { - return "" - } - sort.Slice(list, func(i, j int) bool { - if list[i].value != list[j].value { - return list[i].value > list[j].value - } - return list[i].name < list[j].name - }) - if maxItems > 0 && len(list) > maxItems { - list = list[:maxItems] - } - parts := make([]string, 0, len(list)) - for _, entry := range list { - parts = append(parts, fmt.Sprintf("%s: %.0f%%", normalizeUsageLabel(entry.name), entry.value/total*100)) - } - return strings.Join(parts, ", ") -} - -func summarizeCountUsage(values map[string]float64, unit string, maxItems int) string { - type item struct { - name string - value float64 - } - list := make([]item, 0, len(values)) - for name, value := range values { - if value <= 0 { - continue - } - list = append(list, item{name: name, value: value}) - } - if len(list) == 0 { - return "" - } - sort.Slice(list, func(i, j int) bool { - if list[i].value != list[j].value { - return list[i].value > list[j].value - } - return list[i].name < list[j].name - 
}) - if maxItems > 0 && len(list) > maxItems { - list = list[:maxItems] - } - parts := make([]string, 0, len(list)) - for _, entry := range list { - parts = append(parts, fmt.Sprintf("%s: %.0f %s", normalizeUsageLabel(entry.name), entry.value, unit)) - } - return strings.Join(parts, ", ") -} - -func normalizeUsageLabel(name string) string { - name = strings.TrimSpace(name) - if name == "" { - return "unknown" - } - name = strings.ReplaceAll(name, "_", " ") - return name -} - -func emitModelDerivedToolUsageMetrics(snap *core.UsageSnapshot, modelRequests map[string]float64, window, source string) { - if snap == nil || len(modelRequests) == 0 { - return - } - if strings.TrimSpace(window) == "" { - window = "30d inferred" - } - counts := make(map[string]int, len(modelRequests)) - rows := make([]modelUsageCount, 0, len(modelRequests)) - totalCalls := 0.0 - for model, requests := range modelRequests { - if requests <= 0 { - continue - } - key := "tool_" + sanitizeName(model) - v := requests - snap.Metrics[key] = core.Metric{Used: &v, Unit: "calls", Window: window} - totalCalls += requests - counts[model] = int(math.Round(requests)) - rows = append(rows, modelUsageCount{name: model, count: requests}) - } - if totalCalls <= 0 { - return - } - if source != "" { - snap.Raw["tool_usage_source"] = source - } - if summary := summarizeModelCountUsage(rows, 6); summary != "" { - snap.Raw["tool_usage"] = summary - } else { - snap.Raw["tool_usage"] = summarizeTopCounts(counts, 6) - } - totalV := totalCalls - snap.Metrics["tool_calls_total"] = core.Metric{Used: &totalV, Unit: "calls", Window: "30d"} -} - -func emitToolOutcomeMetrics(snap *core.UsageSnapshot, totalRequests, totalCancelled int, window string) { - if snap == nil || totalRequests <= 0 { - return - } - if strings.TrimSpace(window) == "" { - window = "30d" - } - totalV := float64(totalRequests) - snap.Metrics["tool_calls_total"] = core.Metric{Used: &totalV, Unit: "calls", Window: window} - completed := totalRequests - 
totalCancelled - if completed < 0 { - completed = 0 - } - completedV := float64(completed) - snap.Metrics["tool_completed"] = core.Metric{Used: &completedV, Unit: "calls", Window: window} - if totalCancelled > 0 { - cancelledV := float64(totalCancelled) - snap.Metrics["tool_cancelled"] = core.Metric{Used: &cancelledV, Unit: "calls", Window: window} - } - successRate := completedV / totalV * 100 - snap.Metrics["tool_success_rate"] = core.Metric{Used: &successRate, Unit: "%", Window: window} -} - -func summarizeModelCountUsage(rows []modelUsageCount, limit int) string { - if len(rows) == 0 { - return "" - } - sort.Slice(rows, func(i, j int) bool { - if rows[i].count != rows[j].count { - return rows[i].count > rows[j].count - } - return rows[i].name < rows[j].name - }) - if limit > 0 && len(rows) > limit { - rows = rows[:limit] - } - parts := make([]string, 0, len(rows)) - for _, row := range rows { - parts = append(parts, fmt.Sprintf("%s: %.0f calls", row.name, row.count)) - } - return strings.Join(parts, ", ") -} - -func summarizeTopCounts(counts map[string]int, limit int) string { - type kv struct { - name string - count int - } - items := make([]kv, 0, len(counts)) - for name, count := range counts { - if count <= 0 { - continue - } - items = append(items, kv{name: name, count: count}) - } - sort.Slice(items, func(i, j int) bool { - if items[i].count != items[j].count { - return items[i].count > items[j].count - } - return items[i].name < items[j].name - }) - if limit <= 0 || limit > len(items) { - limit = len(items) - } - parts := make([]string, 0, limit) - for _, item := range items[:limit] { - parts = append(parts, fmt.Sprintf("%s=%d", item.name, item.count)) - } - return strings.Join(parts, ", ") -} - -func sanitizeName(name string) string { - name = strings.TrimSpace(name) - if name == "" { - return "unknown" - } - var b strings.Builder - b.Grow(len(name)) - for _, r := range name { - switch { - case r >= 'a' && r <= 'z': - b.WriteRune(r) - case r >= 'A' && r 
<= 'Z': - b.WriteRune(r) - case r >= '0' && r <= '9': - b.WriteRune(r) - case r == '-' || r == '_' || r == '.': - b.WriteRune(r) - default: - b.WriteByte('_') - } - } - safe := strings.Trim(b.String(), "_") - if safe == "" { - return "unknown" - } - return safe -} - -func normalizeModelName(name string) string { - name = strings.TrimSpace(strings.ToLower(name)) - if name == "" { - return "" - } - name = strings.ReplaceAll(name, "\\", "/") - name = strings.Trim(name, "/") - name = strings.Join(strings.Fields(name), "-") - if name == "" { - return "" - } - return name -} diff --git a/internal/providers/openrouter/snapshot_projection.go b/internal/providers/openrouter/snapshot_projection.go new file mode 100644 index 0000000..a7c7b2e --- /dev/null +++ b/internal/providers/openrouter/snapshot_projection.go @@ -0,0 +1,626 @@ +package openrouter + +import ( + "fmt" + "math" + "sort" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func emitPerModelMetrics(modelStatsMap map[string]*modelStats, snap *core.UsageSnapshot) { + type entry struct { + name string + stats *modelStats + } + sorted := make([]entry, 0, len(modelStatsMap)) + for name, stats := range modelStatsMap { + sorted = append(sorted, entry{name, stats}) + } + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].stats.TotalCost > sorted[j].stats.TotalCost + }) + + for _, entry := range sorted { + safeName := sanitizeName(entry.name) + prefix := "model_" + safeName + rec := core.ModelUsageRecord{RawModelID: entry.name, RawSource: "api", Window: "30d"} + + inputTokens := float64(entry.stats.PromptTokens) + snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &inputTokens, Unit: "tokens", Window: "30d"} + rec.InputTokens = core.Float64Ptr(inputTokens) + + outputTokens := float64(entry.stats.CompletionTokens) + snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &outputTokens, Unit: "tokens", Window: "30d"} + rec.OutputTokens = core.Float64Ptr(outputTokens) + + if 
entry.stats.ReasoningTokens > 0 { + reasoningTokens := float64(entry.stats.ReasoningTokens) + snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &reasoningTokens, Unit: "tokens", Window: "30d"} + rec.ReasoningTokens = core.Float64Ptr(reasoningTokens) + } + if entry.stats.CachedTokens > 0 { + cachedTokens := float64(entry.stats.CachedTokens) + snap.Metrics[prefix+"_cached_tokens"] = core.Metric{Used: &cachedTokens, Unit: "tokens", Window: "30d"} + rec.CachedTokens = core.Float64Ptr(cachedTokens) + } + totalTokens := float64(entry.stats.PromptTokens + entry.stats.CompletionTokens + entry.stats.ReasoningTokens + entry.stats.CachedTokens) + if totalTokens > 0 { + snap.Metrics[prefix+"_total_tokens"] = core.Metric{Used: &totalTokens, Unit: "tokens", Window: "30d"} + rec.TotalTokens = core.Float64Ptr(totalTokens) + } + if entry.stats.ImageTokens > 0 { + imageTokens := float64(entry.stats.ImageTokens) + snap.Metrics[prefix+"_image_tokens"] = core.Metric{Used: &imageTokens, Unit: "tokens", Window: "30d"} + } + + costUSD := entry.stats.TotalCost + snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &costUSD, Unit: "USD", Window: "30d"} + rec.CostUSD = core.Float64Ptr(costUSD) + requests := float64(entry.stats.Requests) + snap.Metrics[prefix+"_requests"] = core.Metric{Used: &requests, Unit: "requests", Window: "30d"} + rec.Requests = core.Float64Ptr(requests) + if entry.stats.NativePrompt > 0 { + nativeInput := float64(entry.stats.NativePrompt) + snap.Metrics[prefix+"_native_input_tokens"] = core.Metric{Used: &nativeInput, Unit: "tokens", Window: "30d"} + } + if entry.stats.NativeCompletion > 0 { + nativeOutput := float64(entry.stats.NativeCompletion) + snap.Metrics[prefix+"_native_output_tokens"] = core.Metric{Used: &nativeOutput, Unit: "tokens", Window: "30d"} + } + + snap.Raw[prefix+"_requests"] = fmt.Sprintf("%d", entry.stats.Requests) + + if entry.stats.LatencyCount > 0 { + avgMs := float64(entry.stats.TotalLatencyMs) / float64(entry.stats.LatencyCount) + 
snap.Raw[prefix+"_avg_latency_ms"] = fmt.Sprintf("%.0f", avgMs) + avgSeconds := avgMs / 1000.0 + snap.Metrics[prefix+"_avg_latency"] = core.Metric{Used: &avgSeconds, Unit: "seconds", Window: "30d"} + } + if entry.stats.GenerationCount > 0 { + avgMs := float64(entry.stats.TotalGenMs) / float64(entry.stats.GenerationCount) + avgSeconds := avgMs / 1000.0 + snap.Metrics[prefix+"_avg_generation_time"] = core.Metric{Used: &avgSeconds, Unit: "seconds", Window: "30d"} + } + if entry.stats.ModerationCount > 0 { + avgMs := float64(entry.stats.TotalModeration) / float64(entry.stats.ModerationCount) + avgSeconds := avgMs / 1000.0 + snap.Metrics[prefix+"_avg_moderation_latency"] = core.Metric{Used: &avgSeconds, Unit: "seconds", Window: "30d"} + } + + if entry.stats.CacheDiscountUSD > 0 { + snap.Raw[prefix+"_cache_savings"] = fmt.Sprintf("$%.6f", entry.stats.CacheDiscountUSD) + } + + if len(entry.stats.Providers) > 0 { + var provList []string + for prov := range entry.stats.Providers { + provList = append(provList, prov) + } + sort.Strings(provList) + snap.Raw[prefix+"_providers"] = strings.Join(provList, ", ") + if len(provList) > 0 { + rec.SetDimension("upstream_providers", strings.Join(provList, ",")) + } + } + if rec.InputTokens != nil || rec.OutputTokens != nil || rec.CostUSD != nil || rec.Requests != nil || rec.ReasoningTokens != nil || rec.CachedTokens != nil { + snap.AppendModelUsage(rec) + } + } +} + +func emitPerProviderMetrics(providerStatsMap map[string]*providerStats, snap *core.UsageSnapshot) { + type entry struct { + name string + stats *providerStats + } + sorted := make([]entry, 0, len(providerStatsMap)) + for name, stats := range providerStatsMap { + sorted = append(sorted, entry{name, stats}) + } + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].stats.TotalCost > sorted[j].stats.TotalCost + }) + + for _, entry := range sorted { + prefix := "provider_" + sanitizeName(strings.ToLower(entry.name)) + requests := float64(entry.stats.Requests) + 
snap.Metrics[prefix+"_requests"] = core.Metric{Used: &requests, Unit: "requests", Window: "30d"} + if entry.stats.TotalCost > 0 { + v := entry.stats.TotalCost + snap.Metrics[prefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: "30d"} + } + if entry.stats.ByokCost > 0 { + v := entry.stats.ByokCost + snap.Metrics[prefix+"_byok_cost"] = core.Metric{Used: &v, Unit: "USD", Window: "30d"} + } + if entry.stats.PromptTokens > 0 { + v := float64(entry.stats.PromptTokens) + snap.Metrics[prefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "30d"} + } + if entry.stats.CompletionTokens > 0 { + v := float64(entry.stats.CompletionTokens) + snap.Metrics[prefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "30d"} + } + if entry.stats.ReasoningTokens > 0 { + v := float64(entry.stats.ReasoningTokens) + snap.Metrics[prefix+"_reasoning_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: "30d"} + } + snap.Raw[prefix+"_requests"] = fmt.Sprintf("%d", entry.stats.Requests) + snap.Raw[prefix+"_cost"] = fmt.Sprintf("$%.6f", entry.stats.TotalCost) + if entry.stats.ByokCost > 0 { + snap.Raw[prefix+"_byok_cost"] = fmt.Sprintf("$%.6f", entry.stats.ByokCost) + } + snap.Raw[prefix+"_prompt_tokens"] = fmt.Sprintf("%d", entry.stats.PromptTokens) + snap.Raw[prefix+"_completion_tokens"] = fmt.Sprintf("%d", entry.stats.CompletionTokens) + if entry.stats.ReasoningTokens > 0 { + snap.Raw[prefix+"_reasoning_tokens"] = fmt.Sprintf("%d", entry.stats.ReasoningTokens) + } + } +} + +func emitClientDailySeries(snap *core.UsageSnapshot, tokensByClient, requestsByClient map[string]map[string]float64) { + if snap.DailySeries == nil { + snap.DailySeries = make(map[string][]core.TimePoint) + } + for client, byDate := range tokensByClient { + if client == "" || len(byDate) == 0 { + continue + } + snap.DailySeries["tokens_client_"+client] = mapToSortedTimePoints(byDate) + } + for client, byDate := range requestsByClient { + if client == "" || len(byDate) == 0 { + 
continue + } + snap.DailySeries["usage_client_"+client] = mapToSortedTimePoints(byDate) + } +} + +type providerClientAggregate struct { + InputTokens float64 + OutputTokens float64 + ReasoningTokens float64 + Requests float64 + CostUSD float64 + Window string +} + +type modelUsageCount struct { + name string + count float64 +} + +func enrichDashboardRepresentations(snap *core.UsageSnapshot) { + if snap == nil || len(snap.Metrics) == 0 { + return + } + synthesizeClientMetricsFromProviderMetrics(snap) + synthesizeLanguageMetricsFromModelRequests(snap) + synthesizeUsageSummaries(snap) +} + +func synthesizeClientMetricsFromProviderMetrics(snap *core.UsageSnapshot) { + byClient := make(map[string]*providerClientAggregate) + for key, metric := range snap.Metrics { + if metric.Used == nil { + continue + } + client, field, ok := parseProviderMetricKey(key) + if !ok || client == "" { + continue + } + agg := byClient[client] + if agg == nil { + agg = &providerClientAggregate{} + byClient[client] = agg + } + if agg.Window == "" && metric.Window != "" { + agg.Window = metric.Window + } + switch field { + case "input_tokens": + agg.InputTokens = *metric.Used + case "output_tokens": + agg.OutputTokens = *metric.Used + case "reasoning_tokens": + agg.ReasoningTokens = *metric.Used + case "requests": + agg.Requests = *metric.Used + case "cost_usd": + agg.CostUSD = *metric.Used + } + } + + for client, agg := range byClient { + window := strings.TrimSpace(agg.Window) + if window == "" { + window = "30d" + } + clientPrefix := "client_" + client + + if agg.InputTokens > 0 { + v := agg.InputTokens + snap.Metrics[clientPrefix+"_input_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: window} + } + if agg.OutputTokens > 0 { + v := agg.OutputTokens + snap.Metrics[clientPrefix+"_output_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: window} + } + if agg.ReasoningTokens > 0 { + v := agg.ReasoningTokens + snap.Metrics[clientPrefix+"_reasoning_tokens"] = core.Metric{Used: &v, 
Unit: "tokens", Window: window} + } + totalTokens := agg.InputTokens + agg.OutputTokens + agg.ReasoningTokens + if totalTokens > 0 { + v := totalTokens + snap.Metrics[clientPrefix+"_total_tokens"] = core.Metric{Used: &v, Unit: "tokens", Window: window} + } + if agg.Requests > 0 { + v := agg.Requests + snap.Metrics[clientPrefix+"_requests"] = core.Metric{Used: &v, Unit: "requests", Window: window} + } + if agg.CostUSD > 0 { + v := agg.CostUSD + snap.Metrics[clientPrefix+"_cost_usd"] = core.Metric{Used: &v, Unit: "USD", Window: window} + } + } +} + +func parseProviderMetricKey(key string) (name, field string, ok bool) { + const prefix = "provider_" + if !strings.HasPrefix(key, prefix) { + return "", "", false + } + rest := strings.TrimPrefix(key, prefix) + for _, suffix := range []string{"_input_tokens", "_output_tokens", "_reasoning_tokens", "_requests", "_cost_usd"} { + if strings.HasSuffix(rest, suffix) { + return strings.TrimSuffix(rest, suffix), strings.TrimPrefix(suffix, "_"), true + } + } + return "", "", false +} + +func synthesizeLanguageMetricsFromModelRequests(snap *core.UsageSnapshot) { + byLanguage := make(map[string]float64) + window := "" + for key, metric := range snap.Metrics { + if metric.Used == nil { + continue + } + model, field, ok := parseModelMetricKey(key) + if !ok || field != "requests" { + continue + } + if window == "" && strings.TrimSpace(metric.Window) != "" { + window = strings.TrimSpace(metric.Window) + } + lang := inferModelWorkloadLanguage(model) + byLanguage[lang] += *metric.Used + } + if len(byLanguage) == 0 { + return + } + if window == "" { + window = "30d inferred" + } + for lang, count := range byLanguage { + if count <= 0 { + continue + } + v := count + snap.Metrics["lang_"+sanitizeName(lang)] = core.Metric{Used: &v, Unit: "requests", Window: window} + } + if summary := summarizeCountUsage(byLanguage, "req", 6); summary != "" { + snap.Raw["language_usage"] = summary + snap.Raw["language_usage_source"] = 
"inferred_from_model_ids" + } +} + +func parseModelMetricKey(key string) (name, field string, ok bool) { + const prefix = "model_" + if !strings.HasPrefix(key, prefix) { + return "", "", false + } + rest := strings.TrimPrefix(key, prefix) + if strings.HasSuffix(rest, "_requests") { + return strings.TrimSuffix(rest, "_requests"), "requests", true + } + return "", "", false +} + +func inferModelWorkloadLanguage(model string) string { + model = strings.ToLower(strings.TrimSpace(model)) + if model == "" { + return "general" + } + switch { + case strings.Contains(model, "coder"), strings.Contains(model, "codestral"), strings.Contains(model, "devstral"), strings.Contains(model, "code"): + return "code" + case strings.Contains(model, "vision"), strings.Contains(model, "image"), strings.Contains(model, "multimodal"), strings.Contains(model, "omni"), strings.Contains(model, "vl"): + return "multimodal" + case strings.Contains(model, "audio"), strings.Contains(model, "speech"), strings.Contains(model, "voice"), strings.Contains(model, "whisper"), strings.Contains(model, "tts"), strings.Contains(model, "stt"): + return "audio" + case strings.Contains(model, "reason"), strings.Contains(model, "thinking"): + return "reasoning" + default: + return "general" + } +} + +func synthesizeUsageSummaries(snap *core.UsageSnapshot) { + modelTotals := make(map[string]float64) + modelWindow := "" + modelUnit := "tok" + for key, metric := range snap.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "model_") { + continue + } + switch { + case strings.HasSuffix(key, "_total_tokens"): + name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_total_tokens") + modelTotals[name] = *metric.Used + if modelWindow == "" && strings.TrimSpace(metric.Window) != "" { + modelWindow = strings.TrimSpace(metric.Window) + } + case strings.HasSuffix(key, "_cost_usd"): + name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cost_usd") + if _, ok := modelTotals[name]; !ok { + 
modelTotals[name] = *metric.Used + modelUnit = "usd" + if modelWindow == "" && strings.TrimSpace(metric.Window) != "" { + modelWindow = strings.TrimSpace(metric.Window) + } + } + } + } + if summary := summarizeShareUsage(modelTotals, 6); summary != "" { + snap.Raw["model_usage"] = summary + if modelWindow != "" { + snap.Raw["model_usage_window"] = modelWindow + } + snap.Raw["model_usage_unit"] = modelUnit + } + + clientTotals := make(map[string]float64) + for key, metric := range snap.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "client_") { + continue + } + switch { + case strings.HasSuffix(key, "_total_tokens"): + name := strings.TrimSuffix(strings.TrimPrefix(key, "client_"), "_total_tokens") + clientTotals[name] = *metric.Used + case strings.HasSuffix(key, "_requests"): + name := strings.TrimSuffix(strings.TrimPrefix(key, "client_"), "_requests") + if _, ok := clientTotals[name]; !ok { + clientTotals[name] = *metric.Used + } + } + } + if summary := summarizeShareUsage(clientTotals, 6); summary != "" { + snap.Raw["client_usage"] = summary + } +} + +func summarizeShareUsage(values map[string]float64, maxItems int) string { + type item struct { + name string + value float64 + } + list := make([]item, 0, len(values)) + total := 0.0 + for name, value := range values { + if value <= 0 { + continue + } + list = append(list, item{name: name, value: value}) + total += value + } + if len(list) == 0 || total <= 0 { + return "" + } + sort.Slice(list, func(i, j int) bool { + if list[i].value != list[j].value { + return list[i].value > list[j].value + } + return list[i].name < list[j].name + }) + if maxItems > 0 && len(list) > maxItems { + list = list[:maxItems] + } + parts := make([]string, 0, len(list)) + for _, entry := range list { + parts = append(parts, fmt.Sprintf("%s: %.0f%%", normalizeUsageLabel(entry.name), entry.value/total*100)) + } + return strings.Join(parts, ", ") +} + +func summarizeCountUsage(values map[string]float64, unit string, maxItems 
int) string { + type item struct { + name string + value float64 + } + list := make([]item, 0, len(values)) + for name, value := range values { + if value <= 0 { + continue + } + list = append(list, item{name: name, value: value}) + } + if len(list) == 0 { + return "" + } + sort.Slice(list, func(i, j int) bool { + if list[i].value != list[j].value { + return list[i].value > list[j].value + } + return list[i].name < list[j].name + }) + if maxItems > 0 && len(list) > maxItems { + list = list[:maxItems] + } + parts := make([]string, 0, len(list)) + for _, entry := range list { + parts = append(parts, fmt.Sprintf("%s: %.0f %s", normalizeUsageLabel(entry.name), entry.value, unit)) + } + return strings.Join(parts, ", ") +} + +func normalizeUsageLabel(name string) string { + name = strings.TrimSpace(name) + if name == "" { + return "unknown" + } + return strings.ReplaceAll(name, "_", " ") +} + +func emitModelDerivedToolUsageMetrics(snap *core.UsageSnapshot, modelRequests map[string]float64, window, source string) { + if snap == nil || len(modelRequests) == 0 { + return + } + if strings.TrimSpace(window) == "" { + window = "30d inferred" + } + counts := make(map[string]int, len(modelRequests)) + rows := make([]modelUsageCount, 0, len(modelRequests)) + totalCalls := 0.0 + for model, requests := range modelRequests { + if requests <= 0 { + continue + } + key := "tool_" + sanitizeName(model) + v := requests + snap.Metrics[key] = core.Metric{Used: &v, Unit: "calls", Window: window} + totalCalls += requests + counts[model] = int(math.Round(requests)) + rows = append(rows, modelUsageCount{name: model, count: requests}) + } + if totalCalls <= 0 { + return + } + if source != "" { + snap.Raw["tool_usage_source"] = source + } + if summary := summarizeModelCountUsage(rows, 6); summary != "" { + snap.Raw["tool_usage"] = summary + } else { + snap.Raw["tool_usage"] = summarizeTopCounts(counts, 6) + } + totalV := totalCalls + snap.Metrics["tool_calls_total"] = core.Metric{Used: &totalV, 
Unit: "calls", Window: "30d"} +} + +func emitToolOutcomeMetrics(snap *core.UsageSnapshot, totalRequests, totalCancelled int, window string) { + if snap == nil || totalRequests <= 0 { + return + } + if strings.TrimSpace(window) == "" { + window = "30d" + } + totalV := float64(totalRequests) + snap.Metrics["tool_calls_total"] = core.Metric{Used: &totalV, Unit: "calls", Window: window} + completed := totalRequests - totalCancelled + if completed < 0 { + completed = 0 + } + completedV := float64(completed) + snap.Metrics["tool_completed"] = core.Metric{Used: &completedV, Unit: "calls", Window: window} + if totalCancelled > 0 { + cancelledV := float64(totalCancelled) + snap.Metrics["tool_cancelled"] = core.Metric{Used: &cancelledV, Unit: "calls", Window: window} + } + successRate := completedV / totalV * 100 + snap.Metrics["tool_success_rate"] = core.Metric{Used: &successRate, Unit: "%", Window: window} +} + +func summarizeModelCountUsage(rows []modelUsageCount, limit int) string { + if len(rows) == 0 { + return "" + } + sort.Slice(rows, func(i, j int) bool { + if rows[i].count != rows[j].count { + return rows[i].count > rows[j].count + } + return rows[i].name < rows[j].name + }) + if limit > 0 && len(rows) > limit { + rows = rows[:limit] + } + parts := make([]string, 0, len(rows)) + for _, row := range rows { + parts = append(parts, fmt.Sprintf("%s: %.0f calls", row.name, row.count)) + } + return strings.Join(parts, ", ") +} + +func summarizeTopCounts(counts map[string]int, limit int) string { + type kv struct { + name string + count int + } + items := make([]kv, 0, len(counts)) + for name, count := range counts { + if count <= 0 { + continue + } + items = append(items, kv{name: name, count: count}) + } + sort.Slice(items, func(i, j int) bool { + if items[i].count != items[j].count { + return items[i].count > items[j].count + } + return items[i].name < items[j].name + }) + if limit <= 0 || limit > len(items) { + limit = len(items) + } + parts := make([]string, 0, 
limit) + for _, item := range items[:limit] { + parts = append(parts, fmt.Sprintf("%s=%d", item.name, item.count)) + } + return strings.Join(parts, ", ") +} + +func sanitizeName(name string) string { + name = strings.TrimSpace(name) + if name == "" { + return "unknown" + } + var builder strings.Builder + builder.Grow(len(name)) + for _, r := range name { + switch { + case r >= 'a' && r <= 'z': + builder.WriteRune(r) + case r >= 'A' && r <= 'Z': + builder.WriteRune(r) + case r >= '0' && r <= '9': + builder.WriteRune(r) + case r == '-' || r == '_' || r == '.': + builder.WriteRune(r) + default: + builder.WriteByte('_') + } + } + safe := strings.Trim(builder.String(), "_") + if safe == "" { + return "unknown" + } + return safe +} + +func normalizeModelName(name string) string { + name = strings.TrimSpace(strings.ToLower(name)) + if name == "" { + return "" + } + name = strings.ReplaceAll(name, "\\", "/") + name = strings.Trim(name, "/") + name = strings.Join(strings.Fields(name), "-") + if name == "" { + return "" + } + return name +} From 676a48bce81facd1de0e395ea8b3f3486557a164 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 14:05:11 +0100 Subject: [PATCH 13/32] refactor: inject ollama clock and refresh audit table --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 14 +++--- internal/providers/ollama/ollama.go | 46 +++++++++++-------- 2 files changed, 35 insertions(+), 25 deletions(-) diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 16db02f..545fc0b 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -49,6 +49,9 @@ This table captures every issue found in this pass. 
It is broad and high-signal, | R29 | Fixed | Shared analytics series selection | `internal/core/analytics_snapshot.go`, `internal/tui/analytics.go` | Token/model series selection and fallback weighting for analytics charts moved out of TUI render code into shared core helpers. | Keep new per-series heuristics out of render code. | | R30 | Fixed | Daemon loop family split | `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go` | Collection/retention, spool/hook-spool, and provider polling loops now live in separate daemon files instead of a single loop-heavy unit. | Keep future loop additions in the matching family file instead of re-growing a monolith. | | R31 | Fixed | Analytics timestamp normalization | `internal/core/analytics_normalize.go` | Synthesized analytics daily-series dates now derive from the snapshot timestamp in UTC instead of ad hoc local `time.Now()` fallbacks. | Continue the same UTC/clock cleanup in remaining providers such as Ollama. | +| R32 | Fixed | Cursor orchestration and projection split | `internal/providers/cursor/cursor.go`, `internal/providers/cursor/fetch.go`, `internal/providers/cursor/runtime.go`, `internal/providers/cursor/state_projection.go`, `internal/providers/cursor/tracking_projection.go`, `internal/providers/cursor/api_projection.go` | Cursor fetch orchestration, runtime merge/token helpers, state projection, tracking projection, and API projection logic now live in dedicated units instead of one large provider file. `cursor.go` is now limited to provider construction, shared types, and clock/state wiring. | Keep future Cursor changes inside the matching unit instead of re-growing `cursor.go`. 
| +| R33 | Fixed | OpenRouter analytics and snapshot-projection split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/analytics.go`, `internal/providers/openrouter/snapshot_projection.go` | OpenRouter analytics endpoint parsing/aggregation and dashboard synthesis/projection helpers now live outside the main provider file. The main file is now focused on provider setup plus key/credits/account fetch paths. | If the remaining key/account path grows again, split it into a small API helper unit. | +| R34 | Fixed | Ollama clock injection | `internal/providers/ollama/ollama.go` | Ollama’s cloud-usage window parsing, local log windows, reset inference, and DB-derived token window logic now use the provider clock instead of direct `time.Now()` calls in behavioral paths. | Reuse the same clock path if more Ollama time-derived metrics are added. | ## Action Table @@ -57,11 +60,9 @@ This table captures every issue found in this pass. It is broad and high-signal, | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/dashboardapp/service.go` | Side effects are injected and the model file is split, but TUI state-transition and rendering logic is still concentrated in very large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. 
| Lower UI complexity and smaller blast radius per change. | | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go` | Composition bars and analytics model views now consume shared extractors, but some analytics/detail sections still decode raw metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | -| A4 | P1 | OpenRouter provider size | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go`, `internal/providers/openrouter/generations.go` | `openrouter.go` is materially smaller after the provider-resolution and generation-path splits, but it still mixes auth probing, credits, keys, analytics parsing, and some output/projection helpers. | Continue splitting into `api_client`, `analytics`, and remaining projection/helper units. | Easier maintenance, smaller diff surface, faster targeted testing. | -| A5 | P1 | Cursor provider responsibility overload | `internal/providers/cursor/cursor.go:181-335`, `internal/providers/cursor/cursor.go:903-1842`, `internal/providers/cursor/state_records.go`, `internal/providers/cursor/tracking_records.go` | Cursor provider no longer owns the API/cache helpers, but it still combines fetch orchestration, token loading, local SQLite projection, and some remaining snapshot assembly in one large file. | Continue splitting token-loading, local DB projection, and snapshot assembly into dedicated modules so `cursor.go` becomes a thin coordinator. | Cleaner boundaries and less risk of local/API logic regressions. 
| +| A4 | P2 | OpenRouter account/API follow-through | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/analytics.go`, `internal/providers/openrouter/snapshot_projection.go`, `internal/providers/openrouter/generations.go` | The large analytics/projection seams are now split, but the remaining account/key/credits paths still sit together in `openrouter.go` and could be isolated further if that flow keeps growing. | If future OpenRouter changes cluster around account probing or key metadata, split a small `account_api` helper file rather than adding back into `openrouter.go`. | Keeps the provider easy to review as the account/API path evolves. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go` | The usage-view code is materially smaller after the helper/projection/query splits, but the orchestration/materialization path still owns temp-table lifecycle, query fanout, and aggregate assembly in one place. | Continue splitting remaining orchestration/materialization concerns and consider a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. 
| -| A11 | P2 | Time-dependent logic without injectable clock | `internal/providers/ollama/ollama.go:702`, `internal/providers/ollama/ollama.go:1075`, `internal/providers/ollama/ollama.go:1088`, `internal/providers/ollama/ollama.go:1575` | Cursor and OpenRouter now use injectable clocks in their main time-sensitive paths, and analytics normalization now uses snapshot UTC time, but Ollama still reads `time.Now()` directly in several behavioral paths. | Extend the clock abstraction to Ollama and any remaining provider-specific time windows/reset calculations. | Better determinism and fewer timezone edge cases. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | | A15 | P3 | Performance optimization opportunity in render path | `internal/tui/model.go:441-450`, `internal/tui/tiles_composition.go:302-322`, `internal/tui/detail.go:752-1046`, `internal/tui/analytics.go:663-729` | The UI recomputes display/composition structures from raw metric maps repeatedly during rendering. 
It is correct, but the work is duplicated across views and frames. | Cache derived display/composition sections per snapshot update instead of rebuilding them in each view path. | Lower render cost and less duplicated parsing logic. | @@ -69,10 +70,9 @@ This table captures every issue found in this pass. It is broad and high-signal, ## Suggested Execution Order 1. A2, A3 -2. A6, A5 -3. A4, A7 -4. A1, A11 -5. A12, A14, A15 +2. A6, A4 +3. A7, A1 +4. A12, A14, A15 ## Notes diff --git a/internal/providers/ollama/ollama.go b/internal/providers/ollama/ollama.go index d0a7ad3..972c52a 100644 --- a/internal/providers/ollama/ollama.go +++ b/internal/providers/ollama/ollama.go @@ -40,6 +40,7 @@ var settingsResetRe = regexp.MustCompile(`(?is)(Session usage|Weekly usage).*?da type Provider struct { providerbase.Base + clock core.Clock } func New() *Provider { @@ -64,6 +65,7 @@ func New() *Provider { }, Dashboard: dashboardWidget(), }), + clock: core.SystemClock{}, } } @@ -82,6 +84,13 @@ func (p *Provider) DetailWidget() core.DetailWidget { } } +func (p *Provider) now() time.Time { + if p != nil && p.clock != nil { + return p.clock.Now() + } + return time.Now() +} + func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.UsageSnapshot, error) { apiKey, authSnap := shared.RequireAPIKey(acct, p.ID()) cloudOnly := strings.EqualFold(acct.Auth, string(core.ProviderAuthTypeAPIKey)) @@ -90,6 +99,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa } snap := core.NewUsageSnapshot(p.ID(), acct.ID) + snap.Timestamp = p.now() snap.DailySeries = make(map[string][]core.TimePoint) hasData := false @@ -147,7 +157,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa } } - finalizeUsageWindows(&snap) + finalizeUsageWindows(&snap, p.now()) switch { case hasData: @@ -292,7 +302,7 @@ func (p *Provider) fetchLocalMe(ctx context.Context, baseURL string, snap *core. 
switch code { case http.StatusOK: - return applyCloudUserPayload(resp, snap), nil + return applyCloudUserPayload(resp, snap, p.now()), nil case http.StatusUnauthorized, http.StatusForbidden: if signinURL := anyStringCaseInsensitive(resp, "signin_url", "sign_in_url"); signinURL != "" { snap.SetAttribute("signin_url", signinURL) @@ -662,7 +672,7 @@ func (p *Provider) fetchDesktopDB(ctx context.Context, acct core.AccountConfig, snap.SetDiagnostic("desktop_model_usage_error", err.Error()) } - if err := populateEstimatedTokenUsageFromDB(ctx, db, snap); err != nil { + if err := populateEstimatedTokenUsageFromDB(ctx, db, snap, p.now()); err != nil { snap.SetDiagnostic("desktop_token_estimate_error", err.Error()) } @@ -699,7 +709,7 @@ func (p *Provider) fetchServerLogs(acct core.AccountConfig, snap *core.UsageSnap return false, nil } - now := time.Now() + now := p.now() start5h := now.Add(-5 * time.Hour) start24h := now.Add(-24 * time.Hour) start7d := now.Add(-7 * 24 * time.Hour) @@ -857,7 +867,7 @@ func (p *Provider) fetchCloudAPI(ctx context.Context, acct core.AccountConfig, a switch status { case http.StatusOK: snap.SetAttribute("auth_type", "api_key") - if applyCloudUserPayload(me, snap) { + if applyCloudUserPayload(me, snap, p.now()) { hasData = true } case http.StatusUnauthorized, http.StatusForbidden: @@ -901,7 +911,7 @@ func (p *Provider) fetchCloudAPI(ctx context.Context, acct core.AccountConfig, a return hasData, authFailed, limited, nil } -func applyCloudUserPayload(payload map[string]any, snap *core.UsageSnapshot) bool { +func applyCloudUserPayload(payload map[string]any, snap *core.UsageSnapshot, now time.Time) bool { if len(payload) == 0 { return false } @@ -942,20 +952,20 @@ func applyCloudUserPayload(payload map[string]any, snap *core.UsageSnapshot) boo snap.SetAttribute("billing_cycle_end", billingEnd.Format(time.RFC3339)) } - if extractCloudUsageWindows(payload, snap) { + if extractCloudUsageWindows(payload, snap, now) { hasData = true } return hasData } 
-func extractCloudUsageWindows(payload map[string]any, snap *core.UsageSnapshot) bool { +func extractCloudUsageWindows(payload map[string]any, snap *core.UsageSnapshot, now time.Time) bool { var found bool sessionKeys := []string{ "session_usage", "sessionusage", "usage_5h", "usagefivehour", "five_hour_usage", "fivehourusage", } - if metric, resetAt, ok := findUsageWindow(payload, sessionKeys, "5h"); ok { + if metric, resetAt, ok := findUsageWindow(payload, sessionKeys, "5h", now); ok { snap.Metrics["usage_five_hour"] = metric if !resetAt.IsZero() { snap.Resets["usage_five_hour"] = resetAt @@ -971,7 +981,7 @@ func extractCloudUsageWindows(payload map[string]any, snap *core.UsageSnapshot) dayKeys := []string{ "weekly_usage", "weeklyusage", "usage_1d", "usageoneday", "one_day_usage", "daily_usage", "dailyusage", } - if metric, resetAt, ok := findUsageWindow(payload, dayKeys, "1d"); ok { + if metric, resetAt, ok := findUsageWindow(payload, dayKeys, "1d", now); ok { snap.Metrics["usage_weekly"] = core.Metric{ Limit: metric.Limit, Remaining: metric.Remaining, @@ -991,7 +1001,7 @@ func extractCloudUsageWindows(payload map[string]any, snap *core.UsageSnapshot) return found } -func findUsageWindow(payload map[string]any, keys []string, fallbackWindow string) (core.Metric, time.Time, bool) { +func findUsageWindow(payload map[string]any, keys []string, fallbackWindow string, now time.Time) (core.Metric, time.Time, bool) { sources := []map[string]any{ payload, anyMapCaseInsensitive(payload, "usage"), @@ -1008,7 +1018,7 @@ func findUsageWindow(payload map[string]any, keys []string, fallbackWindow strin if !ok { continue } - if metric, resetAt, ok := parseUsageWindowValue(v, fallbackWindow); ok { + if metric, resetAt, ok := parseUsageWindowValue(v, fallbackWindow, now); ok { return metric, resetAt, true } } @@ -1017,7 +1027,7 @@ func findUsageWindow(payload map[string]any, keys []string, fallbackWindow strin return core.Metric{}, time.Time{}, false } -func 
parseUsageWindowValue(v any, fallbackWindow string) (core.Metric, time.Time, bool) { +func parseUsageWindowValue(v any, fallbackWindow string, now time.Time) (core.Metric, time.Time, bool) { if pct, ok := anyFloat(v); ok { return core.Metric{ Used: core.Float64Ptr(pct), @@ -1072,7 +1082,7 @@ func parseUsageWindowValue(v any, fallbackWindow string) (core.Metric, time.Time } if resetAt.IsZero() { if seconds, ok := anyFloatCaseInsensitive(raw, "reset_in", "reset_in_seconds", "resets_in", "seconds_to_reset"); ok && seconds > 0 { - resetAt = time.Now().Add(time.Duration(seconds * float64(time.Second))) + resetAt = now.Add(time.Duration(seconds * float64(time.Second))) } } @@ -1084,8 +1094,8 @@ func parseUsageWindowValue(v any, fallbackWindow string) (core.Metric, time.Time return core.Metric{}, time.Time{}, false } -func finalizeUsageWindows(snap *core.UsageSnapshot) { - now := time.Now().In(time.Local) +func finalizeUsageWindows(snap *core.UsageSnapshot, now time.Time) { + now = now.In(time.Local) blockStart, blockEnd := currentFiveHourBlock(now) // Keep usage windows strictly real-data-driven. 
@@ -1525,7 +1535,7 @@ func populateModelUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageS return nil } -func populateEstimatedTokenUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { +func populateEstimatedTokenUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot, now time.Time) error { hasThinking, err := tableHasColumn(ctx, db, "messages", "thinking") if err != nil { return err @@ -1572,7 +1582,7 @@ func populateEstimatedTokenUsageFromDB(ctx context.Context, db *sql.DB, snap *co sourceDailyRequests := make(map[string]map[string]float64) sessionsBySource := make(map[string]float64) - now := time.Now().In(time.Local) + now = now.In(time.Local) start5h := now.Add(-5 * time.Hour) start1d := now.Add(-24 * time.Hour) start7d := now.Add(-7 * 24 * time.Hour) From e3d9bc4ccc683c6d8c4f4387b1b247290b78556e Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 14:11:49 +0100 Subject: [PATCH 14/32] refactor: split openrouter account flow and detail tokens --- internal/providers/openrouter/account_api.go | 357 +++++++++++++++++ internal/providers/openrouter/openrouter.go | 380 ------------------- internal/tui/detail.go | 122 +----- internal/tui/detail_abstraction_test.go | 2 +- internal/tui/detail_tokens.go | 87 +++++ 5 files changed, 448 insertions(+), 500 deletions(-) create mode 100644 internal/providers/openrouter/account_api.go create mode 100644 internal/tui/detail_tokens.go diff --git a/internal/providers/openrouter/account_api.go b/internal/providers/openrouter/account_api.go new file mode 100644 index 0000000..456f838 --- /dev/null +++ b/internal/providers/openrouter/account_api.go @@ -0,0 +1,357 @@ +package openrouter + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/parsers" +) + +func (p *Provider) fetchAuthKey(ctx context.Context, baseURL, apiKey string, snap 
*core.UsageSnapshot) error { + for _, endpoint := range []string{"/key", "/auth/key"} { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+endpoint, nil) + if err != nil { + return fmt.Errorf("creating request: %w", err) + } + req.Header.Set("Authorization", "Bearer "+apiKey) + + resp, err := p.Client().Do(req) + if err != nil { + return fmt.Errorf("request failed: %w", err) + } + + snap.Raw = parsers.RedactHeaders(resp.Header) + if resp.StatusCode == http.StatusNotFound && endpoint == "/key" { + resp.Body.Close() + continue + } + + body, readErr := io.ReadAll(resp.Body) + resp.Body.Close() + if readErr != nil { + return fmt.Errorf("reading body: %w", readErr) + } + + switch resp.StatusCode { + case http.StatusUnauthorized, http.StatusForbidden: + snap.Status = core.StatusAuth + snap.Message = fmt.Sprintf("HTTP %d – check API key", resp.StatusCode) + return nil + case http.StatusOK: + default: + return fmt.Errorf("HTTP %d", resp.StatusCode) + } + + var keyResp keyResponse + if err := json.Unmarshal(body, &keyResp); err != nil { + snap.Status = core.StatusError + snap.Message = "failed to parse key response" + return nil + } + + applyKeyData(&keyResp.Data, snap) + parsers.ApplyRateLimitGroup(resp.Header, snap, "rpm_headers", "requests", "1m", + "x-ratelimit-limit-requests", "x-ratelimit-remaining-requests", "x-ratelimit-reset-requests") + parsers.ApplyRateLimitGroup(resp.Header, snap, "tpm_headers", "tokens", "1m", + "x-ratelimit-limit-tokens", "x-ratelimit-remaining-tokens", "x-ratelimit-reset-tokens") + return nil + } + + return fmt.Errorf("key endpoint not available (HTTP 404)") +} + +func applyKeyData(data *keyData, snap *core.UsageSnapshot) { + usage := data.Usage + var remaining *float64 + if data.LimitRemaining != nil { + remaining = data.LimitRemaining + } else if data.Limit != nil { + r := *data.Limit - usage + remaining = &r + } + + if data.Limit != nil { + snap.Metrics["credits"] = core.Metric{ + Limit: data.Limit, + Used: &usage, + 
Remaining: remaining, + Unit: "USD", + Window: "lifetime", + } + } else { + snap.Metrics["credits"] = core.Metric{Used: &usage, Unit: "USD", Window: "lifetime"} + } + + if remaining != nil { + snap.Metrics["limit_remaining"] = core.Metric{Used: remaining, Unit: "USD", Window: "current_period"} + } + if data.UsageDaily != nil { + snap.Metrics["usage_daily"] = core.Metric{Used: data.UsageDaily, Unit: "USD", Window: "1d"} + } + if data.UsageWeekly != nil { + snap.Metrics["usage_weekly"] = core.Metric{Used: data.UsageWeekly, Unit: "USD", Window: "7d"} + } + if data.UsageMonthly != nil { + snap.Metrics["usage_monthly"] = core.Metric{Used: data.UsageMonthly, Unit: "USD", Window: "30d"} + } + if data.ByokUsage != nil && *data.ByokUsage > 0 { + snap.Metrics["byok_usage"] = core.Metric{Used: data.ByokUsage, Unit: "USD", Window: "lifetime"} + snap.Raw["byok_in_use"] = "true" + } + if data.ByokUsageDaily != nil && *data.ByokUsageDaily > 0 { + snap.Metrics["byok_daily"] = core.Metric{Used: data.ByokUsageDaily, Unit: "USD", Window: "1d"} + snap.Raw["byok_in_use"] = "true" + } + if data.ByokUsageWeekly != nil && *data.ByokUsageWeekly > 0 { + snap.Metrics["byok_weekly"] = core.Metric{Used: data.ByokUsageWeekly, Unit: "USD", Window: "7d"} + snap.Raw["byok_in_use"] = "true" + } + if data.ByokUsageMonthly != nil && *data.ByokUsageMonthly > 0 { + snap.Metrics["byok_monthly"] = core.Metric{Used: data.ByokUsageMonthly, Unit: "USD", Window: "30d"} + snap.Raw["byok_in_use"] = "true" + } + if data.ByokUsageInference != nil && *data.ByokUsageInference > 0 { + snap.Metrics["today_byok_cost"] = core.Metric{Used: data.ByokUsageInference, Unit: "USD", Window: "1d"} + snap.Raw["byok_in_use"] = "true" + } + + if data.RateLimit.Requests > 0 { + rl := float64(data.RateLimit.Requests) + snap.Metrics["rpm"] = core.Metric{Limit: &rl, Unit: "requests", Window: data.RateLimit.Interval} + } + + keyLabel := data.Label + if keyLabel == "" { + keyLabel = data.Name + } + if keyLabel != "" { + 
snap.Raw["key_label"] = keyLabel + } + if data.IsFreeTier { + snap.Raw["tier"] = "free" + } else { + snap.Raw["tier"] = "paid" + } + + snap.Raw["is_free_tier"] = fmt.Sprintf("%t", data.IsFreeTier) + snap.Raw["is_management_key"] = fmt.Sprintf("%t", data.IsManagementKey) + snap.Raw["is_provisioning_key"] = fmt.Sprintf("%t", data.IsProvisioningKey) + snap.Raw["include_byok_in_limit"] = fmt.Sprintf("%t", data.IncludeByokInLimit) + if data.RateLimit.Note != "" { + snap.Raw["rate_limit_note"] = data.RateLimit.Note + } + + switch { + case data.IsManagementKey: + snap.Raw["key_type"] = "management" + case data.IsProvisioningKey: + snap.Raw["key_type"] = "provisioning" + default: + snap.Raw["key_type"] = "standard" + } + + if data.LimitReset != "" { + snap.Raw["limit_reset"] = data.LimitReset + if t, err := time.Parse(time.RFC3339, data.LimitReset); err == nil { + snap.Resets["limit_reset"] = t + } + } + if data.ExpiresAt != "" { + snap.Raw["expires_at"] = data.ExpiresAt + if t, err := time.Parse(time.RFC3339, data.ExpiresAt); err == nil { + snap.Resets["key_expires"] = t + } + } + + snap.Status = core.StatusOK + snap.Message = fmt.Sprintf("$%.4f used", usage) + if data.Limit != nil { + snap.Message += fmt.Sprintf(" / $%.2f limit", *data.Limit) + } +} + +func (p *Provider) fetchCreditsDetail(ctx context.Context, baseURL, apiKey string, snap *core.UsageSnapshot) error { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"/credits", nil) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+apiKey) + + resp, err := p.Client().Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("HTTP %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + var detail creditsDetailResponse + if err := json.Unmarshal(body, &detail); err != nil { + return err + } + + remaining := detail.Data.TotalCredits - detail.Data.TotalUsage 
+ if detail.Data.RemainingBalance != nil { + remaining = *detail.Data.RemainingBalance + } + if detail.Data.TotalCredits > 0 || detail.Data.TotalUsage > 0 || remaining > 0 { + totalCredits := detail.Data.TotalCredits + totalUsage := detail.Data.TotalUsage + snap.Metrics["credit_balance"] = core.Metric{ + Limit: &totalCredits, + Used: &totalUsage, + Remaining: &remaining, + Unit: "USD", + Window: "lifetime", + } + snap.Message = fmt.Sprintf("$%.4f used", totalUsage) + if totalCredits > 0 { + snap.Message += fmt.Sprintf(" / $%.2f credits", totalCredits) + } + } + + return nil +} + +func (p *Provider) fetchKeysMeta(ctx context.Context, baseURL, apiKey string, snap *core.UsageSnapshot) error { + const ( + pageSizeHint = 100 + maxPages = 20 + ) + + var allKeys []keyListEntry + offset := 0 + for page := 0; page < maxPages; page++ { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/keys?include_disabled=true&offset=%d", baseURL, offset), nil) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+apiKey) + + resp, err := p.Client().Do(req) + if err != nil { + return err + } + + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return err + } + if resp.StatusCode == http.StatusForbidden { + return nil + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("HTTP %d", resp.StatusCode) + } + + var pageResp keysResponse + if err := json.Unmarshal(body, &pageResp); err != nil { + return fmt.Errorf("parsing keys list: %w", err) + } + if len(pageResp.Data) == 0 { + break + } + + allKeys = append(allKeys, pageResp.Data...) 
+ offset += len(pageResp.Data) + if len(pageResp.Data) < pageSizeHint { + break + } + } + + snap.Raw["keys_total"] = fmt.Sprintf("%d", len(allKeys)) + + active := 0 + for _, key := range allKeys { + if !key.Disabled { + active++ + } + } + snap.Raw["keys_active"] = fmt.Sprintf("%d", active) + disabled := len(allKeys) - active + snap.Raw["keys_disabled"] = fmt.Sprintf("%d", disabled) + + totalF := float64(len(allKeys)) + activeF := float64(active) + disabledF := float64(disabled) + snap.Metrics["keys_total"] = core.Metric{Used: &totalF, Unit: "keys", Window: "account"} + snap.Metrics["keys_active"] = core.Metric{Used: &activeF, Unit: "keys", Window: "account"} + if disabled > 0 { + snap.Metrics["keys_disabled"] = core.Metric{Used: &disabledF, Unit: "keys", Window: "account"} + } + + currentLabel := snap.Raw["key_label"] + if currentLabel == "" { + return nil + } + + var current *keyListEntry + for i := range allKeys { + if allKeys[i].Label == currentLabel { + current = &allKeys[i] + break + } + } + if current == nil { + snap.Raw["key_lookup"] = "not_in_keys_list" + return nil + } + + if current.Name != "" { + snap.Raw["key_name"] = current.Name + } + snap.Raw["key_disabled"] = fmt.Sprintf("%t", current.Disabled) + if current.CreatedAt != "" { + snap.Raw["key_created_at"] = current.CreatedAt + } + if current.UpdatedAt != nil && *current.UpdatedAt != "" { + snap.Raw["key_updated_at"] = *current.UpdatedAt + } + if current.Hash != "" { + hash := current.Hash + if len(hash) > 12 { + hash = hash[:12] + } + snap.Raw["key_hash_prefix"] = hash + } + + if snap.Raw["is_management_key"] == "true" { + var totalUsage, daily, weekly, monthly float64 + for _, key := range allKeys { + totalUsage += key.Usage + daily += key.UsageDaily + weekly += key.UsageWeekly + monthly += key.UsageMonthly + } + if totalUsage > 0 { + snap.Metrics["credits"] = core.Metric{Used: &totalUsage, Unit: "USD", Window: "lifetime"} + if lim := snap.Metrics["credits"].Limit; lim != nil { + 
snap.Metrics["credits"] = core.Metric{Limit: lim, Used: &totalUsage, Unit: "USD", Window: "lifetime"} + } + } + if daily > 0 { + snap.Metrics["usage_daily"] = core.Metric{Used: &daily, Unit: "USD", Window: "1d"} + } + if weekly > 0 { + snap.Metrics["usage_weekly"] = core.Metric{Used: &weekly, Unit: "USD", Window: "7d"} + } + if monthly > 0 { + snap.Metrics["usage_monthly"] = core.Metric{Used: &monthly, Unit: "USD", Window: "30d"} + } + } + + return nil +} diff --git a/internal/providers/openrouter/openrouter.go b/internal/providers/openrouter/openrouter.go index 8f652b5..e20ca1d 100644 --- a/internal/providers/openrouter/openrouter.go +++ b/internal/providers/openrouter/openrouter.go @@ -5,12 +5,9 @@ import ( "encoding/json" "errors" "fmt" - "io" - "net/http" "time" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/janekbaraniewski/openusage/internal/parsers" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" "github.com/janekbaraniewski/openusage/internal/providers/shared" ) @@ -290,380 +287,3 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa return snap, nil } - -func (p *Provider) fetchAuthKey(ctx context.Context, baseURL, apiKey string, snap *core.UsageSnapshot) error { - for _, endpoint := range []string{"/key", "/auth/key"} { - url := baseURL + endpoint - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return fmt.Errorf("creating request: %w", err) - } - req.Header.Set("Authorization", "Bearer "+apiKey) - - resp, err := p.Client().Do(req) - if err != nil { - return fmt.Errorf("request failed: %w", err) - } - - snap.Raw = parsers.RedactHeaders(resp.Header) - if resp.StatusCode == http.StatusNotFound && endpoint == "/key" { - resp.Body.Close() - continue - } - - body, readErr := io.ReadAll(resp.Body) - resp.Body.Close() - if readErr != nil { - return fmt.Errorf("reading body: %w", readErr) - } - - switch resp.StatusCode { - case 
http.StatusUnauthorized, http.StatusForbidden: - snap.Status = core.StatusAuth - snap.Message = fmt.Sprintf("HTTP %d – check API key", resp.StatusCode) - return nil - case http.StatusOK: - default: - return fmt.Errorf("HTTP %d", resp.StatusCode) - } - - var keyResp keyResponse - if err := json.Unmarshal(body, &keyResp); err != nil { - snap.Status = core.StatusError - snap.Message = "failed to parse key response" - return nil - } - - applyKeyData(&keyResp.Data, snap) - parsers.ApplyRateLimitGroup(resp.Header, snap, "rpm_headers", "requests", "1m", - "x-ratelimit-limit-requests", "x-ratelimit-remaining-requests", "x-ratelimit-reset-requests") - parsers.ApplyRateLimitGroup(resp.Header, snap, "tpm_headers", "tokens", "1m", - "x-ratelimit-limit-tokens", "x-ratelimit-remaining-tokens", "x-ratelimit-reset-tokens") - return nil - } - - return fmt.Errorf("key endpoint not available (HTTP 404)") -} - -func applyKeyData(data *keyData, snap *core.UsageSnapshot) { - usage := data.Usage - var remaining *float64 - if data.LimitRemaining != nil { - remaining = data.LimitRemaining - } else if data.Limit != nil { - r := *data.Limit - usage - remaining = &r - } - - if data.Limit != nil { - snap.Metrics["credits"] = core.Metric{ - Limit: data.Limit, - Used: &usage, - Remaining: remaining, - Unit: "USD", - Window: "lifetime", - } - } else { - snap.Metrics["credits"] = core.Metric{ - Used: &usage, - Unit: "USD", - Window: "lifetime", - } - } - - if remaining != nil { - snap.Metrics["limit_remaining"] = core.Metric{ - Used: remaining, - Unit: "USD", - Window: "current_period", - } - } - - if data.UsageDaily != nil { - snap.Metrics["usage_daily"] = core.Metric{Used: data.UsageDaily, Unit: "USD", Window: "1d"} - } - if data.UsageWeekly != nil { - snap.Metrics["usage_weekly"] = core.Metric{Used: data.UsageWeekly, Unit: "USD", Window: "7d"} - } - if data.UsageMonthly != nil { - snap.Metrics["usage_monthly"] = core.Metric{Used: data.UsageMonthly, Unit: "USD", Window: "30d"} - } - if 
data.ByokUsage != nil && *data.ByokUsage > 0 { - snap.Metrics["byok_usage"] = core.Metric{Used: data.ByokUsage, Unit: "USD", Window: "lifetime"} - snap.Raw["byok_in_use"] = "true" - } - if data.ByokUsageDaily != nil && *data.ByokUsageDaily > 0 { - snap.Metrics["byok_daily"] = core.Metric{Used: data.ByokUsageDaily, Unit: "USD", Window: "1d"} - snap.Raw["byok_in_use"] = "true" - } - if data.ByokUsageWeekly != nil && *data.ByokUsageWeekly > 0 { - snap.Metrics["byok_weekly"] = core.Metric{Used: data.ByokUsageWeekly, Unit: "USD", Window: "7d"} - snap.Raw["byok_in_use"] = "true" - } - if data.ByokUsageMonthly != nil && *data.ByokUsageMonthly > 0 { - snap.Metrics["byok_monthly"] = core.Metric{Used: data.ByokUsageMonthly, Unit: "USD", Window: "30d"} - snap.Raw["byok_in_use"] = "true" - } - if data.ByokUsageInference != nil && *data.ByokUsageInference > 0 { - snap.Metrics["today_byok_cost"] = core.Metric{Used: data.ByokUsageInference, Unit: "USD", Window: "1d"} - snap.Raw["byok_in_use"] = "true" - } - - if data.RateLimit.Requests > 0 { - rl := float64(data.RateLimit.Requests) - snap.Metrics["rpm"] = core.Metric{ - Limit: &rl, - Unit: "requests", - Window: data.RateLimit.Interval, - } - } - - keyLabel := data.Label - if keyLabel == "" { - keyLabel = data.Name - } - if keyLabel != "" { - snap.Raw["key_label"] = keyLabel - } - if data.IsFreeTier { - snap.Raw["tier"] = "free" - } else { - snap.Raw["tier"] = "paid" - } - - snap.Raw["is_free_tier"] = fmt.Sprintf("%t", data.IsFreeTier) - snap.Raw["is_management_key"] = fmt.Sprintf("%t", data.IsManagementKey) - snap.Raw["is_provisioning_key"] = fmt.Sprintf("%t", data.IsProvisioningKey) - snap.Raw["include_byok_in_limit"] = fmt.Sprintf("%t", data.IncludeByokInLimit) - if data.RateLimit.Note != "" { - snap.Raw["rate_limit_note"] = data.RateLimit.Note - } - - switch { - case data.IsManagementKey: - snap.Raw["key_type"] = "management" - case data.IsProvisioningKey: - snap.Raw["key_type"] = "provisioning" - default: - 
snap.Raw["key_type"] = "standard" - } - - if data.LimitReset != "" { - snap.Raw["limit_reset"] = data.LimitReset - if t, err := time.Parse(time.RFC3339, data.LimitReset); err == nil { - snap.Resets["limit_reset"] = t - } - } - if data.ExpiresAt != "" { - snap.Raw["expires_at"] = data.ExpiresAt - if t, err := time.Parse(time.RFC3339, data.ExpiresAt); err == nil { - snap.Resets["key_expires"] = t - } - } - - snap.Status = core.StatusOK - snap.Message = fmt.Sprintf("$%.4f used", usage) - if data.Limit != nil { - snap.Message += fmt.Sprintf(" / $%.2f limit", *data.Limit) - } -} - -func (p *Provider) fetchCreditsDetail(ctx context.Context, baseURL, apiKey string, snap *core.UsageSnapshot) error { - url := baseURL + "/credits" - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return err - } - req.Header.Set("Authorization", "Bearer "+apiKey) - - resp, err := p.Client().Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("HTTP %d", resp.StatusCode) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - - var detail creditsDetailResponse - if err := json.Unmarshal(body, &detail); err != nil { - return err - } - - remaining := detail.Data.TotalCredits - detail.Data.TotalUsage - if detail.Data.RemainingBalance != nil { - remaining = *detail.Data.RemainingBalance - } - - if detail.Data.TotalCredits > 0 || detail.Data.TotalUsage > 0 || remaining > 0 { - totalCredits := detail.Data.TotalCredits - totalUsage := detail.Data.TotalUsage - - snap.Metrics["credit_balance"] = core.Metric{ - Limit: &totalCredits, - Used: &totalUsage, - Remaining: &remaining, - Unit: "USD", - Window: "lifetime", - } - - snap.Message = fmt.Sprintf("$%.4f used", totalUsage) - if totalCredits > 0 { - snap.Message += fmt.Sprintf(" / $%.2f credits", totalCredits) - } - } - - return nil -} - -func (p *Provider) fetchKeysMeta(ctx context.Context, baseURL, apiKey 
string, snap *core.UsageSnapshot) error { - const ( - pageSizeHint = 100 - maxPages = 20 - ) - - var allKeys []keyListEntry - offset := 0 - - for page := 0; page < maxPages; page++ { - url := fmt.Sprintf("%s/keys?include_disabled=true&offset=%d", baseURL, offset) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return err - } - req.Header.Set("Authorization", "Bearer "+apiKey) - - resp, err := p.Client().Do(req) - if err != nil { - return err - } - - body, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return err - } - - if resp.StatusCode == http.StatusForbidden { - return nil - } - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("HTTP %d", resp.StatusCode) - } - - var pageResp keysResponse - if err := json.Unmarshal(body, &pageResp); err != nil { - return fmt.Errorf("parsing keys list: %w", err) - } - if len(pageResp.Data) == 0 { - break - } - - allKeys = append(allKeys, pageResp.Data...) - offset += len(pageResp.Data) - if len(pageResp.Data) < pageSizeHint { - break - } - } - - snap.Raw["keys_total"] = fmt.Sprintf("%d", len(allKeys)) - - active := 0 - for _, k := range allKeys { - if !k.Disabled { - active++ - } - } - snap.Raw["keys_active"] = fmt.Sprintf("%d", active) - disabled := len(allKeys) - active - snap.Raw["keys_disabled"] = fmt.Sprintf("%d", disabled) - - totalF := float64(len(allKeys)) - activeF := float64(active) - disabledF := float64(disabled) - snap.Metrics["keys_total"] = core.Metric{Used: &totalF, Unit: "keys", Window: "account"} - snap.Metrics["keys_active"] = core.Metric{Used: &activeF, Unit: "keys", Window: "account"} - if disabled > 0 { - snap.Metrics["keys_disabled"] = core.Metric{Used: &disabledF, Unit: "keys", Window: "account"} - } - - currentLabel := snap.Raw["key_label"] - if currentLabel == "" { - return nil - } - - var current *keyListEntry - for i := range allKeys { - if allKeys[i].Label == currentLabel { - current = &allKeys[i] - break - } - } - if current 
== nil { - snap.Raw["key_lookup"] = "not_in_keys_list" - return nil - } - - if current.Name != "" { - snap.Raw["key_name"] = current.Name - } - snap.Raw["key_disabled"] = fmt.Sprintf("%t", current.Disabled) - if current.CreatedAt != "" { - snap.Raw["key_created_at"] = current.CreatedAt - } - if current.UpdatedAt != nil && *current.UpdatedAt != "" { - snap.Raw["key_updated_at"] = *current.UpdatedAt - } - if current.Hash != "" { - hash := current.Hash - if len(hash) > 12 { - hash = hash[:12] - } - snap.Raw["key_hash_prefix"] = hash - } - - // For management keys, aggregate usage from all sub-keys. - // The /auth/key endpoint reports $0 for the management key itself; - // the real spend is spread across the provisioned sub-keys. - if snap.Raw["is_management_key"] == "true" { - var totalUsage, daily, weekly, monthly float64 - for _, k := range allKeys { - totalUsage += k.Usage - daily += k.UsageDaily - weekly += k.UsageWeekly - monthly += k.UsageMonthly - } - if totalUsage > 0 { - snap.Metrics["credits"] = core.Metric{ - Used: &totalUsage, - Unit: "USD", - Window: "lifetime", - } - if lim := snap.Metrics["credits"].Limit; lim != nil { - snap.Metrics["credits"] = core.Metric{ - Limit: lim, - Used: &totalUsage, - Unit: "USD", - Window: "lifetime", - } - } - } - if daily > 0 { - snap.Metrics["usage_daily"] = core.Metric{Used: &daily, Unit: "USD", Window: "1d"} - } - if weekly > 0 { - snap.Metrics["usage_weekly"] = core.Metric{Used: &weekly, Unit: "USD", Window: "7d"} - } - if monthly > 0 { - snap.Metrics["usage_monthly"] = core.Metric{Used: &monthly, Unit: "USD", Window: "30d"} - } - } - - return nil -} diff --git a/internal/tui/detail.go b/internal/tui/detail.go index 8356c9d..69df938 100644 --- a/internal/tui/detail.go +++ b/internal/tui/detail.go @@ -88,7 +88,7 @@ func RenderDetailContent(snap core.UsageSnapshot, w int, warnThresh, critThresh groups := groupMetrics(snap.Metrics, widget, details) for _, group := range groups { if showAll || group.title == tabName { - 
renderMetricGroup(&sb, group, widget, details, w, warnThresh, critThresh, snap.DailySeries, burnRate) + renderMetricGroup(&sb, snap, group, widget, details, w, warnThresh, critThresh, snap.DailySeries, burnRate) } } } @@ -506,7 +506,7 @@ func titleCase(s string) string { return strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) } -func renderMetricGroup(sb *strings.Builder, group metricGroup, widget core.DashboardWidget, details core.DetailWidget, w int, warnThresh, critThresh float64, series map[string][]core.TimePoint, burnRate float64) { +func renderMetricGroup(sb *strings.Builder, snap core.UsageSnapshot, group metricGroup, widget core.DashboardWidget, details core.DetailWidget, w int, warnThresh, critThresh float64, series map[string][]core.TimePoint, burnRate float64) { sb.WriteString("\n") renderDetailSectionHeader(sb, group.title, w) @@ -522,7 +522,7 @@ func renderMetricGroup(sb *strings.Builder, group metricGroup, widget core.Dashb case core.DetailSectionStyleSpending: renderSpendingSection(sb, entries, w, burnRate) case core.DetailSectionStyleTokens: - renderTokensSection(sb, entries, widget, w, series) + renderTokensSection(sb, snap, entries, widget, w, series) case core.DetailSectionStyleActivity: renderActivitySection(sb, entries, widget, w, series) case core.DetailSectionStyleLanguages: @@ -612,38 +612,6 @@ func renderSpendingSection(sb *strings.Builder, entries []metricEntry, w int, bu } } -func renderTokensSection(sb *strings.Builder, entries []metricEntry, widget core.DashboardWidget, w int, series map[string][]core.TimePoint) { - labelW := sectionLabelWidth(w) - - var perModelTokens []metricEntry - var otherTokens []metricEntry - - for _, e := range entries { - if isPerModelTokenKey(e.key) { - perModelTokens = append(perModelTokens, e) - } else { - otherTokens = append(otherTokens, e) - } - } - - for _, e := range otherTokens { - val := formatMetricValue(e.metric) - sb.WriteString(fmt.Sprintf(" %s %s\n", - labelStyle.Width(labelW).Render(e.label), 
valueStyle.Render(val))) - } - - if len(perModelTokens) > 0 { - if len(otherTokens) > 0 { - sb.WriteString("\n") - } - renderTokenUsageTable(sb, perModelTokens, w) - } - - renderSectionSparklines(sb, widget, w, series, []string{ - "tokens_total", "tokens_input", "tokens_output", - }) -} - func renderActivitySection(sb *strings.Builder, entries []metricEntry, widget core.DashboardWidget, w int, series map[string][]core.TimePoint) { labelW := sectionLabelWidth(w) @@ -1252,10 +1220,6 @@ func isModelCostKey(key string) bool { return core.IsModelCostMetricKey(key) } -func isPerModelTokenKey(key string) bool { - return core.IsPerModelTokenMetricKey(key) -} - func formatMetricValue(m core.Metric) string { var value string switch { @@ -1361,86 +1325,6 @@ func renderModelCostsTable(sb *strings.Builder, entries []metricEntry, w int) { } } -func renderTokenUsageTable(sb *strings.Builder, entries []metricEntry, w int) { - type tokenData struct { - name string - inputTokens float64 - outputTokens float64 - } - - models := make(map[string]*tokenData) - var modelOrder []string - - for _, e := range entries { - key := e.key // use the raw metric key for pattern matching - var modelName string - var isInput bool - - switch { - case strings.HasPrefix(key, "input_tokens_"): - modelName = strings.TrimPrefix(key, "input_tokens_") - isInput = true - case strings.HasPrefix(key, "output_tokens_"): - modelName = strings.TrimPrefix(key, "output_tokens_") - isInput = false - case strings.HasSuffix(key, "_input_tokens"): - modelName = strings.TrimPrefix( - strings.TrimSuffix(key, "_input_tokens"), "model_") - isInput = true - case strings.HasSuffix(key, "_output_tokens"): - modelName = strings.TrimPrefix( - strings.TrimSuffix(key, "_output_tokens"), "model_") - isInput = false - default: - continue - } - - md, ok := models[modelName] - if !ok { - md = &tokenData{name: modelName} - models[modelName] = md - modelOrder = append(modelOrder, modelName) - } - if e.metric.Used != nil { - if isInput 
{ - md.inputTokens = *e.metric.Used - } else { - md.outputTokens = *e.metric.Used - } - } - } - - if len(modelOrder) == 0 { - return - } - - nameW := 26 - colW := 10 - if w < 55 { - nameW = 18 - colW = 8 - } - - sb.WriteString(fmt.Sprintf(" %-*s %*s %*s\n", - nameW, dimStyle.Bold(true).Render("Model"), - colW, dimStyle.Bold(true).Render("Input"), - colW, dimStyle.Bold(true).Render("Output"), - )) - - for _, name := range modelOrder { - md := models[name] - displayName := prettifyModelName(md.name) - if len(displayName) > nameW { - displayName = displayName[:nameW-1] + "…" - } - sb.WriteString(fmt.Sprintf(" %-*s %*s %*s\n", - nameW, valueStyle.Render(displayName), - colW, lipgloss.NewStyle().Foreground(colorSubtext).Render(formatTokens(md.inputTokens)), - colW, lipgloss.NewStyle().Foreground(colorSubtext).Render(formatTokens(md.outputTokens)), - )) - } -} - func renderUsageTable(sb *strings.Builder, entries []metricEntry, w int, warnThresh, critThresh float64) { if len(entries) == 0 { return diff --git a/internal/tui/detail_abstraction_test.go b/internal/tui/detail_abstraction_test.go index 2c36d90..652d258 100644 --- a/internal/tui/detail_abstraction_test.go +++ b/internal/tui/detail_abstraction_test.go @@ -71,7 +71,7 @@ func TestRenderMetricGroup_UnknownSectionFallsBackToList(t *testing.T) { } var sb strings.Builder - renderMetricGroup(&sb, group, widget, details, 80, 0.3, 0.1, nil, 0) + renderMetricGroup(&sb, core.UsageSnapshot{}, group, widget, details, 80, 0.3, 0.1, nil, 0) out := sb.String() if !strings.Contains(out, "Models") { t.Fatalf("output missing metric label: %q", out) diff --git a/internal/tui/detail_tokens.go b/internal/tui/detail_tokens.go new file mode 100644 index 0000000..72ebe58 --- /dev/null +++ b/internal/tui/detail_tokens.go @@ -0,0 +1,87 @@ +package tui + +import ( + "fmt" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func renderTokensSection(sb *strings.Builder, snap 
core.UsageSnapshot, entries []metricEntry, widget core.DashboardWidget, w int, series map[string][]core.TimePoint) { + labelW := sectionLabelWidth(w) + + var otherTokens []metricEntry + for _, entry := range entries { + if !isPerModelTokenKey(entry.key) { + otherTokens = append(otherTokens, entry) + } + } + + for _, entry := range otherTokens { + val := formatMetricValue(entry.metric) + sb.WriteString(fmt.Sprintf(" %s %s\n", + labelStyle.Width(labelW).Render(entry.label), valueStyle.Render(val))) + } + + models := core.ExtractAnalyticsModelUsage(snap) + hasPerModelTokens := false + for _, model := range models { + if model.InputTokens > 0 || model.OutputTokens > 0 { + hasPerModelTokens = true + break + } + } + if hasPerModelTokens { + if len(otherTokens) > 0 { + sb.WriteString("\n") + } + renderTokenUsageTable(sb, models, w) + } + + renderSectionSparklines(sb, widget, w, series, []string{ + "tokens_total", "tokens_input", "tokens_output", + }) +} + +func isPerModelTokenKey(key string) bool { + return core.IsPerModelTokenMetricKey(key) +} + +func renderTokenUsageTable(sb *strings.Builder, models []core.AnalyticsModelUsageEntry, w int) { + rows := make([]core.AnalyticsModelUsageEntry, 0, len(models)) + for _, model := range models { + if model.InputTokens <= 0 && model.OutputTokens <= 0 { + continue + } + rows = append(rows, model) + } + if len(rows) == 0 { + return + } + + nameW := 26 + colW := 10 + if w < 55 { + nameW = 18 + colW = 8 + } + + sb.WriteString(fmt.Sprintf(" %-*s %*s %*s\n", + nameW, dimStyle.Bold(true).Render("Model"), + colW, dimStyle.Bold(true).Render("Input"), + colW, dimStyle.Bold(true).Render("Output"), + )) + + for _, model := range rows { + displayName := prettifyModelName(model.Name) + if len(displayName) > nameW { + displayName = displayName[:nameW-1] + "…" + } + sb.WriteString(fmt.Sprintf(" %-*s %*s %*s\n", + nameW, valueStyle.Render(displayName), + colW, lipgloss.NewStyle().Foreground(colorSubtext).Render(formatTokens(model.InputTokens)), + 
colW, lipgloss.NewStyle().Foreground(colorSubtext).Render(formatTokens(model.OutputTokens)), + )) + } +} From 804a10e16830659a194c570efc6ca4eb11a55208 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 15:41:19 +0100 Subject: [PATCH 15/32] refactor: bind telemetry sources to accounts and move metric parsing into core --- cmd/openusage/telemetry.go | 5 +- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 17 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 140 ++++++++ internal/core/metric_labels.go | 145 +++++++++ internal/core/usage_breakdowns.go | 81 +++++ internal/core/usage_breakdowns_test.go | 30 ++ internal/daemon/server.go | 17 +- internal/daemon/server_collect.go | 8 +- internal/daemon/server_http.go | 10 +- internal/daemon/server_spool.go | 5 +- internal/daemon/source_collectors.go | 300 ++++++++++++++++++ internal/daemon/source_collectors_test.go | 106 +++++++ internal/telemetry/provider_event_mapper.go | 2 +- .../telemetry/provider_event_mapper_test.go | 6 +- internal/tui/detail.go | 129 +------- internal/tui/tiles_composition.go | 104 +----- 16 files changed, 852 insertions(+), 253 deletions(-) create mode 100644 docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md create mode 100644 internal/core/metric_labels.go create mode 100644 internal/daemon/source_collectors.go create mode 100644 internal/daemon/source_collectors_test.go diff --git a/cmd/openusage/telemetry.go b/cmd/openusage/telemetry.go index aa58a84..952e66f 100644 --- a/cmd/openusage/telemetry.go +++ b/cmd/openusage/telemetry.go @@ -13,7 +13,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/detect" "github.com/janekbaraniewski/openusage/internal/integrations" "github.com/janekbaraniewski/openusage/internal/providers" - "github.com/janekbaraniewski/openusage/internal/providers/shared" "github.com/janekbaraniewski/openusage/internal/telemetry" "github.com/spf13/cobra" ) @@ -161,13 +160,15 @@ func ingestHookLocally( if !ok { return daemon.HookResponse{}, 
fmt.Errorf("unknown hook source %q", sourceName) } - reqs, err := telemetry.ParseSourceHookPayload(source, payload, shared.TelemetryCollectOptions{}, accountID) + options, effectiveAccountID, warnings := daemon.ResolveTelemetrySourceOptions(source, accountID) + reqs, err := telemetry.ParseSourceHookPayload(source, payload, options, effectiveAccountID) if err != nil { return daemon.HookResponse{}, fmt.Errorf("parse hook payload: %w", err) } resp := daemon.HookResponse{ Source: sourceName, Enqueued: len(reqs), + Warnings: warnings, } if len(reqs) == 0 { return resp, nil diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 545fc0b..24af736 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -52,16 +52,21 @@ This table captures every issue found in this pass. It is broad and high-signal, | R32 | Fixed | Cursor orchestration and projection split | `internal/providers/cursor/cursor.go`, `internal/providers/cursor/fetch.go`, `internal/providers/cursor/runtime.go`, `internal/providers/cursor/state_projection.go`, `internal/providers/cursor/tracking_projection.go`, `internal/providers/cursor/api_projection.go` | Cursor fetch orchestration, runtime merge/token helpers, state projection, tracking projection, and API projection logic now live in dedicated units instead of one large provider file. `cursor.go` is now limited to provider construction, shared types, and clock/state wiring. | Keep future Cursor changes inside the matching unit instead of re-growing `cursor.go`. | | R33 | Fixed | OpenRouter analytics and snapshot-projection split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/analytics.go`, `internal/providers/openrouter/snapshot_projection.go` | OpenRouter analytics endpoint parsing/aggregation and dashboard synthesis/projection helpers now live outside the main provider file. 
The main file is now focused on provider setup plus key/credits/account fetch paths. | If the remaining key/account path grows again, split it into a small API helper unit. | | R34 | Fixed | Ollama clock injection | `internal/providers/ollama/ollama.go` | Ollama’s cloud-usage window parsing, local log windows, reset inference, and DB-derived token window logic now use the provider clock instead of direct `time.Now()` calls in behavioral paths. | Reuse the same clock path if more Ollama time-derived metrics are added. | +| R35 | Fixed | OpenRouter account API split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/account_api.go` | OpenRouter key/auth, credits, and key-metadata fetch helpers now live in a dedicated account API unit instead of the main provider file. The coordinator file is down to provider setup and fetch orchestration. | Keep further OpenRouter account mutations inside the account unit. | +| R36 | Fixed | Detail token section decomposition | `internal/tui/detail.go`, `internal/tui/detail_tokens.go` | The detail token section now renders from shared analytics model extraction instead of reverse-parsing token metric keys, and the token-specific renderer lives in its own file. | Continue splitting other detail subsections the same way. | +| R37 | Fixed | Telemetry source account binding and safer fallback | `internal/daemon/source_collectors.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_http.go`, `internal/daemon/server_spool.go`, `cmd/openusage/telemetry.go`, `internal/telemetry/provider_event_mapper.go` | Local collectors and hook ingestion now bind to configured source accounts when unambiguous, ambiguous shared-path setups degrade to explicit source-scoped attribution instead of silently choosing one account, and account fallback prefers source system before upstream provider. | If hook ingest logic is centralized later, keep using the same resolver. 
| ## Action Table | ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | -| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/dashboardapp/service.go` | Side effects are injected and the model file is split, but TUI state-transition and rendering logic is still concentrated in very large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | -| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go` | Composition bars and analytics model views now consume shared extractors, but some analytics/detail sections still decode raw metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. 
| -| A4 | P2 | OpenRouter account/API follow-through | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/analytics.go`, `internal/providers/openrouter/snapshot_projection.go`, `internal/providers/openrouter/generations.go` | The large analytics/projection seams are now split, but the remaining account/key/credits paths still sit together in `openrouter.go` and could be isolated further if that flow keeps growing. | If future OpenRouter changes cluster around account probing or key metadata, split a small `account_api` helper file rather than adding back into `openrouter.go`. | Keeps the provider easy to review as the account/API path evolves. | +| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/dashboardapp/service.go` | Side effects are injected and some detail logic is split, but TUI state-transition and rendering logic is still concentrated in very large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | +| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go` | Composition bars and analytics model views now consume shared extractors, and the token detail table no longer parses raw token keys, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. 
| +| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/codex/codex.go` | Cursor and OpenRouter are now materially decomposed, but several other providers still combine transport, parsing, normalization, and projection in single 1900-2600 LOC files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go` | The usage-view code is materially smaller after the helper/projection/query splits, but the orchestration/materialization path still owns temp-table lifecycle, query fanout, and aggregate assembly in one place. | Continue splitting remaining orchestration/materialization concerns and consider a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | +| A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. 
| Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. | +| A9 | P2 | Hook ingestion duplication | `internal/daemon/server_spool.go`, `cmd/openusage/telemetry.go` | Daemon and CLI fallback still own overlapping hook ingest/spool behavior with different control flow and user messaging. | Extract a shared hook ingest service and keep only transport/output differences at the edges. | Less drift between daemon and CLI ingest behavior. | | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. 
| Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | @@ -71,11 +76,11 @@ This table captures every issue found in this pass. It is broad and high-signal, 1. A2, A3 2. A6, A4 -3. A7, A1 -4. A12, A14, A15 +3. A9, A7, A1 +4. A8, A12, A14, A15 ## Notes - The highest-risk remaining issues are architectural rather than immediately broken behavior. -- The biggest remaining drift risk is the metric-prefix parsing still spread across the TUI render path. +- The biggest remaining drift risks are the metric-prefix parsing still spread across the TUI render path and duplicated hook-ingest control flow across daemon and CLI paths. - The race pass completed cleanly for the core dashboard/daemon/telemetry packages after the timeframe fix. diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md new file mode 100644 index 0000000..76808a9 --- /dev/null +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -0,0 +1,140 @@ +# System Review: Remaining Responsibility and Duplication Gaps + +Date: 2026-03-09 +Repository: `/Users/janekbaraniewski/Workspace/priv/openusage` + +## Scope + +This is a refreshed architecture review after the dashboard race fix, daemon/read-model cleanup, provider parser consolidation, and the recent Cursor/OpenRouter/Ollama/TUI refactors on branch `feat/dashboard-race-parser-cleanups`. + +The goal of this report is not to restate already-fixed issues. It documents the meaningful problems still left in the current tree. + +## What Is No Longer Open + +These were major concerns in earlier reviews and are now materially addressed: + +- Dashboard timeframe race and stale snapshot acceptance. +- Read-model cache dedupe ignoring time window. +- Stringly typed daemon/telemetry time-window flow. 
+- Telemetry source account binding for unambiguous local collectors and hooks. +- Cursor parser/SQLite duplication across dashboard and telemetry paths. +- Codex and Claude Code raw parser duplication. +- OpenRouter provider-resolution, analytics, generation, projection, and account-path monolith sprawl. +- TUI side-effect leakage into config persistence / integration install / provider validation. +- Ollama hot-path `time.Now()` usage in behavioral window/reset logic. + +## Findings + +### 1. [P2] TUI rendering and state handling are still concentrated in a few very large files + +The TUI is much better than before, but [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go), and [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) are still large enough that unrelated concerns move together. + +Refs: +- [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go) +- [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go) +- [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go) +- [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) + +What to address: +- Continue section-level file extraction from `detail.go`. +- Split model orchestration further by update/action/display boundaries. +- Push more typed extractor work out of rendering code. + +### 2. [P2] Some analytics/detail sections still decode raw metric-key conventions in UI code + +The major composition and token-table paths now use shared extractors, but analytics/detail still contain pockets of renderer-owned key interpretation. That is better than before, but it is still a drift vector. 
+ +Refs: +- [analytics.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/analytics.go) +- [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go) +- [usage_breakdowns.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/usage_breakdowns.go) +- [analytics_snapshot.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/analytics_snapshot.go) + +What to address: +- Promote remaining analytics/detail extractors into `internal/core`. +- Keep renderers as display adapters over typed sections. + +### 3. [P2] Several providers are still large mixed-responsibility units + +Cursor and OpenRouter are now in much better shape, but several other providers remain monoliths that mix transport, parsing, normalization, and projection in one place. + +Refs: +- [ollama.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/ollama.go) +- [zai.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/zai.go) +- [gemini_cli.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/gemini_cli.go) +- [copilot.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/copilot.go) +- [claude_code.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/claude_code.go) +- [codex.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/codex/codex.go) + +What to address: +- Split by concern, not by arbitrary line count: +- account/API fetch +- local-data adapters +- projection helpers +- telemetry-specific collectors + +### 4. [P2] Hook ingestion behavior is still duplicated between daemon and CLI fallback + +The daemon and CLI fallback paths still own overlapping hook-ingest/spool behavior. The structure is workable, but the logic can drift. 
+ +Refs: +- [server_spool.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/daemon/server_spool.go) +- [telemetry.go](/Users/janekbaraniewski/Workspace/priv/openusage/cmd/openusage/telemetry.go) + +What to address: +- Extract a shared hook ingest service. +- Keep transport/output concerns at the command/daemon edge. + +### 5. [P3] Ambiguous shared-path local sources still require explicit account disambiguation + +The daemon now binds local telemetry to configured accounts when the source/account mapping is unambiguous. If multiple accounts share the same source paths, it intentionally degrades to source-scoped attribution instead of silently guessing. That is the correct behavior today, but it means truly ambiguous local multi-account setups still need an explicit binding mechanism if they become a first-class use case. + +Refs: +- [source_collectors.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/daemon/source_collectors.go) +- [server_http.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/daemon/server_http.go) +- [telemetry.go](/Users/janekbaraniewski/Workspace/priv/openusage/cmd/openusage/telemetry.go) + +What to address: +- Add persisted source/account alias mapping only if ambiguous local multi-account setups become common. +- Keep ambiguous attribution explicit; do not reintroduce silent account guessing. + +### 6. [P3] Account config contract cleanup is not finished + +The hot-path abuse of `Binary`/`BaseURL` is fixed, but the type still allows path-like runtime hints and canonical provider config to coexist ambiguously. + +Refs: +- [provider.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/provider.go) +- [config.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/config/config.go) + +What to address: +- Introduce a dedicated typed runtime-hints structure. +- Retire compatibility comments and residual semantic ambiguity in `AccountConfig`. + +### 7. 
[P3] Test suites are strong but still expensive to maintain + +Some package tests remain extremely large and inline too much fixture logic. + +Refs: +- [openrouter_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/openrouter/openrouter_test.go) +- [copilot_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/copilot_test.go) +- [usage_view_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view_test.go) +- [config_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/config/config_test.go) + +What to address: +- Extract fixture builders and scenario helpers. +- Keep top-level tests declarative. + +## Recommended Order + +1. Telemetry account identity mapping. +2. TUI extractor/decomposition follow-through. +3. Remaining provider monolith splits. +4. Shared hook ingest service. +5. Account config contract hardening. +6. Test fixture cleanup. + +## Notes + +- The repo is in materially better shape than it was at the start of this cleanup branch. +- The main remaining risks are now architectural and maintainability-oriented rather than immediate correctness regressions. +- The highest near-term drift risk is the duplicated hook-ingest control flow plus the remaining metric-prefix parsing still sitting in TUI render code. 
diff --git a/internal/core/metric_labels.go b/internal/core/metric_labels.go new file mode 100644 index 0000000..89ab379 --- /dev/null +++ b/internal/core/metric_labels.go @@ -0,0 +1,145 @@ +package core + +import "strings" + +var prettifyKeyOverrides = map[string]string{ + "plan_percent_used": "Plan Used", + "plan_total_spend_usd": "Total Plan Spend", + "spend_limit": "Spend Limit", + "individual_spend": "Individual Spend", + "context_window": "Context Window", +} + +func MetricLabel(widget DashboardWidget, key string) string { + if widget.MetricLabelOverrides != nil { + if label, ok := widget.MetricLabelOverrides[key]; ok && label != "" { + return NormalizeMetricLabel(label) + } + } + return NormalizeMetricLabel(PrettifyMetricKey(key)) +} + +func NormalizeMetricLabel(label string) string { + label = strings.TrimSpace(label) + if label == "" { + return label + } + + replacements := []struct { + old string + new string + }{ + {"5h Block", "Usage 5h"}, + {"5-Hour Usage", "Usage 5h"}, + {"5h Usage", "Usage 5h"}, + {"7-Day Usage", "Usage 7d"}, + {"7d Usage", "Usage 7d"}, + } + for _, repl := range replacements { + label = strings.ReplaceAll(label, repl.old, repl.new) + } + return label +} + +func PrettifyUsageMetricLabel(key string, widget DashboardWidget) string { + lastUnderscore := strings.LastIndex(key, "_") + if lastUnderscore > 0 && lastUnderscore < len(key)-1 { + suffix := key[lastUnderscore+1:] + prefix := key[:lastUnderscore] + if suffix == strings.ToUpper(suffix) && len(suffix) > 1 { + return prettifyModelHyphens(prefix) + " " + titleCase(suffix) + } + } + return MetricLabel(widget, key) +} + +func PrettifyMetricKey(key string) string { + if label, ok := prettifyKeyOverrides[key]; ok { + return label + } + parts := strings.Split(key, "_") + for i, p := range parts { + if len(p) > 0 { + parts[i] = strings.ToUpper(p[:1]) + p[1:] + } + } + result := strings.Join(parts, " ") + for _, pair := range [][2]string{ + {"Usd", "USD"}, {"Rpm", "RPM"}, {"Tpm", "TPM"}, + 
{"Rpd", "RPD"}, {"Tpd", "TPD"}, {"Api", "API"}, + } { + result = strings.ReplaceAll(result, pair[0], pair[1]) + } + return result +} + +func ClassifyDetailMetric(key string, m Metric, widget DashboardWidget, details DetailWidget) (group, label string, order int) { + if override, ok := widget.MetricGroupOverrides[key]; ok && override.Group != "" { + label = override.Label + if label == "" { + label = MetricLabel(widget, key) + } + label = NormalizeMetricLabel(label) + order = override.Order + if order <= 0 { + order = detailMetricGroupOrder(details, override.Group, 4) + } + return override.Group, label, order + } + + group = string(InferMetricGroup(key, m)) + label = MetricLabel(widget, key) + switch group { + case string(MetricGroupUsage): + if strings.HasPrefix(key, "rate_limit_") { + label = MetricLabel(widget, strings.TrimPrefix(key, "rate_limit_")) + } else if m.Remaining != nil && m.Limit != nil && m.Unit != "%" && m.Unit != "USD" { + label = PrettifyUsageMetricLabel(key, widget) + } + order = detailMetricGroupOrder(details, group, 1) + case string(MetricGroupSpending): + if strings.HasPrefix(key, "model_") && + !strings.HasSuffix(key, "_input_tokens") && + !strings.HasSuffix(key, "_output_tokens") { + label = strings.TrimPrefix(key, "model_") + } + order = detailMetricGroupOrder(details, group, 2) + case string(MetricGroupTokens): + if strings.HasPrefix(key, "session_") { + label = MetricLabel(widget, strings.TrimPrefix(key, "session_")) + } + order = detailMetricGroupOrder(details, group, 3) + default: + order = detailMetricGroupOrder(details, string(MetricGroupActivity), 4) + group = string(MetricGroupActivity) + } + return group, label, order +} + +func detailMetricGroupOrder(details DetailWidget, group string, fallback int) int { + if order := details.SectionOrder(group); order > 0 { + return order + } + return fallback +} + +func prettifyModelHyphens(name string) string { + parts := strings.Split(name, "-") + for i, p := range parts { + if len(p) == 0 { 
+ continue + } + if p[0] >= '0' && p[0] <= '9' { + continue + } + parts[i] = strings.ToUpper(p[:1]) + p[1:] + } + return strings.Join(parts, " ") +} + +func titleCase(s string) string { + if len(s) <= 1 { + return s + } + return strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) +} diff --git a/internal/core/usage_breakdowns.go b/internal/core/usage_breakdowns.go index 0bc1ce3..86c0d37 100644 --- a/internal/core/usage_breakdowns.go +++ b/internal/core/usage_breakdowns.go @@ -60,6 +60,11 @@ type ClientBreakdownEntry struct { Series []TimePoint } +type ActualToolUsageEntry struct { + RawName string + Calls float64 +} + func ExtractLanguageUsage(s UsageSnapshot) ([]LanguageUsageEntry, map[string]bool) { byLang := make(map[string]float64) usedKeys := make(map[string]bool) @@ -839,6 +844,82 @@ func ExtractInterfaceClientBreakdown(s UsageSnapshot) ([]ClientBreakdownEntry, m return out, usedKeys } +var actualToolAggregateKeys = map[string]bool{ + "tool_calls_total": true, + "tool_completed": true, + "tool_errored": true, + "tool_cancelled": true, + "tool_success_rate": true, +} + +func ExtractActualToolUsage(s UsageSnapshot) ([]ActualToolUsageEntry, map[string]bool) { + byTool := make(map[string]float64) + usedKeys := make(map[string]bool) + + for key, metric := range s.Metrics { + if metric.Used == nil { + continue + } + if !strings.HasPrefix(key, "tool_") { + continue + } + if actualToolAggregateKeys[key] { + usedKeys[key] = true + continue + } + if strings.HasSuffix(key, "_today") || strings.HasSuffix(key, "_1d") || strings.HasSuffix(key, "_7d") || strings.HasSuffix(key, "_30d") { + usedKeys[key] = true + continue + } + name := strings.TrimPrefix(key, "tool_") + if name == "" { + continue + } + if IsMCPToolMetricName(name) { + usedKeys[key] = true + continue + } + byTool[name] += *metric.Used + usedKeys[key] = true + } + + if len(byTool) == 0 { + return nil, usedKeys + } + + out := make([]ActualToolUsageEntry, 0, len(byTool)) + for name, calls := range byTool { + if calls 
<= 0 { + continue + } + out = append(out, ActualToolUsageEntry{ + RawName: name, + Calls: calls, + }) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Calls != out[j].Calls { + return out[i].Calls > out[j].Calls + } + return out[i].RawName < out[j].RawName + }) + return out, usedKeys +} + +func IsMCPToolMetricName(name string) bool { + normalized := strings.ToLower(strings.TrimSpace(name)) + if normalized == "" { + return false + } + if strings.HasPrefix(normalized, "mcp_") { + return true + } + if strings.Contains(normalized, "_mcp_server_") || strings.Contains(normalized, "-mcp-server-") { + return true + } + return strings.HasSuffix(normalized, "_mcp") +} + func parseProjectMetricKey(key string) (name, field string, ok bool) { const prefix = "project_" if !strings.HasPrefix(key, prefix) { diff --git a/internal/core/usage_breakdowns_test.go b/internal/core/usage_breakdowns_test.go index 5933d00..51f13a1 100644 --- a/internal/core/usage_breakdowns_test.go +++ b/internal/core/usage_breakdowns_test.go @@ -209,3 +209,33 @@ func TestExtractInterfaceClientBreakdown(t *testing.T) { t.Fatalf("used keys missing expected interface metrics: %#v", used) } } + +func TestExtractActualToolUsage(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "tool_bash": {Used: Float64Ptr(3)}, + "tool_read": {Used: Float64Ptr(5)}, + "tool_bash_today": {Used: Float64Ptr(1)}, + "tool_calls_total": {Used: Float64Ptr(9)}, + "tool_mcp_github_list_issues": {Used: Float64Ptr(2)}, + "tool_github_mcp_server_get_commit": {Used: Float64Ptr(1)}, + }, + } + + got, used := ExtractActualToolUsage(snap) + if len(got) != 2 { + t.Fatalf("len(got) = %d, want 2", len(got)) + } + if got[0].RawName != "read" || got[0].Calls != 5 { + t.Fatalf("got[0] = %#v, want read/5", got[0]) + } + if got[1].RawName != "bash" || got[1].Calls != 3 { + t.Fatalf("got[1] = %#v, want bash/3", got[1]) + } + if !used["tool_calls_total"] || !used["tool_bash_today"] { + t.Fatalf("used keys missing expected 
tool metrics: %#v", used) + } + if !used["tool_mcp_github_list_issues"] || !used["tool_github_mcp_server_get_commit"] { + t.Fatalf("mcp tool metrics should still be marked used: %#v", used) + } +} diff --git a/internal/daemon/server.go b/internal/daemon/server.go index 13cb94e..0167eb6 100644 --- a/internal/daemon/server.go +++ b/internal/daemon/server.go @@ -18,7 +18,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers" - "github.com/janekbaraniewski/openusage/internal/providers/shared" "github.com/janekbaraniewski/openusage/internal/telemetry" ) @@ -29,7 +28,6 @@ type Service struct { store *telemetry.Store pipeline *telemetry.Pipeline quotaIngest *telemetry.QuotaSnapshotIngestor - collectors []telemetry.Collector providerByID map[string]core.UsageProvider spoolMu sync.Mutex // guards spool filesystem operations (read/write/cleanup) @@ -100,7 +98,6 @@ func startService(ctx context.Context, cfg Config) (*Service, error) { store: store, pipeline: telemetry.NewPipeline(store, telemetry.NewSpool(cfg.SpoolDir)), quotaIngest: telemetry.NewQuotaSnapshotIngestor(store), - collectors: buildCollectors(), providerByID: providersByID(), logThrottle: core.NewLogThrottle(200, 10*time.Minute), rmCache: newReadModelCache(), @@ -114,7 +111,7 @@ func startService(ctx context.Context, cfg Config) (*Service, error) { svc.cfg.SpoolDir, svc.cfg.CollectInterval, svc.cfg.PollInterval, - len(svc.collectors), + telemetrySourceCount(), len(svc.providerByID), ) @@ -292,18 +289,6 @@ func EnsureSocketPathAvailable(socketPath string) error { // --- Helpers --- -func buildCollectors() []telemetry.Collector { - collectors := make([]telemetry.Collector, 0) - for _, provider := range providers.AllProviders() { - source, ok := provider.(shared.TelemetrySource) - if !ok { - continue - } - collectors = append(collectors, telemetry.NewSourceCollector(source, source.DefaultCollectOptions(), "")) - } - return collectors -} - func 
providersByID() map[string]core.UsageProvider { out := make(map[string]core.UsageProvider) for _, provider := range providers.AllProviders() { diff --git a/internal/daemon/server_collect.go b/internal/daemon/server_collect.go index 9080fa9..ec90c8c 100644 --- a/internal/daemon/server_collect.go +++ b/internal/daemon/server_collect.go @@ -36,8 +36,14 @@ func (s *Service) collectAndFlush(ctx context.Context) { var allReqs []telemetry.IngestRequest totalCollected := 0 var warnings []string + accounts, accountsErr := loadTelemetrySourceAccounts() + if accountsErr != nil { + warnings = append(warnings, fmt.Sprintf("collector account config: %v", accountsErr)) + } + collectors, collectorWarnings := buildCollectors(accounts) + warnings = append(warnings, collectorWarnings...) - for _, collector := range s.collectors { + for _, collector := range collectors { reqs, err := collector.Collect(ctx) if err != nil { warnings = append(warnings, fmt.Sprintf("%s: %v", collector.Name(), err)) diff --git a/internal/daemon/server_http.go b/internal/daemon/server_http.go index 82ec5d4..487a5e9 100644 --- a/internal/daemon/server_http.go +++ b/internal/daemon/server_http.go @@ -56,18 +56,18 @@ func (s *Service) handleHook(w http.ResponseWriter, r *http.Request) { } accountID := strings.TrimSpace(r.URL.Query().Get("account_id")) - reqs, err := telemetry.ParseSourceHookPayload(source, payload, source.DefaultCollectOptions(), accountID) + options, effectiveAccountID, warnings := ResolveTelemetrySourceOptions(source, accountID) + reqs, err := telemetry.ParseSourceHookPayload(source, payload, options, effectiveAccountID) if err != nil { writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("parse hook payload: %v", err)) return } if len(reqs) == 0 { - writeJSON(w, http.StatusOK, HookResponse{Source: sourceName}) + writeJSON(w, http.StatusOK, HookResponse{Source: sourceName, Warnings: warnings}) return } tally, _ := s.ingestBatch(r.Context(), reqs) - var warnings []string if tally.failed > 0 { 
warnings = append(warnings, fmt.Sprintf("%d ingest failures", tally.failed)) } @@ -91,14 +91,14 @@ func (s *Service) handleHook(w http.ResponseWriter, r *http.Request) { if tally.failed > 0 { s.warnf(logLevel, "source=%s account_id=%q duration_ms=%d enqueued=%d processed=%d ingested=%d deduped=%d failed=%d", - sourceName, accountID, durationMs, + sourceName, effectiveAccountID, durationMs, len(reqs), tally.processed, tally.ingested, tally.deduped, tally.failed, ) return } s.infof(logLevel, "source=%s account_id=%q duration_ms=%d enqueued=%d processed=%d ingested=%d deduped=%d failed=%d", - sourceName, accountID, durationMs, + sourceName, effectiveAccountID, durationMs, len(reqs), tally.processed, tally.ingested, tally.deduped, tally.failed, ) } diff --git a/internal/daemon/server_spool.go b/internal/daemon/server_spool.go index 177ba99..43ae0b9 100644 --- a/internal/daemon/server_spool.go +++ b/internal/daemon/server_spool.go @@ -178,11 +178,12 @@ func (s *Service) processHookSpool(ctx context.Context, dir string) { continue } + options, effectiveAccountID, _ := ResolveTelemetrySourceOptions(source, strings.TrimSpace(raw.AccountID)) reqs, parseErr := telemetry.ParseSourceHookPayload( source, raw.Payload, - source.DefaultCollectOptions(), - strings.TrimSpace(raw.AccountID), + options, + effectiveAccountID, ) if parseErr != nil || len(reqs) == 0 { _ = os.Remove(path) diff --git a/internal/daemon/source_collectors.go b/internal/daemon/source_collectors.go new file mode 100644 index 0000000..016c732 --- /dev/null +++ b/internal/daemon/source_collectors.go @@ -0,0 +1,300 @@ +package daemon + +import ( + "sort" + "strings" + + "github.com/janekbaraniewski/openusage/internal/config" + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers" + "github.com/janekbaraniewski/openusage/internal/providers/shared" + "github.com/janekbaraniewski/openusage/internal/telemetry" +) + +type sourceCollectorSpec struct { + source 
shared.TelemetrySource + options shared.TelemetryCollectOptions + accountID string +} + +func buildCollectors(accounts []core.AccountConfig) ([]telemetry.Collector, []string) { + specs, warnings := buildSourceCollectorSpecs(accounts) + collectors := make([]telemetry.Collector, 0, len(specs)) + for _, spec := range specs { + collectors = append(collectors, telemetry.NewSourceCollector(spec.source, spec.options, spec.accountID)) + } + return collectors, warnings +} + +func telemetrySourceCount() int { + count := 0 + for _, provider := range providers.AllProviders() { + if _, ok := provider.(shared.TelemetrySource); ok { + count++ + } + } + return count +} + +func ResolveTelemetrySourceOptions( + source shared.TelemetrySource, + requestedAccountID string, +) (shared.TelemetryCollectOptions, string, []string) { + accountID := strings.TrimSpace(requestedAccountID) + if source == nil { + return shared.TelemetryCollectOptions{}, accountID, nil + } + accounts, err := loadTelemetrySourceAccounts() + if err != nil { + opts := cloneCollectOptions(source.DefaultCollectOptions()) + if accountID != "" { + if opts.Paths == nil { + opts.Paths = make(map[string]string) + } + opts.Paths["account_id"] = accountID + } + return opts, accountID, []string{"telemetry config unavailable; using default source options"} + } + + return resolveTelemetrySourceOptionsFromAccounts(source, accounts, accountID) +} + +func loadTelemetrySourceAccounts() ([]core.AccountConfig, error) { + cfg, err := config.Load() + if err != nil { + return nil, err + } + accounts := core.MergeAccounts(cfg.Accounts, cfg.AutoDetectedAccounts) + return ApplyCredentials(accounts), nil +} + +func resolveTelemetrySourceOptionsFromAccounts( + source shared.TelemetrySource, + accounts []core.AccountConfig, + requestedAccountID string, +) (shared.TelemetryCollectOptions, string, []string) { + accountID := strings.TrimSpace(requestedAccountID) + if source == nil { + return shared.TelemetryCollectOptions{}, accountID, nil + } + 
defaults := cloneCollectOptions(source.DefaultCollectOptions()) + + candidates := telemetryAccountsForSource(source, accounts) + if accountID != "" { + for _, acct := range candidates { + if strings.EqualFold(strings.TrimSpace(acct.ID), accountID) { + return collectOptionsForAccount(source, acct), strings.TrimSpace(acct.ID), nil + } + } + if defaults.Paths == nil { + defaults.Paths = make(map[string]string) + } + defaults.Paths["account_id"] = accountID + return defaults, accountID, []string{"telemetry account override not found in config; using source defaults"} + } + + switch len(candidates) { + case 0: + return defaults, "", nil + case 1: + acct := candidates[0] + return collectOptionsForAccount(source, acct), strings.TrimSpace(acct.ID), nil + default: + return defaults, "", []string{"multiple telemetry accounts configured for source; account override required for precise hook attribution"} + } +} + +func buildSourceCollectorSpecs(accounts []core.AccountConfig) ([]sourceCollectorSpec, []string) { + providersBySource := telemetrySourcesBySystem() + sourceNames := make([]string, 0, len(providersBySource)) + for sourceName := range providersBySource { + sourceNames = append(sourceNames, sourceName) + } + sort.Strings(sourceNames) + + specs := make([]sourceCollectorSpec, 0, len(sourceNames)) + var warnings []string + for _, sourceName := range sourceNames { + source := providersBySource[sourceName] + candidates := telemetryAccountsForSource(source, accounts) + if len(candidates) == 0 { + specs = append(specs, sourceCollectorSpec{ + source: source, + options: cloneCollectOptions(source.DefaultCollectOptions()), + }) + continue + } + + groups := make(map[string][]core.AccountConfig) + groupOptions := make(map[string]shared.TelemetryCollectOptions) + groupKeys := make([]string, 0, len(candidates)) + for _, acct := range candidates { + opts := collectOptionsForAccount(source, acct) + key := collectOptionsSignature(opts) + if _, ok := groups[key]; !ok { + groupKeys = 
append(groupKeys, key) + groupOptions[key] = opts + } + groups[key] = append(groups[key], acct) + } + sort.Strings(groupKeys) + + for _, key := range groupKeys { + group := groups[key] + opts := groupOptions[key] + if len(group) == 1 { + specs = append(specs, sourceCollectorSpec{ + source: source, + options: opts, + accountID: strings.TrimSpace(group[0].ID), + }) + continue + } + + accountIDs := make([]string, 0, len(group)) + for _, acct := range group { + accountIDs = append(accountIDs, strings.TrimSpace(acct.ID)) + } + sort.Strings(accountIDs) + delete(opts.Paths, "account_id") + specs = append(specs, sourceCollectorSpec{ + source: source, + options: opts, + }) + warnings = append(warnings, sourceName+": shared telemetry source paths for accounts "+strings.Join(accountIDs, ", ")+": using source-scoped attribution") + } + } + + return specs, warnings +} + +func telemetrySourcesBySystem() map[string]shared.TelemetrySource { + out := make(map[string]shared.TelemetrySource) + for _, provider := range providers.AllProviders() { + source, ok := provider.(shared.TelemetrySource) + if !ok { + continue + } + system := strings.ToLower(strings.TrimSpace(source.System())) + if system == "" { + continue + } + out[system] = source + } + return out +} + +func telemetryAccountsForSource(source shared.TelemetrySource, accounts []core.AccountConfig) []core.AccountConfig { + if source == nil || len(accounts) == 0 { + return nil + } + system := strings.ToLower(strings.TrimSpace(source.System())) + if system == "" { + return nil + } + + out := make([]core.AccountConfig, 0, len(accounts)) + for _, acct := range accounts { + if !strings.EqualFold(strings.TrimSpace(acct.Provider), system) { + continue + } + if strings.TrimSpace(acct.ID) == "" { + continue + } + out = append(out, acct) + } + sort.Slice(out, func(i, j int) bool { return out[i].ID < out[j].ID }) + return out +} + +func collectOptionsForAccount(source shared.TelemetrySource, acct core.AccountConfig) 
shared.TelemetryCollectOptions { + opts := cloneCollectOptions(source.DefaultCollectOptions()) + if opts.Paths == nil { + opts.Paths = make(map[string]string) + } + for key, value := range opts.Paths { + opts.Paths[key] = strings.TrimSpace(acct.Path(key, value)) + } + for key, value := range acct.Paths { + trimmedKey := strings.TrimSpace(key) + trimmedValue := strings.TrimSpace(value) + if trimmedKey == "" || trimmedValue == "" { + continue + } + opts.Paths[trimmedKey] = trimmedValue + } + opts.Paths["account_id"] = strings.TrimSpace(acct.ID) + return opts +} + +func cloneCollectOptions(in shared.TelemetryCollectOptions) shared.TelemetryCollectOptions { + out := shared.TelemetryCollectOptions{} + if len(in.Paths) > 0 { + out.Paths = make(map[string]string, len(in.Paths)) + for key, value := range in.Paths { + out.Paths[key] = strings.TrimSpace(value) + } + } + if len(in.PathLists) > 0 { + out.PathLists = make(map[string][]string, len(in.PathLists)) + for key, values := range in.PathLists { + if len(values) == 0 { + continue + } + cloned := make([]string, 0, len(values)) + for _, value := range values { + if trimmed := strings.TrimSpace(value); trimmed != "" { + cloned = append(cloned, trimmed) + } + } + out.PathLists[key] = cloned + } + } + return out +} + +func collectOptionsSignature(opts shared.TelemetryCollectOptions) string { + pathKeys := make([]string, 0, len(opts.Paths)) + for key, value := range opts.Paths { + trimmedKey := strings.TrimSpace(key) + if trimmedKey == "" || trimmedKey == "account_id" { + continue + } + if strings.TrimSpace(value) == "" { + continue + } + pathKeys = append(pathKeys, trimmedKey) + } + sort.Strings(pathKeys) + + listKeys := make([]string, 0, len(opts.PathLists)) + for key, values := range opts.PathLists { + if strings.TrimSpace(key) == "" || len(values) == 0 { + continue + } + listKeys = append(listKeys, strings.TrimSpace(key)) + } + sort.Strings(listKeys) + + var b strings.Builder + for _, key := range pathKeys { + 
b.WriteString("p:") + b.WriteString(key) + b.WriteByte('=') + b.WriteString(strings.TrimSpace(opts.Paths[key])) + b.WriteByte(';') + } + for _, key := range listKeys { + values := append([]string{}, opts.PathLists[key]...) + for i := range values { + values[i] = strings.TrimSpace(values[i]) + } + sort.Strings(values) + b.WriteString("l:") + b.WriteString(key) + b.WriteByte('=') + b.WriteString(strings.Join(values, ",")) + b.WriteByte(';') + } + return b.String() +} diff --git a/internal/daemon/source_collectors_test.go b/internal/daemon/source_collectors_test.go new file mode 100644 index 0000000..5288655 --- /dev/null +++ b/internal/daemon/source_collectors_test.go @@ -0,0 +1,106 @@ +package daemon + +import ( + "testing" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers" + "github.com/janekbaraniewski/openusage/internal/telemetry" +) + +func TestBuildCollectors_ScopesConfiguredAccount(t *testing.T) { + collectors, warnings := buildCollectors([]core.AccountConfig{ + { + ID: "codex-main", + Provider: "codex", + ExtraData: map[string]string{ + "sessions_dir": "/tmp/codex-main", + }, + }, + }) + + collector := findSourceCollector(t, collectors, "codex") + if collector.AccountOverride != "codex-main" { + t.Fatalf("account override = %q, want codex-main", collector.AccountOverride) + } + if got := collector.Options.Paths["account_id"]; got != "codex-main" { + t.Fatalf("account_id option = %q, want codex-main", got) + } + if got := collector.Options.Paths["sessions_dir"]; got != "/tmp/codex-main" { + t.Fatalf("sessions_dir = %q, want /tmp/codex-main", got) + } + if len(warnings) != 0 { + t.Fatalf("warnings = %v, want none", warnings) + } +} + +func TestBuildCollectors_AmbiguousAccountsFallBackToSourceScope(t *testing.T) { + collectors, warnings := buildCollectors([]core.AccountConfig{ + {ID: "codex-a", Provider: "codex"}, + {ID: "codex-b", Provider: "codex"}, + }) + + collector := findSourceCollector(t, 
collectors, "codex") + if collector.AccountOverride != "" { + t.Fatalf("account override = %q, want empty", collector.AccountOverride) + } + if got := collector.Options.Paths["account_id"]; got != "" { + t.Fatalf("account_id option = %q, want empty", got) + } + if len(warnings) != 1 { + t.Fatalf("warnings len = %d, want 1", len(warnings)) + } +} + +func TestResolveTelemetrySourceOptionsFromAccounts_UsesExplicitAccount(t *testing.T) { + source, ok := providers.TelemetrySourceBySystem("codex") + if !ok { + t.Fatal("codex telemetry source not found") + } + + options, accountID, warnings := resolveTelemetrySourceOptionsFromAccounts(source, []core.AccountConfig{ + { + ID: "codex-a", + Provider: "codex", + ExtraData: map[string]string{ + "sessions_dir": "/tmp/codex-a", + }, + }, + { + ID: "codex-b", + Provider: "codex", + ExtraData: map[string]string{ + "sessions_dir": "/tmp/codex-b", + }, + }, + }, "codex-b") + + if accountID != "codex-b" { + t.Fatalf("account id = %q, want codex-b", accountID) + } + if got := options.Paths["sessions_dir"]; got != "/tmp/codex-b" { + t.Fatalf("sessions_dir = %q, want /tmp/codex-b", got) + } + if got := options.Paths["account_id"]; got != "codex-b" { + t.Fatalf("account_id option = %q, want codex-b", got) + } + if len(warnings) != 0 { + t.Fatalf("warnings = %v, want none", warnings) + } +} + +func findSourceCollector(t *testing.T, collectors []telemetry.Collector, name string) *telemetry.SourceCollector { + t.Helper() + for _, collector := range collectors { + if collector.Name() != name { + continue + } + sourceCollector, ok := collector.(*telemetry.SourceCollector) + if !ok { + t.Fatalf("collector %q has type %T, want *telemetry.SourceCollector", name, collector) + } + return sourceCollector + } + t.Fatalf("collector %q not found", name) + return nil +} diff --git a/internal/telemetry/provider_event_mapper.go b/internal/telemetry/provider_event_mapper.go index 85a496b..d774d41 100644 --- a/internal/telemetry/provider_event_mapper.go +++ 
b/internal/telemetry/provider_event_mapper.go @@ -17,7 +17,7 @@ func mapProviderEvent(sourceSystem string, ev shared.TelemetryEvent, accountOver MessageID: ev.MessageID, ToolCallID: ev.ToolCallID, ProviderID: ev.ProviderID, - AccountID: core.FirstNonEmpty(accountOverride, ev.AccountID, ev.ProviderID, sourceSystem), + AccountID: core.FirstNonEmpty(accountOverride, ev.AccountID, sourceSystem, ev.ProviderID), AgentName: core.FirstNonEmpty(ev.AgentName, sourceSystem), EventType: mapProviderEventType(ev.EventType), ModelRaw: ev.ModelRaw, diff --git a/internal/telemetry/provider_event_mapper_test.go b/internal/telemetry/provider_event_mapper_test.go index ca28f05..04d6394 100644 --- a/internal/telemetry/provider_event_mapper_test.go +++ b/internal/telemetry/provider_event_mapper_test.go @@ -7,7 +7,7 @@ import ( "github.com/janekbaraniewski/openusage/internal/providers/shared" ) -func TestMapProviderEvent_AccountFallbacks(t *testing.T) { +func TestMapProviderEvent_AccountFallsBackToSourceSystemBeforeProvider(t *testing.T) { ev := shared.TelemetryEvent{ Channel: shared.TelemetryChannelHook, OccurredAt: time.Date(2026, time.February, 22, 12, 0, 0, 0, time.UTC), @@ -17,8 +17,8 @@ func TestMapProviderEvent_AccountFallbacks(t *testing.T) { } req := mapProviderEvent("opencode", ev, "") - if req.AccountID != "openrouter" { - t.Fatalf("account_id = %q, want openrouter", req.AccountID) + if req.AccountID != "opencode" { + t.Fatalf("account_id = %q, want opencode", req.AccountID) } } diff --git a/internal/tui/detail.go b/internal/tui/detail.go index 69df938..d0f6f2d 100644 --- a/internal/tui/detail.go +++ b/internal/tui/detail.go @@ -393,110 +393,11 @@ func groupMetrics(metrics map[string]core.Metric, widget core.DashboardWidget, d } func classifyMetric(key string, m core.Metric, widget core.DashboardWidget, details core.DetailWidget) (group, label string, order int) { - if override, ok := widget.MetricGroupOverrides[key]; ok && override.Group != "" { - label = override.Label - if 
label == "" { - label = metricLabel(widget, key) - } - label = normalizeWidgetLabel(label) - order = override.Order - if order <= 0 { - order = groupOrder(details, override.Group, 4) - } - return override.Group, label, order - } - - group = string(core.InferMetricGroup(key, m)) - label = metricLabel(widget, key) - switch group { - case string(core.MetricGroupUsage): - if strings.HasPrefix(key, "rate_limit_") { - label = metricLabel(widget, strings.TrimPrefix(key, "rate_limit_")) - } else if m.Remaining != nil && m.Limit != nil && m.Unit != "%" && m.Unit != "USD" { - label = prettifyUsageKey(key, widget) - } - order = groupOrder(details, group, 1) - case string(core.MetricGroupSpending): - if strings.HasPrefix(key, "model_") && - !strings.HasSuffix(key, "_input_tokens") && - !strings.HasSuffix(key, "_output_tokens") { - label = strings.TrimPrefix(key, "model_") - } - order = groupOrder(details, group, 2) - case string(core.MetricGroupTokens): - if strings.HasPrefix(key, "session_") { - label = metricLabel(widget, strings.TrimPrefix(key, "session_")) - } - order = groupOrder(details, group, 3) - default: - order = groupOrder(details, string(core.MetricGroupActivity), 4) - group = string(core.MetricGroupActivity) - } - return group, label, order -} - -func groupOrder(details core.DetailWidget, group string, fallback int) int { - if order := details.SectionOrder(group); order > 0 { - return order - } - return fallback + return core.ClassifyDetailMetric(key, m, widget, details) } func metricLabel(widget core.DashboardWidget, key string) string { - if widget.MetricLabelOverrides != nil { - if label, ok := widget.MetricLabelOverrides[key]; ok && label != "" { - return normalizeWidgetLabel(label) - } - } - return normalizeWidgetLabel(prettifyKey(key)) -} - -func normalizeWidgetLabel(label string) string { - label = strings.TrimSpace(label) - if label == "" { - return label - } - - replacements := []struct { - old string - new string - }{ - {"5h Block", "Usage 5h"}, - 
{"5-Hour Usage", "Usage 5h"}, - {"5h Usage", "Usage 5h"}, - {"7-Day Usage", "Usage 7d"}, - {"7d Usage", "Usage 7d"}, - } - for _, repl := range replacements { - label = strings.ReplaceAll(label, repl.old, repl.new) - } - return label -} - -func prettifyUsageKey(key string, widget core.DashboardWidget) string { - lastUnderscore := strings.LastIndex(key, "_") - if lastUnderscore > 0 && lastUnderscore < len(key)-1 { - suffix := key[lastUnderscore+1:] - prefix := key[:lastUnderscore] - if suffix == strings.ToUpper(suffix) && len(suffix) > 1 { - return prettifyModelHyphens(prefix) + " " + titleCase(suffix) - } - } - return metricLabel(widget, key) -} - -func prettifyModelHyphens(name string) string { - parts := strings.Split(name, "-") - for i, p := range parts { - if len(p) == 0 { - continue - } - if p[0] >= '0' && p[0] <= '9' { - continue - } - parts[i] = strings.ToUpper(p[:1]) + p[1:] - } - return strings.Join(parts, " ") + return core.MetricLabel(widget, key) } func titleCase(s string) string { @@ -1634,32 +1535,8 @@ func formatDuration(d time.Duration) string { } } -var prettifyKeyOverrides = map[string]string{ - "plan_percent_used": "Plan Used", - "plan_total_spend_usd": "Total Plan Spend", - "spend_limit": "Spend Limit", - "individual_spend": "Individual Spend", - "context_window": "Context Window", -} - func prettifyKey(key string) string { - if label, ok := prettifyKeyOverrides[key]; ok { - return label - } - parts := strings.Split(key, "_") - for i, p := range parts { - if len(p) > 0 { - parts[i] = strings.ToUpper(p[:1]) + p[1:] - } - } - result := strings.Join(parts, " ") - for _, pair := range [][2]string{ - {"Usd", "USD"}, {"Rpm", "RPM"}, {"Tpm", "TPM"}, - {"Rpd", "RPD"}, {"Tpd", "TPD"}, {"Api", "API"}, - } { - result = strings.ReplaceAll(result, pair[0], pair[1]) - } - return result + return core.PrettifyMetricKey(key) } func prettifyModelName(name string) string { diff --git a/internal/tui/tiles_composition.go b/internal/tui/tiles_composition.go index 
92907db..63d6a45 100644 --- a/internal/tui/tiles_composition.go +++ b/internal/tui/tiles_composition.go @@ -1435,34 +1435,14 @@ func buildProviderToolCompositionLines(snap core.UsageSnapshot, innerW int, expa } func collectProviderToolMix(snap core.UsageSnapshot) ([]toolMixEntry, map[string]bool) { - byTool := make(map[string]float64) - usedKeys := make(map[string]bool) - - for key, met := range snap.Metrics { - if met.Used == nil || strings.HasSuffix(key, "_today") { - continue - } - if !strings.HasPrefix(key, "interface_") { - continue - } - name := strings.TrimPrefix(key, "interface_") - if name == "" { - continue - } - byTool[name] += *met.Used - usedKeys[key] = true - } - - tools := make([]toolMixEntry, 0, len(byTool)) - for name, count := range byTool { - if count <= 0 { - continue - } - tools = append(tools, toolMixEntry{name: name, count: count}) + entries, usedKeys := core.ExtractInterfaceClientBreakdown(snap) + tools := make([]toolMixEntry, 0, len(entries)) + for _, entry := range entries { + tools = append(tools, toolMixEntry{ + name: entry.Name, + count: entry.Requests, + }) } - - sortToolMixEntries(tools) - return tools, usedKeys } @@ -1688,61 +1668,17 @@ func buildProviderCodeStatsLines(snap core.UsageSnapshot, widget core.DashboardW return lines, usedKeys } -// actualToolUsage status/aggregate keys that should not appear as individual tool entries. 
-var actualToolAggregateKeys = map[string]bool{ - "tool_calls_total": true, - "tool_completed": true, - "tool_errored": true, - "tool_cancelled": true, - "tool_success_rate": true, -} - func buildActualToolUsageLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { - byTool := make(map[string]float64) - usedKeys := make(map[string]bool) - - for key, met := range snap.Metrics { - if met.Used == nil { - continue - } - if !strings.HasPrefix(key, "tool_") { - continue - } - if actualToolAggregateKeys[key] { - usedKeys[key] = true - continue - } - // Skip time-bucketed variants (e.g. tool_bash_today) — these are - // supplementary metrics that would appear as duplicate entries. - if strings.HasSuffix(key, "_today") || strings.HasSuffix(key, "_1d") || strings.HasSuffix(key, "_7d") || strings.HasSuffix(key, "_30d") { - usedKeys[key] = true - continue - } - name := strings.TrimPrefix(key, "tool_") - if name == "" { - continue - } - // Skip MCP tools — they have their own dedicated section. 
- if isMCPToolMetricName(name) { - usedKeys[key] = true - continue - } - byTool[name] += *met.Used - usedKeys[key] = true - } - - if len(byTool) == 0 { + rawTools, usedKeys := core.ExtractActualToolUsage(snap) + if len(rawTools) == 0 { return nil, usedKeys } - allTools := make([]toolMixEntry, 0, len(byTool)) + allTools := make([]toolMixEntry, 0, len(rawTools)) var totalCalls float64 - for name, count := range byTool { - if count <= 0 { - continue - } - allTools = append(allTools, toolMixEntry{name: name, count: count}) - totalCalls += count + for _, rawTool := range rawTools { + allTools = append(allTools, toolMixEntry{name: rawTool.RawName, count: rawTool.Calls}) + totalCalls += rawTool.Calls } if totalCalls <= 0 { return nil, nil @@ -1809,20 +1745,6 @@ func buildActualToolUsageLines(snap core.UsageSnapshot, innerW int, expanded boo return lines, usedKeys } -func isMCPToolMetricName(name string) bool { - normalized := strings.ToLower(strings.TrimSpace(name)) - if normalized == "" { - return false - } - if strings.HasPrefix(normalized, "mcp_") { - return true - } - if strings.Contains(normalized, "_mcp_server_") || strings.Contains(normalized, "-mcp-server-") { - return true - } - return strings.HasSuffix(normalized, "_mcp") -} - func buildMCPUsageLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { type funcEntry struct { name string From e7565a81660c439a7178c4548ea5abe8641573cd Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 15:55:25 +0100 Subject: [PATCH 16/32] refactor: replace duplicate utility helpers and move legacy path shims to providers --- internal/config/config.go | 1 - internal/core/collections.go | 23 ++++++++++ internal/core/provider.go | 25 ----------- internal/core/provider_test.go | 43 ------------------ internal/providers/claude_code/claude_code.go | 2 +- .../providers/claude_code/legacy_paths.go | 19 ++++++++ .../claude_code/legacy_paths_test.go | 23 ++++++++++ 
internal/providers/codex/codex.go | 31 +++---------- internal/providers/copilot/copilot.go | 22 +-------- internal/providers/copilot/copilot_test.go | 4 +- internal/providers/cursor/fetch.go | 2 +- internal/providers/cursor/legacy_paths.go | 19 ++++++++ .../providers/cursor/legacy_paths_test.go | 23 ++++++++++ internal/providers/gemini_cli/gemini_cli.go | 35 +++------------ internal/providers/ollama/ollama.go | 45 +++++-------------- internal/providers/openrouter/analytics.go | 23 +++------- internal/providers/openrouter/generations.go | 6 +-- .../openrouter/snapshot_projection.go | 4 +- internal/providers/zai/zai.go | 29 +++--------- internal/telemetry/usage_view_projection.go | 15 ------- internal/telemetry/usage_view_queries.go | 4 +- internal/tui/analytics.go | 45 +++++-------------- internal/tui/charts.go | 10 ++--- internal/tui/tiles_composition.go | 18 -------- 24 files changed, 176 insertions(+), 295 deletions(-) create mode 100644 internal/core/collections.go delete mode 100644 internal/core/provider_test.go create mode 100644 internal/providers/claude_code/legacy_paths.go create mode 100644 internal/providers/claude_code/legacy_paths_test.go create mode 100644 internal/providers/cursor/legacy_paths.go create mode 100644 internal/providers/cursor/legacy_paths_test.go diff --git a/internal/config/config.go b/internal/config/config.go index e982359..2a5273d 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -226,7 +226,6 @@ func normalizeAccounts(in []core.AccountConfig) []core.AccountConfig { } normalized := lo.Map(in, func(acct core.AccountConfig, _ int) core.AccountConfig { acct.ID = normalizeAccountID(acct.ID) - acct.NormalizeRuntimePaths() return acct }) filtered := lo.Filter(normalized, func(acct core.AccountConfig, _ int) bool { return acct.ID != "" }) diff --git a/internal/core/collections.go b/internal/core/collections.go new file mode 100644 index 0000000..83c9d61 --- /dev/null +++ b/internal/core/collections.go @@ -0,0 +1,23 
@@ +package core + +import ( + "maps" + "slices" + "strings" +) + +func SortedTimePoints(values map[string]float64) []TimePoint { + if len(values) == 0 { + return nil + } + + keys := slices.Sorted(maps.Keys(values)) + points := make([]TimePoint, 0, len(keys)) + for _, key := range keys { + if strings.TrimSpace(key) == "" { + continue + } + points = append(points, TimePoint{Date: key, Value: values[key]}) + } + return points +} diff --git a/internal/core/provider.go b/internal/core/provider.go index 92739e9..aee5267 100644 --- a/internal/core/provider.go +++ b/internal/core/provider.go @@ -62,31 +62,6 @@ func (c *AccountConfig) SetPath(key, value string) { c.Paths[key] = strings.TrimSpace(value) } -// NormalizeRuntimePaths migrates legacy provider-specific path overloads out of -// Binary/BaseURL into Paths so runtime code can use a single access pattern. -func (c *AccountConfig) NormalizeRuntimePaths() { - if c == nil { - return - } - - switch strings.TrimSpace(c.Provider) { - case "cursor": - if strings.TrimSpace(c.Binary) != "" { - c.SetPath("tracking_db", c.Binary) - } - if strings.TrimSpace(c.BaseURL) != "" { - c.SetPath("state_db", c.BaseURL) - } - case "claude_code": - if strings.TrimSpace(c.Binary) != "" { - c.SetPath("stats_cache", c.Binary) - } - if strings.TrimSpace(c.BaseURL) != "" { - c.SetPath("account_config", c.BaseURL) - } - } -} - func (c AccountConfig) ResolveAPIKey() string { if c.Token != "" { return c.Token diff --git a/internal/core/provider_test.go b/internal/core/provider_test.go deleted file mode 100644 index 003a9ed..0000000 --- a/internal/core/provider_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package core - -import "testing" - -func TestAccountConfigNormalizeRuntimePaths(t *testing.T) { - tests := []struct { - name string - account AccountConfig - wantKey string - wantPath string - }{ - { - name: "cursor migrates legacy db fields", - account: AccountConfig{ - Provider: "cursor", - Binary: "/tmp/tracking.db", - BaseURL: "/tmp/state.vscdb", 
- }, - wantKey: "tracking_db", - wantPath: "/tmp/tracking.db", - }, - { - name: "claude migrates legacy config fields", - account: AccountConfig{ - Provider: "claude_code", - Binary: "/tmp/stats-cache.json", - BaseURL: "/tmp/.claude.json", - }, - wantKey: "stats_cache", - wantPath: "/tmp/stats-cache.json", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - acct := tt.account - acct.NormalizeRuntimePaths() - if got := acct.Path(tt.wantKey, ""); got != tt.wantPath { - t.Fatalf("Path(%q) = %q, want %q", tt.wantKey, got, tt.wantPath) - } - }) - } -} diff --git a/internal/providers/claude_code/claude_code.go b/internal/providers/claude_code/claude_code.go index 240704a..03eda4e 100644 --- a/internal/providers/claude_code/claude_code.go +++ b/internal/providers/claude_code/claude_code.go @@ -322,7 +322,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa home = filepath.Dir(claudeDir) // derive "home" from the override } - acct.NormalizeRuntimePaths() + normalizeLegacyPaths(&acct) statsPath := acct.Path("stats_cache", "") accountPath := acct.Path("account_config", "") diff --git a/internal/providers/claude_code/legacy_paths.go b/internal/providers/claude_code/legacy_paths.go new file mode 100644 index 0000000..602eb45 --- /dev/null +++ b/internal/providers/claude_code/legacy_paths.go @@ -0,0 +1,19 @@ +package claude_code + +import ( + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func normalizeLegacyPaths(acct *core.AccountConfig) { + if acct == nil { + return + } + if strings.TrimSpace(acct.Binary) != "" { + acct.SetPath("stats_cache", acct.Binary) + } + if strings.TrimSpace(acct.BaseURL) != "" { + acct.SetPath("account_config", acct.BaseURL) + } +} diff --git a/internal/providers/claude_code/legacy_paths_test.go b/internal/providers/claude_code/legacy_paths_test.go new file mode 100644 index 0000000..d465575 --- /dev/null +++ b/internal/providers/claude_code/legacy_paths_test.go 
@@ -0,0 +1,23 @@ +package claude_code + +import ( + "testing" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestNormalizeLegacyPaths(t *testing.T) { + acct := core.AccountConfig{ + Binary: "/tmp/stats-cache.json", + BaseURL: "/tmp/.claude.json", + } + + normalizeLegacyPaths(&acct) + + if got := acct.Path("stats_cache", ""); got != "/tmp/stats-cache.json" { + t.Fatalf("stats_cache = %q, want /tmp/stats-cache.json", got) + } + if got := acct.Path("account_config", ""); got != "/tmp/.claude.json" { + t.Fatalf("account_config = %q, want /tmp/.claude.json", got) + } +} diff --git a/internal/providers/codex/codex.go b/internal/providers/codex/codex.go index 6d7033c..ddf65c8 100644 --- a/internal/providers/codex/codex.go +++ b/internal/providers/codex/codex.go @@ -1367,12 +1367,12 @@ func emitProductivityMetrics(stats patchStats, promptCount, commits, totalReques func emitDailyUsageSeries(dailyTokenTotals, dailyRequestTotals map[string]float64, interfaceDaily map[string]map[string]float64, snap *core.UsageSnapshot) { if len(dailyTokenTotals) > 0 { - points := mapToSortedTimePoints(dailyTokenTotals) + points := core.SortedTimePoints(dailyTokenTotals) snap.DailySeries["analytics_tokens"] = points snap.DailySeries["tokens_total"] = points } if len(dailyRequestTotals) > 0 { - points := mapToSortedTimePoints(dailyRequestTotals) + points := core.SortedTimePoints(dailyRequestTotals) snap.DailySeries["analytics_requests"] = points snap.DailySeries["requests"] = points } @@ -1381,8 +1381,8 @@ func emitDailyUsageSeries(dailyTokenTotals, dailyRequestTotals map[string]float6 continue } key := sanitizeMetricName(name) - snap.DailySeries["usage_client_"+key] = mapToSortedTimePoints(byDay) - snap.DailySeries["usage_source_"+key] = mapToSortedTimePoints(byDay) + snap.DailySeries["usage_client_"+key] = core.SortedTimePoints(byDay) + snap.DailySeries["usage_source_"+key] = core.SortedTimePoints(byDay) } } @@ -1404,7 +1404,7 @@ func formatCountSummary(entries 
[]countEntry, max int) string { parts := make([]string, 0, limit+1) for i := 0; i < limit; i++ { pct := float64(entries[i].count) / float64(total) * 100 - parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entries[i].name, formatTokenCount(entries[i].count), pct)) + parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entries[i].name, shared.FormatTokenCount(entries[i].count), pct)) } if len(entries) > limit { parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) @@ -1435,7 +1435,7 @@ func emitBreakdownMetrics(prefix string, totals map[string]tokenUsage, daily map } if byDay, ok := daily[entry.Name]; ok { - series := mapToSortedTimePoints(byDay) + series := core.SortedTimePoints(byDay) snap.DailySeries["tokens_"+prefix+"_"+sanitizeMetricName(entry.Name)] = series snap.DailySeries["usage_"+prefix+"_"+sanitizeMetricName(entry.Name)] = series } @@ -1557,7 +1557,7 @@ func formatUsageSummary(entries []usageEntry, max int) string { for i := 0; i < limit; i++ { entry := entries[i] pct := float64(entry.Data.TotalTokens) / float64(total) * 100 - parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, formatTokenCount(entry.Data.TotalTokens), pct)) + parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, shared.FormatTokenCount(entry.Data.TotalTokens), pct)) } if len(entries) > limit { @@ -1566,8 +1566,6 @@ func formatUsageSummary(entries []usageEntry, max int) string { return strings.Join(parts, ", ") } -func formatTokenCount(value int) string { return shared.FormatTokenCount(value) } - func usageDelta(current, previous tokenUsage) tokenUsage { return tokenUsage{ InputTokens: current.InputTokens - previous.InputTokens, @@ -1693,21 +1691,6 @@ func dayFromSessionPath(path, sessionsDir string) string { return candidate } -func mapToSortedTimePoints(byDate map[string]float64) []core.TimePoint { - if len(byDate) == 0 { - return nil - } - - keys := lo.Keys(byDate) - sort.Strings(keys) - - points := make([]core.TimePoint, 0, len(keys)) - for _, date 
:= range keys { - points = append(points, core.TimePoint{Date: date, Value: byDate[date]}) - } - return points -} - func (p *Provider) applyRateLimitStatus(snap *core.UsageSnapshot) { if snap.Status == core.StatusAuth || snap.Status == core.StatusError || snap.Status == core.StatusUnknown || snap.Status == core.StatusUnsupported { return diff --git a/internal/providers/copilot/copilot.go b/internal/providers/copilot/copilot.go index e50a025..968150c 100644 --- a/internal/providers/copilot/copilot.go +++ b/internal/providers/copilot/copilot.go @@ -1598,7 +1598,7 @@ func (p *Provider) readSessions(copilotDir string, snap *core.UsageSnapshot, log snap.Metrics["context_window"] = core.Metric{ Limit: &limit, Used: core.Float64Ptr(sessionTokens), - Remaining: core.Float64Ptr(maxFloat(limit-sessionTokens, 0)), + Remaining: core.Float64Ptr(max(limit-sessionTokens, 0)), Unit: "tokens", Window: "session", } @@ -1780,20 +1780,9 @@ func parseSimpleYAML(content string) map[string]string { return result } -func mapToSeries(m map[string]float64) []core.TimePoint { - pts := make([]core.TimePoint, 0, len(m)) - for date, val := range m { - pts = append(pts, core.TimePoint{Date: date, Value: val}) - } - sort.Slice(pts, func(i, j int) bool { - return pts[i].Date < pts[j].Date - }) - return pts -} - func storeSeries(snap *core.UsageSnapshot, key string, m map[string]float64) { if len(m) > 0 { - snap.DailySeries[key] = mapToSeries(m) + snap.DailySeries[key] = core.SortedTimePoints(m) } } @@ -2356,13 +2345,6 @@ func clampPercent(v float64) float64 { return v } -func maxFloat(a, b float64) float64 { - if a > b { - return a - } - return b -} - func sanitizeMetricName(name string) string { name = strings.ToLower(strings.TrimSpace(name)) if name == "" { diff --git a/internal/providers/copilot/copilot_test.go b/internal/providers/copilot/copilot_test.go index c86393e..ebc251a 100644 --- a/internal/providers/copilot/copilot_test.go +++ b/internal/providers/copilot/copilot_test.go @@ -71,7 
+71,7 @@ func TestMapToSeries(t *testing.T) { "2026-02-18": 3, "2026-02-19": 7, } - series := mapToSeries(m) + series := core.SortedTimePoints(m) if len(series) != 3 { t.Fatalf("expected 3 points, got %d", len(series)) } @@ -87,7 +87,7 @@ func TestMapToSeries(t *testing.T) { } func TestMapToSeries_Empty(t *testing.T) { - series := mapToSeries(map[string]float64{}) + series := core.SortedTimePoints(map[string]float64{}) if len(series) != 0 { t.Errorf("expected 0 points, got %d", len(series)) } diff --git a/internal/providers/cursor/fetch.go b/internal/providers/cursor/fetch.go index ba7849b..9bc1034 100644 --- a/internal/providers/cursor/fetch.go +++ b/internal/providers/cursor/fetch.go @@ -33,7 +33,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa } } - acct.NormalizeRuntimePaths() + normalizeLegacyPaths(&acct) trackingDBPath := acct.Path("tracking_db", "") stateDBPath := acct.Path("state_db", "") diff --git a/internal/providers/cursor/legacy_paths.go b/internal/providers/cursor/legacy_paths.go new file mode 100644 index 0000000..8197f8a --- /dev/null +++ b/internal/providers/cursor/legacy_paths.go @@ -0,0 +1,19 @@ +package cursor + +import ( + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func normalizeLegacyPaths(acct *core.AccountConfig) { + if acct == nil { + return + } + if strings.TrimSpace(acct.Binary) != "" { + acct.SetPath("tracking_db", acct.Binary) + } + if strings.TrimSpace(acct.BaseURL) != "" { + acct.SetPath("state_db", acct.BaseURL) + } +} diff --git a/internal/providers/cursor/legacy_paths_test.go b/internal/providers/cursor/legacy_paths_test.go new file mode 100644 index 0000000..35759e0 --- /dev/null +++ b/internal/providers/cursor/legacy_paths_test.go @@ -0,0 +1,23 @@ +package cursor + +import ( + "testing" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestNormalizeLegacyPaths(t *testing.T) { + acct := core.AccountConfig{ + Binary: "/tmp/tracking.db", + 
BaseURL: "/tmp/state.vscdb", + } + + normalizeLegacyPaths(&acct) + + if got := acct.Path("tracking_db", ""); got != "/tmp/tracking.db" { + t.Fatalf("tracking_db = %q, want /tmp/tracking.db", got) + } + if got := acct.Path("state_db", ""); got != "/tmp/state.vscdb" { + t.Fatalf("state_db = %q, want /tmp/state.vscdb", got) + } +} diff --git a/internal/providers/gemini_cli/gemini_cli.go b/internal/providers/gemini_cli/gemini_cli.go index 813b56f..99d4a9d 100644 --- a/internal/providers/gemini_cli/gemini_cli.go +++ b/internal/providers/gemini_cli/gemini_cli.go @@ -7,11 +7,13 @@ import ( "fmt" "io" "log" + "maps" "net/http" "net/url" "os" "os/exec" "path/filepath" + "slices" "sort" "strconv" "strings" @@ -1040,15 +1042,8 @@ func mapKeysSorted(values map[string]bool) []string { if len(values) == 0 { return nil } - out := make([]string, 0, len(values)) - for key := range values { - if strings.TrimSpace(key) == "" { - continue - } - out = append(out, key) - } - sort.Strings(out) - return out + out := slices.Sorted(maps.Keys(values)) + return slices.DeleteFunc(out, func(key string) bool { return strings.TrimSpace(key) == "" }) } func formatGeminiNameList(values []string, max int) string { @@ -1608,7 +1603,7 @@ func emitBreakdownMetrics(prefix string, totals map[string]tokenUsage, daily map if byDay, ok := daily[entry.Name]; ok { seriesKey := "tokens_" + prefix + "_" + sanitizeMetricName(entry.Name) - snap.DailySeries[seriesKey] = mapToSortedTimePoints(byDay) + snap.DailySeries[seriesKey] = core.SortedTimePoints(byDay) } if prefix == "model" { @@ -1827,7 +1822,7 @@ func formatUsageSummary(entries []usageEntry, max int) string { for i := 0; i < limit; i++ { entry := entries[i] pct := float64(entry.Data.TotalTokens) / float64(total) * 100 - parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, formatTokenCount(entry.Data.TotalTokens), pct)) + parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, shared.FormatTokenCount(entry.Data.TotalTokens), pct)) } if 
len(entries) > limit { parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) @@ -2148,8 +2143,6 @@ func inferGeminiLanguageFromPath(path string) string { return "" } -func formatTokenCount(value int) string { return shared.FormatTokenCount(value) } - func usageDelta(current, previous tokenUsage) tokenUsage { return tokenUsage{ InputTokens: current.InputTokens - previous.InputTokens, @@ -2271,25 +2264,11 @@ func dayFromSession(startTime, lastUpdated string) string { return dayFromTimestamp(startTime) } -func mapToSortedTimePoints(byDate map[string]float64) []core.TimePoint { - if len(byDate) == 0 { - return nil - } - keys := lo.Keys(byDate) - sort.Strings(keys) - - points := make([]core.TimePoint, 0, len(keys)) - for _, date := range keys { - points = append(points, core.TimePoint{Date: date, Value: byDate[date]}) - } - return points -} - func storeSeries(snap *core.UsageSnapshot, key string, values map[string]float64) { if len(values) == 0 { return } - snap.DailySeries[key] = mapToSortedTimePoints(values) + snap.DailySeries[key] = core.SortedTimePoints(values) } func latestSeriesValue(values map[string]float64) (string, float64) { diff --git a/internal/providers/ollama/ollama.go b/internal/providers/ollama/ollama.go index 972c52a..d6b9f1f 100644 --- a/internal/providers/ollama/ollama.go +++ b/internal/providers/ollama/ollama.go @@ -26,7 +26,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/parsers" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" "github.com/janekbaraniewski/openusage/internal/providers/shared" - "github.com/samber/lo" ) const ( @@ -822,7 +821,7 @@ func (p *Provider) fetchServerLogs(acct core.AccountConfig, snap *core.UsageSnap setValueMetric(snap, "avg_latency_ms_today", avgMs, "ms", "today") } - snap.DailySeries["requests"] = mapToSortedTimePoints(metrics.dailyRequests) + snap.DailySeries["requests"] = core.SortedTimePoints(metrics.dailyRequests) return true, nil } @@ -1527,9 +1526,9 @@ func 
populateModelUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageS for model, byDate := range perModelDaily { seriesKey := "requests_model_" + sanitizeMetricPart(model) - snap.DailySeries[seriesKey] = mapToSortedTimePoints(byDate) + snap.DailySeries[seriesKey] = core.SortedTimePoints(byDate) usageSeriesKey := "usage_model_" + sanitizeMetricPart(model) - snap.DailySeries[usageSeriesKey] = mapToSortedTimePoints(byDate) + snap.DailySeries[usageSeriesKey] = core.SortedTimePoints(byDate) } return nil @@ -1737,7 +1736,7 @@ func populateEstimatedTokenUsageFromDB(ctx context.Context, db *sql.DB, snap *co return topModels[i].tok > topModels[j].tok }) if len(topModels) > 0 { - top := make([]string, 0, minInt(len(topModels), 6)) + top := make([]string, 0, min(len(topModels), 6)) for i := 0; i < len(topModels) && i < 6; i++ { top = append(top, fmt.Sprintf("%s=%.0f", topModels[i].name, topModels[i].tok)) } @@ -1763,25 +1762,25 @@ func populateEstimatedTokenUsageFromDB(ctx context.Context, db *sql.DB, snap *co if len(byDay) == 0 { continue } - snap.DailySeries["tokens_client_"+sourceKey] = mapToSortedTimePoints(byDay) + snap.DailySeries["tokens_client_"+sourceKey] = core.SortedTimePoints(byDay) } for sourceKey, byDay := range sourceDailyRequests { if len(byDay) == 0 { continue } - snap.DailySeries["usage_client_"+sourceKey] = mapToSortedTimePoints(byDay) + snap.DailySeries["usage_client_"+sourceKey] = core.SortedTimePoints(byDay) } for modelKey, byDay := range modelDailyTokens { if len(byDay) == 0 { continue } - snap.DailySeries["tokens_model_"+modelKey] = mapToSortedTimePoints(byDay) + snap.DailySeries["tokens_model_"+modelKey] = core.SortedTimePoints(byDay) } if len(dailyTokens) > 0 { - snap.DailySeries["analytics_tokens"] = mapToSortedTimePoints(dailyTokens) + snap.DailySeries["analytics_tokens"] = core.SortedTimePoints(dailyTokens) } if len(dailyRequests) > 0 { - snap.DailySeries["analytics_requests"] = mapToSortedTimePoints(dailyRequests) + 
snap.DailySeries["analytics_requests"] = core.SortedTimePoints(dailyRequests) } if tokensToday > 0 { @@ -1808,13 +1807,6 @@ func estimateTokensFromChars(chars int) float64 { return float64((chars + 3) / 4) } -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - func populateSourceUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { allTimeRows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) FROM messages @@ -1916,7 +1908,7 @@ func populateSourceUsageFromDB(ctx context.Context, db *sql.DB, snap *core.Usage if len(byDay) == 0 { continue } - snap.DailySeries["usage_source_"+sourceKey] = mapToSortedTimePoints(byDay) + snap.DailySeries["usage_source_"+sourceKey] = core.SortedTimePoints(byDay) } return nil @@ -1998,7 +1990,7 @@ func populateToolUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSn if len(byDay) == 0 { continue } - snap.DailySeries["usage_tool_"+toolKey] = mapToSortedTimePoints(byDay) + snap.DailySeries["usage_tool_"+toolKey] = core.SortedTimePoints(byDay) } return nil @@ -2050,7 +2042,7 @@ func populateDailySeriesFromDB(ctx context.Context, db *sql.DB, snap *core.Usage } rows.Close() if len(byDate) > 0 { - points := mapToSortedTimePoints(byDate) + points := core.SortedTimePoints(byDate) snap.DailySeries[dq.key] = points if dq.key == "requests_user" { if _, exists := snap.DailySeries["requests"]; !exists { @@ -2224,19 +2216,6 @@ func doJSONPostRequest(ctx context.Context, url string, body any, out any, clien return resp.StatusCode, nil } -func mapToSortedTimePoints(values map[string]float64) []core.TimePoint { - if len(values) == 0 { - return nil - } - keys := lo.Keys(values) - sort.Strings(keys) - series := make([]core.TimePoint, 0, len(keys)) - for _, key := range keys { - series = append(series, core.TimePoint{Date: key, Value: values[key]}) - } - return series -} - func sanitizeMetricPart(input string) string { s := strings.ToLower(strings.TrimSpace(input)) s = 
nonAlnumRe.ReplaceAllString(s, "_") diff --git a/internal/providers/openrouter/analytics.go b/internal/providers/openrouter/analytics.go index 9d202d0..bd6626e 100644 --- a/internal/providers/openrouter/analytics.go +++ b/internal/providers/openrouter/analytics.go @@ -275,22 +275,22 @@ func (p *Provider) fetchAnalytics(ctx context.Context, baseURL, apiKey string, s } if len(costByDate) > 0 { - snap.DailySeries["analytics_cost"] = mapToSortedTimePoints(costByDate) + snap.DailySeries["analytics_cost"] = core.SortedTimePoints(costByDate) } if len(tokensByDate) > 0 { - snap.DailySeries["analytics_tokens"] = mapToSortedTimePoints(tokensByDate) + snap.DailySeries["analytics_tokens"] = core.SortedTimePoints(tokensByDate) } if len(requestsByDate) > 0 { - snap.DailySeries["analytics_requests"] = mapToSortedTimePoints(requestsByDate) + snap.DailySeries["analytics_requests"] = core.SortedTimePoints(requestsByDate) } if len(byokCostByDate) > 0 { - snap.DailySeries["analytics_byok_cost"] = mapToSortedTimePoints(byokCostByDate) + snap.DailySeries["analytics_byok_cost"] = core.SortedTimePoints(byokCostByDate) } if len(reasoningTokensByDate) > 0 { - snap.DailySeries["analytics_reasoning_tokens"] = mapToSortedTimePoints(reasoningTokensByDate) + snap.DailySeries["analytics_reasoning_tokens"] = core.SortedTimePoints(reasoningTokensByDate) } if len(cachedTokensByDate) > 0 { - snap.DailySeries["analytics_cached_tokens"] = mapToSortedTimePoints(cachedTokensByDate) + snap.DailySeries["analytics_cached_tokens"] = core.SortedTimePoints(cachedTokensByDate) } if totalCost > 0 { @@ -709,17 +709,6 @@ func emitAnalyticsEndpointMetrics(snap *core.UsageSnapshot, endpointStatsMap map } } -func mapToSortedTimePoints(m map[string]float64) []core.TimePoint { - points := make([]core.TimePoint, 0, len(m)) - for date, val := range m { - points = append(points, core.TimePoint{Date: date, Value: val}) - } - sort.Slice(points, func(i, j int) bool { - return points[i].Date < points[j].Date - }) - return 
points -} - func parseAPIErrorMessage(body []byte) string { var apiErr apiErrorResponse if err := json.Unmarshal(body, &apiErr); err != nil { diff --git a/internal/providers/openrouter/generations.go b/internal/providers/openrouter/generations.go index 420542e..7be764a 100644 --- a/internal/providers/openrouter/generations.go +++ b/internal/providers/openrouter/generations.go @@ -429,8 +429,8 @@ func (p *Provider) fetchGenerationStats(ctx context.Context, baseURL, apiKey str snap.Metrics["daily_projected"] = core.Metric{Used: &dailyProjected, Unit: "USD", Window: "24h"} } - snap.DailySeries["cost"] = mapToSortedTimePoints(dailyCost) - snap.DailySeries["requests"] = mapToSortedTimePoints(dailyRequests) + snap.DailySeries["cost"] = core.SortedTimePoints(dailyCost) + snap.DailySeries["requests"] = core.SortedTimePoints(dailyRequests) emitClientDailySeries(snap, dailyProviderTokens, dailyProviderRequests) type modelTokenTotal struct { @@ -454,7 +454,7 @@ func (p *Provider) fetchGenerationStats(ctx context.Context, baseURL, apiKey str topN = len(modelTotals) } for _, modelTotal := range modelTotals[:topN] { - snap.DailySeries["tokens_"+sanitizeName(modelTotal.model)] = mapToSortedTimePoints(modelTotal.byDate) + snap.DailySeries["tokens_"+sanitizeName(modelTotal.model)] = core.SortedTimePoints(modelTotal.byDate) } hasAnalyticsModelRows := strings.TrimSpace(snap.Raw["activity_rows"]) != "" && strings.TrimSpace(snap.Raw["activity_rows"]) != "0" diff --git a/internal/providers/openrouter/snapshot_projection.go b/internal/providers/openrouter/snapshot_projection.go index a7c7b2e..b86a333 100644 --- a/internal/providers/openrouter/snapshot_projection.go +++ b/internal/providers/openrouter/snapshot_projection.go @@ -168,13 +168,13 @@ func emitClientDailySeries(snap *core.UsageSnapshot, tokensByClient, requestsByC if client == "" || len(byDate) == 0 { continue } - snap.DailySeries["tokens_client_"+client] = mapToSortedTimePoints(byDate) + 
snap.DailySeries["tokens_client_"+client] = core.SortedTimePoints(byDate) } for client, byDate := range requestsByClient { if client == "" || len(byDate) == 0 { continue } - snap.DailySeries["usage_client_"+client] = mapToSortedTimePoints(byDate) + snap.DailySeries["usage_client_"+client] = core.SortedTimePoints(byDate) } } diff --git a/internal/providers/zai/zai.go b/internal/providers/zai/zai.go index 1208672..64c8913 100644 --- a/internal/providers/zai/zai.go +++ b/internal/providers/zai/zai.go @@ -1139,9 +1139,9 @@ func applyModelUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { setUsedMetric(snap, "active_languages", float64(len(languageTotals)), "languages", "7d") setUsedMetric(snap, "activity_providers", float64(len(providerTotals)), "providers", "7d") - snap.DailySeries["cost"] = mapToSeries(dailyCost) - snap.DailySeries["requests"] = mapToSeries(dailyReq) - snap.DailySeries["tokens"] = mapToSeries(dailyTokens) + snap.DailySeries["cost"] = core.SortedTimePoints(dailyCost) + snap.DailySeries["requests"] = core.SortedTimePoints(dailyReq) + snap.DailySeries["tokens"] = core.SortedTimePoints(dailyTokens) type modelTotal struct { name string @@ -1158,7 +1158,7 @@ func applyModelUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { for _, entry := range ranked { if dayMap, ok := modelDailyTokens[entry.name]; ok { key := "tokens_" + sanitizeMetricSlug(entry.name) - snap.DailySeries[key] = mapToSeries(dayMap) + snap.DailySeries[key] = core.SortedTimePoints(dayMap) } } @@ -1166,13 +1166,13 @@ func applyModelUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { if len(dayMap) == 0 { continue } - snap.DailySeries["usage_client_"+sanitizeMetricSlug(client)] = mapToSeries(dayMap) + snap.DailySeries["usage_client_"+sanitizeMetricSlug(client)] = core.SortedTimePoints(dayMap) } for source, dayMap := range sourceDailyReq { if len(dayMap) == 0 { continue } - snap.DailySeries["usage_source_"+sanitizeMetricSlug(source)] = mapToSeries(dayMap) + 
snap.DailySeries["usage_source_"+sanitizeMetricSlug(source)] = core.SortedTimePoints(dayMap) } modelShare := make(map[string]float64, len(modelTotals)) @@ -1288,7 +1288,7 @@ func applyToolUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { } if len(dailyCalls) > 0 { - snap.DailySeries["tool_calls"] = mapToSeries(dailyCalls) + snap.DailySeries["tool_calls"] = core.SortedTimePoints(dailyCalls) } toolSummary := make(map[string]float64, len(toolTotals)) @@ -2430,21 +2430,6 @@ func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, wi } } -func mapToSeries(input map[string]float64) []core.TimePoint { - out := make([]core.TimePoint, 0, len(input)) - for day, value := range input { - if strings.TrimSpace(day) == "" { - continue - } - out = append(out, core.TimePoint{ - Date: day, - Value: value, - }) - } - sort.Slice(out, func(i, j int) bool { return out[i].Date < out[j].Date }) - return out -} - func sanitizeMetricSlug(value string) string { trimmed := strings.TrimSpace(strings.ToLower(value)) if trimmed == "" { diff --git a/internal/telemetry/usage_view_projection.go b/internal/telemetry/usage_view_projection.go index d73871b..1d19634 100644 --- a/internal/telemetry/usage_view_projection.go +++ b/internal/telemetry/usage_view_projection.go @@ -2,7 +2,6 @@ package telemetry import ( "fmt" - "sort" "strings" "github.com/janekbaraniewski/openusage/internal/core" @@ -335,17 +334,3 @@ func usageAuthoritativeCost(snap core.UsageSnapshot) float64 { } return 0 } - -func sortedSeriesFromByDay(byDay map[string]float64) []core.TimePoint { - days := lo.Keys(byDay) - sort.Strings(days) - - out := make([]core.TimePoint, 0, len(days)) - for _, day := range days { - out = append(out, core.TimePoint{ - Date: day, - Value: byDay[day], - }) - } - return out -} diff --git a/internal/telemetry/usage_view_queries.go b/internal/telemetry/usage_view_queries.go index ce8fa59..d3f7ad5 100644 --- a/internal/telemetry/usage_view_queries.go +++ 
b/internal/telemetry/usage_view_queries.go @@ -502,7 +502,7 @@ func queryDailyByDimension(ctx context.Context, db *sql.DB, filter usageFilter, out := make(map[string][]core.TimePoint, len(byDim)) for key, dayMap := range byDim { - out[key] = sortedSeriesFromByDay(dayMap) + out[key] = core.SortedTimePoints(dayMap) } return out, nil } @@ -554,7 +554,7 @@ func queryDailyClientTokens(ctx context.Context, db *sql.DB, filter usageFilter) out := make(map[string][]core.TimePoint, len(byClient)) for key, dayMap := range byClient { - out[key] = sortedSeriesFromByDay(dayMap) + out[key] = core.SortedTimePoints(dayMap) } return out, nil } diff --git a/internal/tui/analytics.go b/internal/tui/analytics.go index 22594c5..7001e3f 100644 --- a/internal/tui/analytics.go +++ b/internal/tui/analytics.go @@ -2,7 +2,9 @@ package tui import ( "fmt" + "maps" "math" + "slices" "sort" "strings" "time" @@ -263,8 +265,8 @@ func renderTopModelsSummary(models []modelCostEntry, w int, limit int) string { sb.WriteString(" " + sectionStyle.Render("TOP MODELS (Daily volume & efficiency)") + "\n") sb.WriteString(" " + lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", w-4)) + "\n") - nameW := clampInt(w/3, 20, 34) - provW := clampInt(w/5, 14, 22) + nameW := clamp(w/3, 20, 34) + provW := clamp(w/5, 14, 22) tokW := 12 costW := 10 effW := 10 @@ -317,8 +319,8 @@ func renderTopModelsCompact(models []modelCostEntry, w int, limit int) string { sb.WriteString(" " + sectionStyle.Render("TOP MODELS (compact)") + "\n") sb.WriteString(" " + lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", w-4)) + "\n") - nameW := clampInt(w/2, 16, 26) - provW := clampInt(w/4, 10, 16) + nameW := clamp(w/2, 16, 26) + provW := clamp(w/4, 10, 16) tokW := 9 effW := 9 @@ -385,8 +387,8 @@ func renderCostTableCompact(data costData, w int, limit int) string { sb.WriteString(" " + sectionStyle.Render("COST & SPEND (compact)") + "\n") sb.WriteString(" " + 
lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", w-4)) + "\n") - provW := clampInt(w/3, 14, 24) - colW := clampInt((w-provW-8)/3, 8, 12) + provW := clamp(w/3, 14, 24) + colW := clamp((w-provW-8)/3, 8, 12) head := dimStyle.Copy().Bold(true) sb.WriteString(" " + padRight(head.Render("Provider"), provW) + " " + padLeft(head.Render("Today"), colW) + " " + @@ -864,9 +866,9 @@ func computeAnalyticsSummary(data costData) analyticsSummary { } } - s.dailyCost = mapToSortedPoints(costByDate) - s.dailyTokens = mapToSortedPoints(tokensByDate) - s.dailyMessages = mapToSortedPoints(messagesByDate) + s.dailyCost = core.SortedTimePoints(costByDate) + s.dailyTokens = core.SortedTimePoints(tokensByDate) + s.dailyMessages = core.SortedTimePoints(messagesByDate) s.activeDays = countNonZeroDays(s.dailyCost, s.dailyTokens, s.dailyMessages) s.peakCostDate, s.peakCost = maxPoint(s.dailyCost) @@ -890,17 +892,6 @@ func computeAnalyticsSummary(data costData) analyticsSummary { return s } -func mapToSortedPoints(m map[string]float64) []core.TimePoint { - keys := lo.Keys(m) - sort.Strings(keys) - - out := make([]core.TimePoint, 0, len(keys)) - for _, k := range keys { - out = append(out, core.TimePoint{Date: k, Value: m[k]}) - } - return out -} - func maxPoint(points []core.TimePoint) (string, float64) { bestDate := "" best := 0.0 @@ -1041,16 +1032,6 @@ func filterTokenModels(models []modelCostEntry) []modelCostEntry { return out } -func clampInt(v, lo, hi int) int { - if v < lo { - return lo - } - if v > hi { - return hi - } - return v -} - func truncStr(s string, maxLen int) string { if len(s) <= maxLen { return s @@ -1059,7 +1040,5 @@ func truncStr(s string, maxLen int) string { } func sortedMetricKeys(m map[string]core.Metric) []string { - keys := lo.Keys(m) - sort.Strings(keys) - return keys + return slices.Sorted(maps.Keys(m)) } diff --git a/internal/tui/charts.go b/internal/tui/charts.go index 6b1e373..9759f20 100644 --- a/internal/tui/charts.go +++ 
b/internal/tui/charts.go @@ -599,7 +599,7 @@ func RenderBrailleChart(title string, series []BrailleSeries, w, h int, yFmt fun axisStyle.Render("└"), axisStyle.Render(strings.Repeat("─", plotW)))) - numLabels := clampInt(plotW/22, 3, 6) + numLabels := clamp(plotW/22, 3, 6) if len(allDates) < numLabels { numLabels = len(allDates) } @@ -835,7 +835,7 @@ func renderStackedTimeChart(title string, series []BrailleSeries, w, h int, yFmt sb.WriteString(fmt.Sprintf(" %*s %s%s\n", yAxisW-2, "", axisStyle.Render("└"), axisStyle.Render(strings.Repeat("─", plotW)))) - numLabels := clampInt(plotW/22, 3, 6) + numLabels := clamp(plotW/22, 3, 6) if len(labels) < numLabels { numLabels = len(labels) } @@ -963,7 +963,7 @@ func renderBarTimeChart(title string, series []BrailleSeries, w, h int, yFmt fun sb.WriteString(fmt.Sprintf(" %*s %s%s\n", yAxisW-2, "", axisStyle.Render("└"), axisStyle.Render(strings.Repeat("─", plotW)))) - numLabels := clampInt(plotW/22, 3, 6) + numLabels := clamp(plotW/22, 3, 6) if len(labels) < numLabels { numLabels = len(labels) } @@ -1263,10 +1263,10 @@ func RenderHeatmap(spec HeatmapSpec, w int) string { copy(values[i], spec.Values[i]) } - rowLabelW := clampInt(w/5, 16, 28) + rowLabelW := clamp(w/5, 16, 28) maxCols := spec.MaxCols if maxCols <= 0 { - maxCols = clampInt(w-rowLabelW-8, 20, 80) + maxCols = clamp(w-rowLabelW-8, 20, 80) } if len(cols) > maxCols { step := float64(len(cols)) / float64(maxCols) diff --git a/internal/tui/tiles_composition.go b/internal/tui/tiles_composition.go index 63d6a45..37fac1b 100644 --- a/internal/tui/tiles_composition.go +++ b/internal/tui/tiles_composition.go @@ -8,7 +8,6 @@ import ( "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) type modelMixEntry struct { @@ -963,23 +962,6 @@ func mergeSeriesByDay(seriesByClient map[string]map[string]float64, client strin } } -func sortedSeriesFromByDay(pointsByDay map[string]float64) []core.TimePoint { - if 
len(pointsByDay) == 0 { - return nil - } - days := lo.Keys(pointsByDay) - sort.Strings(days) - - points := make([]core.TimePoint, 0, len(days)) - for _, day := range days { - points = append(points, core.TimePoint{ - Date: day, - Value: pointsByDay[day], - }) - } - return points -} - func limitClientMix(clients []clientMixEntry, expanded bool, maxVisible int) ([]clientMixEntry, int) { if expanded || maxVisible <= 0 || len(clients) <= maxVisible { return clients, 0 From 241d6ceec19636d51ba092e9c2413470168bb75c Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 16:06:52 +0100 Subject: [PATCH 17/32] refactor: share hook ingest flow and split detail analytics sections --- cmd/openusage/telemetry.go | 109 +----- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 14 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 33 +- internal/core/analytics_costs.go | 57 +++ internal/core/analytics_costs_test.go | 50 +++ internal/daemon/hook_ingest.go | 54 +++ internal/daemon/hook_ingest_local.go | 101 +++++ .../daemon/hook_ingest_test.go | 6 +- internal/daemon/server_http.go | 29 +- internal/daemon/server_spool.go | 20 +- internal/providers/claude_code/claude_code.go | 6 +- internal/telemetry/usage_view.go | 38 +- internal/telemetry/usage_view_materialize.go | 43 +++ internal/tui/analytics_data.go | 41 +-- internal/tui/detail.go | 348 ------------------ internal/tui/detail_analytics_sections.go | 340 +++++++++++++++++ 16 files changed, 692 insertions(+), 597 deletions(-) create mode 100644 internal/core/analytics_costs.go create mode 100644 internal/core/analytics_costs_test.go create mode 100644 internal/daemon/hook_ingest.go create mode 100644 internal/daemon/hook_ingest_local.go rename cmd/openusage/telemetry_hook_test.go => internal/daemon/hook_ingest_test.go (95%) create mode 100644 internal/telemetry/usage_view_materialize.go create mode 100644 internal/tui/detail_analytics_sections.go diff --git a/cmd/openusage/telemetry.go b/cmd/openusage/telemetry.go index 
952e66f..ebbf25f 100644 --- a/cmd/openusage/telemetry.go +++ b/cmd/openusage/telemetry.go @@ -12,7 +12,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/daemon" "github.com/janekbaraniewski/openusage/internal/detect" "github.com/janekbaraniewski/openusage/internal/integrations" - "github.com/janekbaraniewski/openusage/internal/providers" "github.com/janekbaraniewski/openusage/internal/telemetry" "github.com/spf13/cobra" ) @@ -51,9 +50,6 @@ func newTelemetryHookCommand() *cobra.Command { Args: cobra.ExactArgs(1), RunE: func(_ *cobra.Command, args []string) error { sourceName := args[0] - if _, ok := providers.TelemetrySourceBySystem(sourceName); !ok { - return fmt.Errorf("unknown hook source %q", sourceName) - } payload, err := io.ReadAll(os.Stdin) if err != nil { @@ -89,7 +85,7 @@ func newTelemetryHookCommand() *cobra.Command { daemonErr = err } - result, err := ingestHookLocally( + result, err := daemon.IngestHookLocally( ctx, sourceName, strings.TrimSpace(accountID), @@ -147,109 +143,6 @@ func newTelemetryHookCommand() *cobra.Command { return cmd } -func ingestHookLocally( - ctx context.Context, - sourceName string, - accountID string, - payload []byte, - dbPath string, - spoolDir string, - spoolOnly bool, -) (daemon.HookResponse, error) { - source, ok := providers.TelemetrySourceBySystem(sourceName) - if !ok { - return daemon.HookResponse{}, fmt.Errorf("unknown hook source %q", sourceName) - } - options, effectiveAccountID, warnings := daemon.ResolveTelemetrySourceOptions(source, accountID) - reqs, err := telemetry.ParseSourceHookPayload(source, payload, options, effectiveAccountID) - if err != nil { - return daemon.HookResponse{}, fmt.Errorf("parse hook payload: %w", err) - } - resp := daemon.HookResponse{ - Source: sourceName, - Enqueued: len(reqs), - Warnings: warnings, - } - if len(reqs) == 0 { - return resp, nil - } - - if strings.TrimSpace(dbPath) == "" { - resolved, resolveErr := telemetry.DefaultDBPath() - if resolveErr != nil { - return 
daemon.HookResponse{}, fmt.Errorf("resolve telemetry db path: %w", resolveErr) - } - dbPath = resolved - } - if strings.TrimSpace(spoolDir) == "" { - resolved, resolveErr := telemetry.DefaultSpoolDir() - if resolveErr != nil { - return daemon.HookResponse{}, fmt.Errorf("resolve telemetry spool dir: %w", resolveErr) - } - spoolDir = resolved - } - - store, err := telemetry.OpenStore(dbPath) - if err != nil { - return daemon.HookResponse{}, fmt.Errorf("open telemetry store: %w", err) - } - defer store.Close() - - pipeline := telemetry.NewPipeline(store, telemetry.NewSpool(spoolDir)) - if spoolOnly { - enqueued, enqueueErr := pipeline.EnqueueRequests(reqs) - if enqueueErr != nil { - return daemon.HookResponse{}, fmt.Errorf("enqueue to telemetry spool: %w", enqueueErr) - } - resp.Enqueued = enqueued - return resp, nil - } - - retries := make([]telemetry.IngestRequest, 0, len(reqs)) - var firstIngestErr error - for _, req := range reqs { - resp.Processed++ - result, ingestErr := store.Ingest(ctx, req) - if ingestErr != nil { - if firstIngestErr == nil { - firstIngestErr = ingestErr - } - retries = append(retries, req) - continue - } - if result.Deduped { - resp.Deduped++ - } else { - resp.Ingested++ - } - } - - if len(retries) == 0 { - return resp, nil - } - if firstIngestErr != nil { - resp.Warnings = append(resp.Warnings, fmt.Sprintf("direct ingest failed for %d event(s): %v", len(retries), firstIngestErr)) - } - - enqueued, enqueueErr := pipeline.EnqueueRequests(retries) - if enqueueErr != nil { - resp.Failed += len(retries) - resp.Warnings = append(resp.Warnings, fmt.Sprintf("retry enqueue failed: %v", enqueueErr)) - return resp, nil - } - flush, warnings := daemon.FlushInBatches(ctx, pipeline, enqueued) - resp.Processed += flush.Processed - resp.Ingested += flush.Ingested - resp.Deduped += flush.Deduped - resp.Failed += flush.Failed - resp.Warnings = append(resp.Warnings, warnings...) 
- - if remaining := len(retries) - flush.Processed; remaining > 0 { - resp.Warnings = append(resp.Warnings, fmt.Sprintf("%d event(s) remain queued in spool", remaining)) - } - return resp, nil -} - func newTelemetryDaemonCommand() *cobra.Command { var ( socketPath string diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 24af736..0cf1446 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -55,6 +55,9 @@ This table captures every issue found in this pass. It is broad and high-signal, | R35 | Fixed | OpenRouter account API split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/account_api.go` | OpenRouter key/auth, credits, and key-metadata fetch helpers now live in a dedicated account API unit instead of the main provider file. The coordinator file is down to provider setup and fetch orchestration. | Keep further OpenRouter account mutations inside the account unit. | | R36 | Fixed | Detail token section decomposition | `internal/tui/detail.go`, `internal/tui/detail_tokens.go` | The detail token section now renders from shared analytics model extraction instead of reverse-parsing token metric keys, and the token-specific renderer lives in its own file. | Continue splitting other detail subsections the same way. | | R37 | Fixed | Telemetry source account binding and safer fallback | `internal/daemon/source_collectors.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_http.go`, `internal/daemon/server_spool.go`, `cmd/openusage/telemetry.go`, `internal/telemetry/provider_event_mapper.go` | Local collectors and hook ingestion now bind to configured source accounts when unambiguous, ambiguous shared-path setups degrade to explicit source-scoped attribution instead of silently choosing one account, and account fallback prefers source system before upstream provider. 
| If hook ingest logic is centralized later, keep using the same resolver. | +| R38 | Fixed | Shared hook ingest service | `internal/daemon/hook_ingest.go`, `internal/daemon/hook_ingest_local.go`, `internal/daemon/server_http.go`, `internal/daemon/server_spool.go`, `cmd/openusage/telemetry.go` | Hook request parsing and local ingest/spool fallback now live in shared daemon helpers used by CLI fallback, HTTP ingest, and hook-spool replay. The remaining edge code is transport and user messaging only. | Reuse the same helpers if more hook entrypoints are added. | +| R39 | Fixed | Usage-view materialization split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_materialize.go` | Temp-table creation/indexing/cleanup and aggregate initialization moved out of the main usage-view orchestration path. | Continue splitting aggregate query fanout if `usage_view.go` grows again. | +| R40 | Fixed | Analytics cost fallback extraction | `internal/core/analytics_costs.go`, `internal/tui/analytics_data.go` | Analytics all-time/today/week cost fallback rules now live in shared core logic instead of TUI-owned metric-key decoding. | Continue moving remaining analytics/detail metric decoding into shared extractors. | ## Action Table @@ -62,11 +65,10 @@ This table captures every issue found in this pass. It is broad and high-signal, | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. 
| | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/dashboardapp/service.go` | Side effects are injected and some detail logic is split, but TUI state-transition and rendering logic is still concentrated in very large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | -| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go` | Composition bars and analytics model views now consume shared extractors, and the token detail table no longer parses raw token keys, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | +| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go` | Composition bars, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. 
| | A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/codex/codex.go` | Cursor and OpenRouter are now materially decomposed, but several other providers still combine transport, parsing, normalization, and projection in single 1900-2600 LOC files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | -| A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go` | The usage-view code is materially smaller after the helper/projection/query splits, but the orchestration/materialization path still owns temp-table lifecycle, query fanout, and aggregate assembly in one place. | Continue splitting remaining orchestration/materialization concerns and consider a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | +| A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go` | The usage-view code is materially smaller after the helper/projection/query/materialization splits, but the orchestration path still owns query fanout and aggregate assembly in one place. | Continue splitting remaining orchestration concerns and consider a typed intermediate aggregation model. | Easier optimization and safer incremental changes. 
| | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. | -| A9 | P2 | Hook ingestion duplication | `internal/daemon/server_spool.go`, `cmd/openusage/telemetry.go` | Daemon and CLI fallback still own overlapping hook ingest/spool behavior with different control flow and user messaging. | Extract a shared hook ingest service and keep only transport/output differences at the edges. | Less drift between daemon and CLI ingest behavior. | | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. 
| | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | @@ -76,11 +78,11 @@ This table captures every issue found in this pass. It is broad and high-signal, 1. A2, A3 2. A6, A4 -3. A9, A7, A1 -4. A8, A12, A14, A15 +3. A7, A1, A8 +4. A12, A14, A15 ## Notes - The highest-risk remaining issues are architectural rather than immediately broken behavior. -- The biggest remaining drift risks are the metric-prefix parsing still spread across the TUI render path and duplicated hook-ingest control flow across daemon and CLI paths. +- The biggest remaining drift risks are the metric-prefix parsing still spread across the TUI render path and the remaining large TUI/provider files. - The race pass completed cleanly for the core dashboard/daemon/telemetry packages after the timeframe fix. 
diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index 76808a9..4be6d26 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -22,6 +22,8 @@ These were major concerns in earlier reviews and are now materially addressed: - OpenRouter provider-resolution, analytics, generation, projection, and account-path monolith sprawl. - TUI side-effect leakage into config persistence / integration install / provider validation. - Ollama hot-path `time.Now()` usage in behavioral window/reset logic. +- Shared hook ingest parsing/local fallback drift between daemon and CLI. +- Usage-view temp-table materialization living inline in the main orchestration path. ## Findings @@ -73,19 +75,7 @@ What to address: - projection helpers - telemetry-specific collectors -### 4. [P2] Hook ingestion behavior is still duplicated between daemon and CLI fallback - -The daemon and CLI fallback paths still own overlapping hook-ingest/spool behavior. The structure is workable, but the logic can drift. - -Refs: -- [server_spool.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/daemon/server_spool.go) -- [telemetry.go](/Users/janekbaraniewski/Workspace/priv/openusage/cmd/openusage/telemetry.go) - -What to address: -- Extract a shared hook ingest service. -- Keep transport/output concerns at the command/daemon edge. - -### 5. [P3] Ambiguous shared-path local sources still require explicit account disambiguation +### 4. [P3] Ambiguous shared-path local sources still require explicit account disambiguation The daemon now binds local telemetry to configured accounts when the source/account mapping is unambiguous. If multiple accounts share the same source paths, it intentionally degrades to source-scoped attribution instead of silently guessing. 
That is the correct behavior today, but it means truly ambiguous local multi-account setups still need an explicit binding mechanism if they become a first-class use case. @@ -98,7 +88,7 @@ What to address: - Add persisted source/account alias mapping only if ambiguous local multi-account setups become common. - Keep ambiguous attribution explicit; do not reintroduce silent account guessing. -### 6. [P3] Account config contract cleanup is not finished +### 5. [P3] Account config contract cleanup is not finished The hot-path abuse of `Binary`/`BaseURL` is fixed, but the type still allows path-like runtime hints and canonical provider config to coexist ambiguously. @@ -110,7 +100,7 @@ What to address: - Introduce a dedicated typed runtime-hints structure. - Retire compatibility comments and residual semantic ambiguity in `AccountConfig`. -### 7. [P3] Test suites are strong but still expensive to maintain +### 6. [P3] Test suites are strong but still expensive to maintain Some package tests remain extremely large and inline too much fixture logic. @@ -126,15 +116,14 @@ What to address: ## Recommended Order -1. Telemetry account identity mapping. -2. TUI extractor/decomposition follow-through. -3. Remaining provider monolith splits. -4. Shared hook ingest service. -5. Account config contract hardening. -6. Test fixture cleanup. +1. TUI extractor/decomposition follow-through. +2. Remaining provider monolith splits. +3. Telemetry account identity mapping and daemon follow-through. +4. Account config contract hardening. +5. Test fixture cleanup. ## Notes - The repo is in materially better shape than it was at the start of this cleanup branch. - The main remaining risks are now architectural and maintainability-oriented rather than immediate correctness regressions. -- The highest near-term drift risk is the duplicated hook-ingest control flow plus the remaining metric-prefix parsing still sitting in TUI render code. 
+- The highest near-term drift risk is the remaining metric-prefix parsing still sitting in TUI render code plus the size of the remaining TUI/provider units. diff --git a/internal/core/analytics_costs.go b/internal/core/analytics_costs.go new file mode 100644 index 0000000..0cc3ddb --- /dev/null +++ b/internal/core/analytics_costs.go @@ -0,0 +1,57 @@ +package core + +type AnalyticsCostSummary struct { + TotalCostUSD float64 + TodayCostUSD float64 + WeekCostUSD float64 +} + +func ExtractAnalyticsCostSummary(s UsageSnapshot) AnalyticsCostSummary { + return AnalyticsCostSummary{ + TotalCostUSD: firstPositiveMetricUsed(s, + sumAnalyticsModelCost(s), + "total_cost_usd", + "plan_total_spend_usd", + "all_time_api_cost", + "jsonl_total_cost_usd", + "today_api_cost", + "daily_cost_usd", + "5h_block_cost", + "block_cost_usd", + "individual_spend", + "credits", + ), + TodayCostUSD: firstPositiveMetricUsed(s, + 0, + "today_api_cost", + "daily_cost_usd", + "today_cost", + "usage_daily", + ), + WeekCostUSD: firstPositiveMetricUsed(s, + 0, + "7d_api_cost", + "usage_weekly", + ), + } +} + +func sumAnalyticsModelCost(s UsageSnapshot) float64 { + total := 0.0 + for _, model := range ExtractAnalyticsModelUsage(s) { + total += model.CostUSD + } + return total +} + +func firstPositiveMetricUsed(s UsageSnapshot, fallback float64, keys ...string) float64 { + if fallback > 0 { + return fallback + } + for _, key := range keys { + if metric, ok := s.Metrics[key]; ok && metric.Used != nil && *metric.Used > 0 { + return *metric.Used + } + } + return 0 +} diff --git a/internal/core/analytics_costs_test.go b/internal/core/analytics_costs_test.go new file mode 100644 index 0000000..e12d7b5 --- /dev/null +++ b/internal/core/analytics_costs_test.go @@ -0,0 +1,50 @@ +package core + +import "testing" + +func TestExtractAnalyticsCostSummary_PrefersModelUsage(t *testing.T) { + cost := 3.5 + snap := UsageSnapshot{ + ModelUsage: []ModelUsageRecord{ + {RawModelID: "gpt-4.1", CostUSD: &cost}, + }, + 
Metrics: map[string]Metric{ + "total_cost_usd": {Used: Float64Ptr(1.2)}, + "today_api_cost": {Used: Float64Ptr(0.4)}, + "7d_api_cost": {Used: Float64Ptr(2.4)}, + }, + } + + got := ExtractAnalyticsCostSummary(snap) + if got.TotalCostUSD != 3.5 { + t.Fatalf("total = %v, want 3.5", got.TotalCostUSD) + } + if got.TodayCostUSD != 0.4 { + t.Fatalf("today = %v, want 0.4", got.TodayCostUSD) + } + if got.WeekCostUSD != 2.4 { + t.Fatalf("week = %v, want 2.4", got.WeekCostUSD) + } +} + +func TestExtractAnalyticsCostSummary_FallsBackToMetrics(t *testing.T) { + snap := UsageSnapshot{ + Metrics: map[string]Metric{ + "credits": {Used: Float64Ptr(8)}, + "usage_daily": {Used: Float64Ptr(1.5)}, + "usage_weekly": {Used: Float64Ptr(6)}, + "total_cost_usd": {Used: Float64Ptr(4)}, + }, + } + + got := ExtractAnalyticsCostSummary(snap) + if got.TotalCostUSD != 4 { + t.Fatalf("total = %v, want 4", got.TotalCostUSD) + } + if got.TodayCostUSD != 1.5 { + t.Fatalf("today = %v, want 1.5", got.TodayCostUSD) + } + if got.WeekCostUSD != 6 { + t.Fatalf("week = %v, want 6", got.WeekCostUSD) + } +} diff --git a/internal/daemon/hook_ingest.go b/internal/daemon/hook_ingest.go new file mode 100644 index 0000000..7189c71 --- /dev/null +++ b/internal/daemon/hook_ingest.go @@ -0,0 +1,54 @@ +package daemon + +import ( + "context" + "fmt" + "strings" + + "github.com/janekbaraniewski/openusage/internal/providers" + "github.com/janekbaraniewski/openusage/internal/telemetry" +) + +type HookParseResult struct { + SourceName string + EffectiveAccountID string + Requests []telemetry.IngestRequest + Warnings []string +} + +func ParseHookRequests(sourceName, accountID string, payload []byte) (HookParseResult, error) { + sourceName = strings.TrimSpace(sourceName) + source, ok := providers.TelemetrySourceBySystem(sourceName) + if !ok { + return HookParseResult{}, fmt.Errorf("unknown hook source %q", sourceName) + } + + options, effectiveAccountID, warnings := ResolveTelemetrySourceOptions(source, 
strings.TrimSpace(accountID)) + reqs, err := telemetry.ParseSourceHookPayload(source, payload, options, effectiveAccountID) + if err != nil { + return HookParseResult{}, fmt.Errorf("parse hook payload: %w", err) + } + + return HookParseResult{ + SourceName: sourceName, + EffectiveAccountID: effectiveAccountID, + Requests: reqs, + Warnings: warnings, + }, nil +} + +func IngestHookLocally( + ctx context.Context, + sourceName string, + accountID string, + payload []byte, + dbPath string, + spoolDir string, + spoolOnly bool, +) (HookResponse, error) { + parsed, err := ParseHookRequests(sourceName, accountID, payload) + if err != nil { + return HookResponse{}, err + } + return ingestParsedHookLocally(ctx, parsed, dbPath, spoolDir, spoolOnly) +} diff --git a/internal/daemon/hook_ingest_local.go b/internal/daemon/hook_ingest_local.go new file mode 100644 index 0000000..dfdd2be --- /dev/null +++ b/internal/daemon/hook_ingest_local.go @@ -0,0 +1,101 @@ +package daemon + +import ( + "context" + "fmt" + "strings" + + "github.com/janekbaraniewski/openusage/internal/telemetry" +) + +func ingestParsedHookLocally( + ctx context.Context, + parsed HookParseResult, + dbPath string, + spoolDir string, + spoolOnly bool, +) (HookResponse, error) { + resp := HookResponse{ + Source: parsed.SourceName, + Enqueued: len(parsed.Requests), + Warnings: append([]string(nil), parsed.Warnings...), + } + if len(parsed.Requests) == 0 { + return resp, nil + } + + if strings.TrimSpace(dbPath) == "" { + resolved, resolveErr := telemetry.DefaultDBPath() + if resolveErr != nil { + return HookResponse{}, fmt.Errorf("resolve telemetry db path: %w", resolveErr) + } + dbPath = resolved + } + if strings.TrimSpace(spoolDir) == "" { + resolved, resolveErr := telemetry.DefaultSpoolDir() + if resolveErr != nil { + return HookResponse{}, fmt.Errorf("resolve telemetry spool dir: %w", resolveErr) + } + spoolDir = resolved + } + + store, err := telemetry.OpenStore(dbPath) + if err != nil { + return HookResponse{}, 
fmt.Errorf("open telemetry store: %w", err) + } + defer store.Close() + + pipeline := telemetry.NewPipeline(store, telemetry.NewSpool(spoolDir)) + if spoolOnly { + enqueued, enqueueErr := pipeline.EnqueueRequests(parsed.Requests) + if enqueueErr != nil { + return HookResponse{}, fmt.Errorf("enqueue to telemetry spool: %w", enqueueErr) + } + resp.Enqueued = enqueued + return resp, nil + } + + retries := make([]telemetry.IngestRequest, 0, len(parsed.Requests)) + var firstIngestErr error + for _, req := range parsed.Requests { + resp.Processed++ + result, ingestErr := store.Ingest(ctx, req) + if ingestErr != nil { + if firstIngestErr == nil { + firstIngestErr = ingestErr + } + retries = append(retries, req) + continue + } + if result.Deduped { + resp.Deduped++ + } else { + resp.Ingested++ + } + } + + if len(retries) == 0 { + return resp, nil + } + if firstIngestErr != nil { + resp.Warnings = append(resp.Warnings, fmt.Sprintf("direct ingest failed for %d event(s): %v", len(retries), firstIngestErr)) + } + + enqueued, enqueueErr := pipeline.EnqueueRequests(retries) + if enqueueErr != nil { + resp.Failed += len(retries) + resp.Warnings = append(resp.Warnings, fmt.Sprintf("retry enqueue failed: %v", enqueueErr)) + return resp, nil + } + flush, warnings := FlushInBatches(ctx, pipeline, enqueued) + resp.Processed += flush.Processed + resp.Ingested += flush.Ingested + resp.Deduped += flush.Deduped + resp.Failed += flush.Failed + resp.Warnings = append(resp.Warnings, warnings...) 
+ + if remaining := len(retries) - flush.Processed; remaining > 0 { + resp.Warnings = append(resp.Warnings, fmt.Sprintf("%d event(s) remain queued in spool", remaining)) + } + return resp, nil +} diff --git a/cmd/openusage/telemetry_hook_test.go b/internal/daemon/hook_ingest_test.go similarity index 95% rename from cmd/openusage/telemetry_hook_test.go rename to internal/daemon/hook_ingest_test.go index a9ccff7..35b6bc7 100644 --- a/cmd/openusage/telemetry_hook_test.go +++ b/internal/daemon/hook_ingest_test.go @@ -1,4 +1,4 @@ -package main +package daemon import ( "context" @@ -14,7 +14,7 @@ func TestIngestHookLocally_IngestsHookPayload(t *testing.T) { spoolDir := filepath.Join(t.TempDir(), "spool") payload := []byte(`{"hook":"chat.message","timestamp":"2026-02-26T20:00:00Z","input":{"sessionID":"sess-1","agent":"main","messageID":"turn-1","variant":"default","model":{"providerID":"openrouter","modelID":"openai/gpt-oss-20b"}},"output":{"message":{"id":"msg-1","sessionID":"sess-1","role":"assistant"},"route":{"provider_name":"DeepInfra"},"usage":{"input_tokens":12,"output_tokens":4,"total_tokens":16,"cost_usd":0.00012}}}`) - resp, err := ingestHookLocally(context.Background(), "opencode", "openrouter", payload, dbPath, spoolDir, false) + resp, err := IngestHookLocally(context.Background(), "opencode", "openrouter", payload, dbPath, spoolDir, false) if err != nil { t.Fatalf("ingest hook locally: %v", err) } @@ -50,7 +50,7 @@ func TestIngestHookLocally_SpoolOnly(t *testing.T) { spoolDir := filepath.Join(t.TempDir(), "spool") payload := []byte(`{"hook":"tool.execute.after","timestamp":"2026-02-26T20:00:00Z","input":{"tool":"glob","sessionID":"sess-1","callID":"tool-1"},"output":{"title":"Glob"}}`) - resp, err := ingestHookLocally(context.Background(), "opencode", "openrouter", payload, dbPath, spoolDir, true) + resp, err := IngestHookLocally(context.Background(), "opencode", "openrouter", payload, dbPath, spoolDir, true) if err != nil { t.Fatalf("spool-only ingest hook 
locally: %v", err) } diff --git a/internal/daemon/server_http.go b/internal/daemon/server_http.go index 487a5e9..054a8df 100644 --- a/internal/daemon/server_http.go +++ b/internal/daemon/server_http.go @@ -11,8 +11,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/integrations" - "github.com/janekbaraniewski/openusage/internal/providers" - "github.com/janekbaraniewski/openusage/internal/telemetry" "github.com/janekbaraniewski/openusage/internal/version" ) @@ -39,11 +37,6 @@ func (s *Service) handleHook(w http.ResponseWriter, r *http.Request) { writeJSONError(w, http.StatusBadRequest, "missing hook source") return } - source, ok := providers.TelemetrySourceBySystem(sourceName) - if !ok { - writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("unknown hook source %q", sourceName)) - return - } payload, err := io.ReadAll(r.Body) if err != nil { @@ -56,25 +49,25 @@ func (s *Service) handleHook(w http.ResponseWriter, r *http.Request) { } accountID := strings.TrimSpace(r.URL.Query().Get("account_id")) - options, effectiveAccountID, warnings := ResolveTelemetrySourceOptions(source, accountID) - reqs, err := telemetry.ParseSourceHookPayload(source, payload, options, effectiveAccountID) + parsed, err := ParseHookRequests(sourceName, accountID, payload) if err != nil { - writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("parse hook payload: %v", err)) + writeJSONError(w, http.StatusBadRequest, err.Error()) return } - if len(reqs) == 0 { - writeJSON(w, http.StatusOK, HookResponse{Source: sourceName, Warnings: warnings}) + if len(parsed.Requests) == 0 { + writeJSON(w, http.StatusOK, HookResponse{Source: sourceName, Warnings: parsed.Warnings}) return } - tally, _ := s.ingestBatch(r.Context(), reqs) + tally, _ := s.ingestBatch(r.Context(), parsed.Requests) + warnings := append([]string(nil), parsed.Warnings...) 
if tally.failed > 0 { warnings = append(warnings, fmt.Sprintf("%d ingest failures", tally.failed)) } writeJSON(w, http.StatusOK, HookResponse{ Source: sourceName, - Enqueued: len(reqs), + Enqueued: len(parsed.Requests), Processed: tally.processed, Ingested: tally.ingested, Deduped: tally.deduped, @@ -91,15 +84,15 @@ func (s *Service) handleHook(w http.ResponseWriter, r *http.Request) { if tally.failed > 0 { s.warnf(logLevel, "source=%s account_id=%q duration_ms=%d enqueued=%d processed=%d ingested=%d deduped=%d failed=%d", - sourceName, effectiveAccountID, durationMs, - len(reqs), tally.processed, tally.ingested, tally.deduped, tally.failed, + sourceName, parsed.EffectiveAccountID, durationMs, + len(parsed.Requests), tally.processed, tally.ingested, tally.deduped, tally.failed, ) return } s.infof(logLevel, "source=%s account_id=%q duration_ms=%d enqueued=%d processed=%d ingested=%d deduped=%d failed=%d", - sourceName, effectiveAccountID, durationMs, - len(reqs), tally.processed, tally.ingested, tally.deduped, tally.failed, + sourceName, parsed.EffectiveAccountID, durationMs, + len(parsed.Requests), tally.processed, tally.ingested, tally.deduped, tally.failed, ) } diff --git a/internal/daemon/server_spool.go b/internal/daemon/server_spool.go index 43ae0b9..96e5ca7 100644 --- a/internal/daemon/server_spool.go +++ b/internal/daemon/server_spool.go @@ -8,7 +8,6 @@ import ( "strings" "time" - "github.com/janekbaraniewski/openusage/internal/providers" "github.com/janekbaraniewski/openusage/internal/telemetry" ) @@ -171,27 +170,14 @@ func (s *Service) processHookSpool(ctx context.Context, dir string) { continue } - source, ok := providers.TelemetrySourceBySystem(raw.Source) - if !ok { + parsed, parseErr := ParseHookRequests(raw.Source, strings.TrimSpace(raw.AccountID), raw.Payload) + if parseErr != nil || len(parsed.Requests) == 0 { _ = os.Remove(path) processed++ continue } - options, effectiveAccountID, _ := ResolveTelemetrySourceOptions(source, 
strings.TrimSpace(raw.AccountID)) - reqs, parseErr := telemetry.ParseSourceHookPayload( - source, - raw.Payload, - options, - effectiveAccountID, - ) - if parseErr != nil || len(reqs) == 0 { - _ = os.Remove(path) - processed++ - continue - } - - tally, _ := s.ingestBatch(ctx, reqs) + tally, _ := s.ingestBatch(ctx, parsed.Requests) _ = os.Remove(path) processed++ diff --git a/internal/providers/claude_code/claude_code.go b/internal/providers/claude_code/claude_code.go index 03eda4e..2fa21e7 100644 --- a/internal/providers/claude_code/claude_code.go +++ b/internal/providers/claude_code/claude_code.go @@ -816,7 +816,7 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna if altProjectsDir != "" { jsonlFiles = append(jsonlFiles, collectJSONLFiles(altProjectsDir)...) } - jsonlFiles = dedupeStringSlice(jsonlFiles) + jsonlFiles = lo.Uniq(lo.Compact(jsonlFiles)) sort.Strings(jsonlFiles) if len(jsonlFiles) == 0 { @@ -1877,10 +1877,6 @@ func inferLanguageFromPath(path string) string { return "" } -func dedupeStringSlice(items []string) []string { - return lo.Uniq(lo.Compact(items)) -} - func summarizeCountMap(values map[string]int, limit int) string { type entry struct { name string diff --git a/internal/telemetry/usage_view.go b/internal/telemetry/usage_view.go index 4a3ae0c..1c4cca0 100644 --- a/internal/telemetry/usage_view.go +++ b/internal/telemetry/usage_view.go @@ -290,35 +290,13 @@ func loadUsageViewForProviderWithSources(ctx context.Context, db *sql.DB, provid func loadUsageViewForFilter(ctx context.Context, db *sql.DB, filter usageFilter) (*telemetryUsageAgg, error) { filterStart := time.Now() - agg := &telemetryUsageAgg{ - ModelDaily: make(map[string][]core.TimePoint), - SourceDaily: make(map[string][]core.TimePoint), - ProjectDaily: make(map[string][]core.TimePoint), - ClientDaily: make(map[string][]core.TimePoint), - ClientTokens: make(map[string][]core.TimePoint), - } - - // Materialize the deduped CTE into a temp table so 
subsequent queries - // read from a flat table instead of rebuilding the 3-level CTE each time. - usageCTE, whereArgs := dedupedUsageCTE(filter) - tempTable := "_deduped_tmp" + agg := newTelemetryUsageAgg() - matStart := time.Now() - _, _ = db.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", tempTable)) - materializeSQL := fmt.Sprintf("CREATE TEMP TABLE %s AS %s SELECT * FROM deduped_usage", tempTable, usageCTE) - if _, err := db.ExecContext(ctx, materializeSQL, whereArgs...); err != nil { - return nil, fmt.Errorf("materialize deduped usage: %w", err) + matFilter, cleanup, err := materializeUsageFilter(ctx, db, filter) + if err != nil { + return nil, err } - core.Tracef("[usage_view_perf] materialize temp table: %dms (providers=%v, windowHours=%d)", - time.Since(matStart).Milliseconds(), filter.ProviderIDs, filter.TimeWindowHours) - defer func() { - _, _ = db.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", tempTable)) - }() - - // Create indexes on the temp table for the aggregation queries. - // Compound (event_type, status) covers the most common WHERE pattern. - _, _ = db.ExecContext(ctx, fmt.Sprintf("CREATE INDEX IF NOT EXISTS idx_deduped_event_status ON %s(event_type, status)", tempTable)) - _, _ = db.ExecContext(ctx, fmt.Sprintf("CREATE INDEX IF NOT EXISTS idx_deduped_occurred ON %s(occurred_at)", tempTable)) + defer cleanup() // Count from the materialized table. 
countStart := time.Now() @@ -326,7 +304,7 @@ func loadUsageViewForFilter(ctx context.Context, db *sql.DB, filter usageFilter) SELECT COALESCE(MAX(occurred_at), ''), COUNT(*) FROM %s WHERE event_type IN ('message_usage', 'tool_usage') - `, tempTable) + `, matFilter.materializedTbl) if err := db.QueryRowContext(ctx, countQuery).Scan(&agg.LastOccurred, &agg.EventCount); err != nil { return nil, fmt.Errorf("canonical usage count query: %w", err) } @@ -336,10 +314,6 @@ func loadUsageViewForFilter(ctx context.Context, db *sql.DB, filter usageFilter) return agg, nil } - // All subsequent queries use the materialized temp table. - matFilter := filter - matFilter.materializedTbl = tempTable - trace := func(label string) func() { start := time.Now() return func() { core.Tracef("[usage_view_perf] %s: %dms", label, time.Since(start).Milliseconds()) } diff --git a/internal/telemetry/usage_view_materialize.go b/internal/telemetry/usage_view_materialize.go new file mode 100644 index 0000000..a5f5b41 --- /dev/null +++ b/internal/telemetry/usage_view_materialize.go @@ -0,0 +1,43 @@ +package telemetry + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func newTelemetryUsageAgg() *telemetryUsageAgg { + return &telemetryUsageAgg{ + ModelDaily: make(map[string][]core.TimePoint), + SourceDaily: make(map[string][]core.TimePoint), + ProjectDaily: make(map[string][]core.TimePoint), + ClientDaily: make(map[string][]core.TimePoint), + ClientTokens: make(map[string][]core.TimePoint), + } +} + +func materializeUsageFilter(ctx context.Context, db *sql.DB, filter usageFilter) (usageFilter, func(), error) { + usageCTE, whereArgs := dedupedUsageCTE(filter) + tempTable := "_deduped_tmp" + + matStart := time.Now() + _, _ = db.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", tempTable)) + materializeSQL := fmt.Sprintf("CREATE TEMP TABLE %s AS %s SELECT * FROM deduped_usage", tempTable, usageCTE) + if _, err := 
db.ExecContext(ctx, materializeSQL, whereArgs...); err != nil { + return usageFilter{}, nil, fmt.Errorf("materialize deduped usage: %w", err) + } + core.Tracef("[usage_view_perf] materialize temp table: %dms (providers=%v, windowHours=%d)", + time.Since(matStart).Milliseconds(), filter.ProviderIDs, filter.TimeWindowHours) + + _, _ = db.ExecContext(ctx, fmt.Sprintf("CREATE INDEX IF NOT EXISTS idx_deduped_event_status ON %s(event_type, status)", tempTable)) + _, _ = db.ExecContext(ctx, fmt.Sprintf("CREATE INDEX IF NOT EXISTS idx_deduped_occurred ON %s(occurred_at)", tempTable)) + + filter.materializedTbl = tempTable + cleanup := func() { + _, _ = db.ExecContext(context.Background(), fmt.Sprintf("DROP TABLE IF EXISTS %s", tempTable)) + } + return filter, cleanup, nil +} diff --git a/internal/tui/analytics_data.go b/internal/tui/analytics_data.go index 844cc36..8c38eb3 100644 --- a/internal/tui/analytics_data.go +++ b/internal/tui/analytics_data.go @@ -202,50 +202,15 @@ func extractCostData(snapshots map[string]core.UsageSnapshot, filter string) cos } func extractProviderCost(snap core.UsageSnapshot) float64 { - modelTotal := 0.0 - for _, model := range core.ExtractAnalyticsModelUsage(snap) { - modelTotal += model.CostUSD - } - if modelTotal > 0 { - return modelTotal - } - - for _, key := range []string{ - "total_cost_usd", - "plan_total_spend_usd", - "all_time_api_cost", - "jsonl_total_cost_usd", - "today_api_cost", - "daily_cost_usd", - "5h_block_cost", - "block_cost_usd", - "individual_spend", - "credits", - } { - if m, ok := snap.Metrics[key]; ok && m.Used != nil && *m.Used > 0 { - return *m.Used - } - } - - return 0 + return core.ExtractAnalyticsCostSummary(snap).TotalCostUSD } func extractTodayCost(snap core.UsageSnapshot) float64 { - for _, key := range []string{"today_api_cost", "daily_cost_usd", "today_cost", "usage_daily"} { - if m, ok := snap.Metrics[key]; ok && m.Used != nil && *m.Used > 0 { - return *m.Used - } - } - return 0 + return 
core.ExtractAnalyticsCostSummary(snap).TodayCostUSD } func extract7DayCost(snap core.UsageSnapshot) float64 { - for _, key := range []string{"7d_api_cost", "usage_weekly"} { - if m, ok := snap.Metrics[key]; ok && m.Used != nil && *m.Used > 0 { - return *m.Used - } - } - return 0 + return core.ExtractAnalyticsCostSummary(snap).WeekCostUSD } func extractAllModels(snap core.UsageSnapshot, provColor lipgloss.Color) []modelCostEntry { diff --git a/internal/tui/detail.go b/internal/tui/detail.go index d0f6f2d..3e26ca9 100644 --- a/internal/tui/detail.go +++ b/internal/tui/detail.go @@ -629,354 +629,6 @@ func renderSectionSparklines(sb *strings.Builder, widget core.DashboardWidget, w } } -func renderModelsSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { - models := core.ExtractAnalyticsModelUsage(snap) - if len(models) == 0 { - return - } - - if len(models) > 8 { - models = models[:8] - } - - items := make([]chartItem, 0, len(models)) - for i, model := range models { - if model.CostUSD <= 0 { - continue - } - subLabel := "" - if i == 0 && model.InputTokens > 0 { - subLabel = formatTokens(model.InputTokens) + " in" - } - items = append(items, chartItem{ - Label: prettifyModelName(model.Name), - Value: model.CostUSD, - Color: stableModelColor(model.Name, snap.ProviderID), - SubLabel: subLabel, - }) - } - - if len(items) > 0 { - labelW := 22 - if w < 55 { - labelW = 16 - } - barW := w - labelW - 20 - if barW < 8 { - barW = 8 - } - if barW > 30 { - barW = 30 - } - sb.WriteString(RenderHBarChart(items, barW, labelW) + "\n") - } - - for _, model := range models { - if model.InputTokens <= 0 && model.OutputTokens <= 0 { - continue - } - sb.WriteString("\n") - sb.WriteString(" " + dimStyle.Render("Token breakdown: "+prettifyModelName(model.Name)) + "\n") - sb.WriteString(RenderTokenBreakdown(model.InputTokens, model.OutputTokens, w-4) + "\n") - break - } -} - -func hasAnalyticsModelData(snap core.UsageSnapshot) bool { - return 
len(core.ExtractAnalyticsModelUsage(snap)) > 0 -} - -// hasChartableSeries returns true if at least one daily series has >= 2 data points. -func hasChartableSeries(series map[string][]core.TimePoint) bool { - for _, pts := range series { - if len(pts) >= 2 { - return true - } - } - return false -} - -// hasLanguageMetrics checks if the snapshot contains lang_ metric keys. -func hasLanguageMetrics(snap core.UsageSnapshot) bool { - langs, _ := core.ExtractLanguageUsage(snap) - return len(langs) > 0 -} - -func renderLanguagesSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { - langs, _ := core.ExtractLanguageUsage(snap) - if len(langs) == 0 { - return - } - - total := float64(0) - for _, l := range langs { - total += l.Requests - } - if total <= 0 { - return - } - - maxShow := 10 - if len(langs) > maxShow { - langs = langs[:maxShow] - } - - var items []chartItem - for _, l := range langs { - items = append(items, chartItem{ - Label: l.Name, - Value: l.Requests, - Color: stableModelColor("lang:"+l.Name, "languages"), - }) - } - - labelW := 18 - if w < 55 { - labelW = 14 - } - barW := w - labelW - 20 - if barW < 8 { - barW = 8 - } - if barW > 30 { - barW = 30 - } - - for _, item := range items { - pct := item.Value / total * 100 - label := item.Label - if len(label) > labelW { - label = label[:labelW-1] + "…" - } - - barLen := int(item.Value / items[0].Value * float64(barW)) - if barLen < 1 && item.Value > 0 { - barLen = 1 - } - emptyLen := barW - barLen - bar := lipgloss.NewStyle().Foreground(item.Color).Render(strings.Repeat("█", barLen)) - track := lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("░", emptyLen)) - - pctStr := lipgloss.NewStyle().Foreground(item.Color).Render(fmt.Sprintf("%4.1f%%", pct)) - countStr := dimStyle.Render(formatNumber(item.Value) + " req") - - sb.WriteString(fmt.Sprintf(" %s %s%s %s %s\n", - labelStyle.Width(labelW).Render(label), - bar, track, pctStr, countStr)) - } - - if len(langs) > maxShow { - remaining 
:= len(langs) - maxShow - if remaining > 0 { - sb.WriteString(" " + dimStyle.Render(fmt.Sprintf("+ %d more languages", remaining)) + "\n") - } - } -} - -// hasMCPMetrics checks if the snapshot contains any MCP metric keys. -func hasMCPMetrics(snap core.UsageSnapshot) bool { - servers, _ := core.ExtractMCPUsage(snap) - return len(servers) > 0 -} - -// renderMCPSection renders MCP server and function call metrics. -// Uses prettifyMCPServerName/prettifyMCPFunctionName from tiles.go (same package). -func renderMCPSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { - rawServers, _ := core.ExtractMCPUsage(snap) - servers := make([]struct { - name string - calls float64 - funcs []struct { - name string - calls float64 - } - }, 0, len(rawServers)) - for _, rawServer := range rawServers { - server := struct { - name string - calls float64 - funcs []struct { - name string - calls float64 - } - }{ - name: prettifyMCPServerName(rawServer.RawName), - calls: rawServer.Calls, - } - for _, rawFunc := range rawServer.Functions { - server.funcs = append(server.funcs, struct { - name string - calls float64 - }{ - name: prettifyMCPFunctionName(rawFunc.RawName), - calls: rawFunc.Calls, - }) - } - servers = append(servers, server) - } - if len(servers) == 0 { - return - } - - var totalCalls float64 - for _, srv := range servers { - totalCalls += srv.calls - } - if totalCalls <= 0 { - return - } - - // Render stacked bar. - barW := w - 4 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - // Build color map using prettified names (same as tile). - var allEntries []toolMixEntry - for _, srv := range servers { - allEntries = append(allEntries, toolMixEntry{name: srv.name, count: srv.calls}) - } - toolColors := buildToolColorMap(allEntries, snap.AccountID) - - sb.WriteString(fmt.Sprintf(" %s\n", renderToolMixBar(allEntries, totalCalls, barW, toolColors))) - - // Render server + function rows. 
- for i, srv := range servers { - toolColor := colorForTool(toolColors, srv.name) - colorDot := lipgloss.NewStyle().Foreground(toolColor).Render("■") - serverLabel := fmt.Sprintf("%s %d %s", colorDot, i+1, srv.name) - pct := srv.calls / totalCalls * 100 - valueStr := fmt.Sprintf("%2.0f%% %s calls", pct, shortCompact(srv.calls)) - sb.WriteString(renderDotLeaderRow(serverLabel, valueStr, w-2)) - sb.WriteString("\n") - - // Show up to 8 functions. - maxFuncs := 8 - if len(srv.funcs) < maxFuncs { - maxFuncs = len(srv.funcs) - } - for j := 0; j < maxFuncs; j++ { - fn := srv.funcs[j] - fnLabel := " " + fn.name - fnValue := fmt.Sprintf("%s calls", shortCompact(fn.calls)) - sb.WriteString(renderDotLeaderRow(fnLabel, fnValue, w-2)) - sb.WriteString("\n") - } - if len(srv.funcs) > 8 { - sb.WriteString(dimStyle.Render(fmt.Sprintf(" + %d more functions", len(srv.funcs)-8))) - sb.WriteString("\n") - } - } - - // Footer. - footer := fmt.Sprintf("%d servers · %.0f calls", len(servers), totalCalls) - sb.WriteString(" " + dimStyle.Render(footer) + "\n") -} - -// hasModelCostMetrics checks if the snapshot contains model cost metric keys. -func hasModelCostMetrics(snap core.UsageSnapshot) bool { - for key := range snap.Metrics { - if core.IsModelCostMetricKey(key) { - return true - } - } - return false -} - -// renderTrendsSection renders DailySeries data as a braille chart for the primary series -// and sparklines for secondary series. -func renderTrendsSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { - if len(snap.DailySeries) == 0 { - return - } - - // Pick primary series key. - primaryCandidates := []string{"cost", "tokens_total", "messages", "requests", "sessions"} - primaryKey := "" - for _, key := range primaryCandidates { - if pts, ok := snap.DailySeries[key]; ok && len(pts) >= 2 { - primaryKey = key - break - } - } - - // If no candidate found, pick the first series with enough points. 
- if primaryKey == "" { - for key, pts := range snap.DailySeries { - if len(pts) >= 2 { - primaryKey = key - break - } - } - } - - if primaryKey == "" { - return - } - - // Render primary series as braille chart. - pts := snap.DailySeries[primaryKey] - yFmt := formatChartValue - if primaryKey == "cost" { - yFmt = formatCostAxis - } - - chartW := w - 4 - if chartW < 30 { - chartW = 30 - } - chartH := 6 - if w < 60 { - chartH = 4 - } - - series := []BrailleSeries{{ - Label: metricLabel(widget, primaryKey), - Color: colorTeal, - Points: pts, - }} - - chart := RenderBrailleChart(metricLabel(widget, primaryKey), series, chartW, chartH, yFmt) - if chart != "" { - sb.WriteString(chart) - } - - // Render remaining series as sparklines. - sparkW := w - 8 - if sparkW < 12 { - sparkW = 12 - } - if sparkW > 60 { - sparkW = 60 - } - - colors := []lipgloss.Color{colorSapphire, colorGreen, colorPeach, colorLavender} - colorIdx := 0 - - for _, candidate := range primaryCandidates { - if candidate == primaryKey { - continue - } - seriesPts, ok := snap.DailySeries[candidate] - if !ok || len(seriesPts) < 2 { - continue - } - values := make([]float64, len(seriesPts)) - for i, p := range seriesPts { - values[i] = p.Value - } - c := colors[colorIdx%len(colors)] - colorIdx++ - spark := RenderSparkline(values, sparkW, c) - label := metricLabel(widget, candidate) - sb.WriteString(fmt.Sprintf(" %s %s\n", dimStyle.Render(label), spark)) - } -} - // filterNonZeroEntries removes entries where all numeric values are nil or zero, // respecting the widget's suppression configuration. 
func filterNonZeroEntries(entries []metricEntry, widget core.DashboardWidget) []metricEntry { diff --git a/internal/tui/detail_analytics_sections.go b/internal/tui/detail_analytics_sections.go new file mode 100644 index 0000000..49b5b4b --- /dev/null +++ b/internal/tui/detail_analytics_sections.go @@ -0,0 +1,340 @@ +package tui + +import ( + "fmt" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func renderModelsSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { + models := core.ExtractAnalyticsModelUsage(snap) + if len(models) == 0 { + return + } + + if len(models) > 8 { + models = models[:8] + } + + items := make([]chartItem, 0, len(models)) + for i, model := range models { + if model.CostUSD <= 0 { + continue + } + subLabel := "" + if i == 0 && model.InputTokens > 0 { + subLabel = formatTokens(model.InputTokens) + " in" + } + items = append(items, chartItem{ + Label: prettifyModelName(model.Name), + Value: model.CostUSD, + Color: stableModelColor(model.Name, snap.ProviderID), + SubLabel: subLabel, + }) + } + + if len(items) > 0 { + labelW := 22 + if w < 55 { + labelW = 16 + } + barW := w - labelW - 20 + if barW < 8 { + barW = 8 + } + if barW > 30 { + barW = 30 + } + sb.WriteString(RenderHBarChart(items, barW, labelW) + "\n") + } + + for _, model := range models { + if model.InputTokens <= 0 && model.OutputTokens <= 0 { + continue + } + sb.WriteString("\n") + sb.WriteString(" " + dimStyle.Render("Token breakdown: "+prettifyModelName(model.Name)) + "\n") + sb.WriteString(RenderTokenBreakdown(model.InputTokens, model.OutputTokens, w-4) + "\n") + break + } +} + +func hasAnalyticsModelData(snap core.UsageSnapshot) bool { + return len(core.ExtractAnalyticsModelUsage(snap)) > 0 +} + +func hasChartableSeries(series map[string][]core.TimePoint) bool { + for _, pts := range series { + if len(pts) >= 2 { + return true + } + } + return false +} + +func 
hasLanguageMetrics(snap core.UsageSnapshot) bool { + langs, _ := core.ExtractLanguageUsage(snap) + return len(langs) > 0 +} + +func renderLanguagesSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { + langs, _ := core.ExtractLanguageUsage(snap) + if len(langs) == 0 { + return + } + + total := float64(0) + for _, l := range langs { + total += l.Requests + } + if total <= 0 { + return + } + + maxShow := 10 + if len(langs) > maxShow { + langs = langs[:maxShow] + } + + var items []chartItem + for _, l := range langs { + items = append(items, chartItem{ + Label: l.Name, + Value: l.Requests, + Color: stableModelColor("lang:"+l.Name, "languages"), + }) + } + + labelW := 18 + if w < 55 { + labelW = 14 + } + barW := w - labelW - 20 + if barW < 8 { + barW = 8 + } + if barW > 30 { + barW = 30 + } + + for _, item := range items { + pct := item.Value / total * 100 + label := item.Label + if len(label) > labelW { + label = label[:labelW-1] + "…" + } + + barLen := int(item.Value / items[0].Value * float64(barW)) + if barLen < 1 && item.Value > 0 { + barLen = 1 + } + emptyLen := barW - barLen + bar := lipgloss.NewStyle().Foreground(item.Color).Render(strings.Repeat("█", barLen)) + track := lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("░", emptyLen)) + + pctStr := lipgloss.NewStyle().Foreground(item.Color).Render(fmt.Sprintf("%4.1f%%", pct)) + countStr := dimStyle.Render(formatNumber(item.Value) + " req") + + sb.WriteString(fmt.Sprintf(" %s %s%s %s %s\n", + labelStyle.Width(labelW).Render(label), + bar, track, pctStr, countStr)) + } + + if len(langs) > maxShow { + remaining := len(langs) - maxShow + if remaining > 0 { + sb.WriteString(" " + dimStyle.Render(fmt.Sprintf("+ %d more languages", remaining)) + "\n") + } + } +} + +func hasMCPMetrics(snap core.UsageSnapshot) bool { + servers, _ := core.ExtractMCPUsage(snap) + return len(servers) > 0 +} + +func renderMCPSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { + rawServers, _ := 
core.ExtractMCPUsage(snap) + servers := make([]struct { + name string + calls float64 + funcs []struct { + name string + calls float64 + } + }, 0, len(rawServers)) + for _, rawServer := range rawServers { + server := struct { + name string + calls float64 + funcs []struct { + name string + calls float64 + } + }{ + name: prettifyMCPServerName(rawServer.RawName), + calls: rawServer.Calls, + } + for _, rawFunc := range rawServer.Functions { + server.funcs = append(server.funcs, struct { + name string + calls float64 + }{ + name: prettifyMCPFunctionName(rawFunc.RawName), + calls: rawFunc.Calls, + }) + } + servers = append(servers, server) + } + if len(servers) == 0 { + return + } + + var totalCalls float64 + for _, srv := range servers { + totalCalls += srv.calls + } + if totalCalls <= 0 { + return + } + + barW := w - 4 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + + var allEntries []toolMixEntry + for _, srv := range servers { + allEntries = append(allEntries, toolMixEntry{name: srv.name, count: srv.calls}) + } + toolColors := buildToolColorMap(allEntries, snap.AccountID) + + sb.WriteString(fmt.Sprintf(" %s\n", renderToolMixBar(allEntries, totalCalls, barW, toolColors))) + + for i, srv := range servers { + toolColor := colorForTool(toolColors, srv.name) + colorDot := lipgloss.NewStyle().Foreground(toolColor).Render("■") + serverLabel := fmt.Sprintf("%s %d %s", colorDot, i+1, srv.name) + pct := srv.calls / totalCalls * 100 + valueStr := fmt.Sprintf("%2.0f%% %s calls", pct, shortCompact(srv.calls)) + sb.WriteString(renderDotLeaderRow(serverLabel, valueStr, w-2)) + sb.WriteString("\n") + + maxFuncs := 8 + if len(srv.funcs) < maxFuncs { + maxFuncs = len(srv.funcs) + } + for j := 0; j < maxFuncs; j++ { + fn := srv.funcs[j] + fnLabel := " " + fn.name + fnValue := fmt.Sprintf("%s calls", shortCompact(fn.calls)) + sb.WriteString(renderDotLeaderRow(fnLabel, fnValue, w-2)) + sb.WriteString("\n") + } + if len(srv.funcs) > 8 { + 
sb.WriteString(dimStyle.Render(fmt.Sprintf(" + %d more functions", len(srv.funcs)-8))) + sb.WriteString("\n") + } + } + + footer := fmt.Sprintf("%d servers · %.0f calls", len(servers), totalCalls) + sb.WriteString(" " + dimStyle.Render(footer) + "\n") +} + +func hasModelCostMetrics(snap core.UsageSnapshot) bool { + for key := range snap.Metrics { + if core.IsModelCostMetricKey(key) { + return true + } + } + return false +} + +func renderTrendsSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { + if len(snap.DailySeries) == 0 { + return + } + + primaryCandidates := []string{"cost", "tokens_total", "messages", "requests", "sessions"} + primaryKey := "" + for _, key := range primaryCandidates { + if pts, ok := snap.DailySeries[key]; ok && len(pts) >= 2 { + primaryKey = key + break + } + } + + if primaryKey == "" { + for key, pts := range snap.DailySeries { + if len(pts) >= 2 { + primaryKey = key + break + } + } + } + + if primaryKey == "" { + return + } + + pts := snap.DailySeries[primaryKey] + yFmt := formatChartValue + if primaryKey == "cost" { + yFmt = formatCostAxis + } + + chartW := w - 4 + if chartW < 30 { + chartW = 30 + } + chartH := 6 + if w < 60 { + chartH = 4 + } + + series := []BrailleSeries{{ + Label: metricLabel(widget, primaryKey), + Color: colorTeal, + Points: pts, + }} + + chart := RenderBrailleChart(metricLabel(widget, primaryKey), series, chartW, chartH, yFmt) + if chart != "" { + sb.WriteString(chart) + } + + sparkW := w - 8 + if sparkW < 12 { + sparkW = 12 + } + if sparkW > 60 { + sparkW = 60 + } + + colors := []lipgloss.Color{colorSapphire, colorGreen, colorPeach, colorLavender} + colorIdx := 0 + + for _, candidate := range primaryCandidates { + if candidate == primaryKey { + continue + } + seriesPts, ok := snap.DailySeries[candidate] + if !ok || len(seriesPts) < 2 { + continue + } + values := make([]float64, len(seriesPts)) + for i, p := range seriesPts { + values[i] = p.Value + } + c := 
colors[colorIdx%len(colors)] + colorIdx++ + spark := RenderSparkline(values, sparkW, c) + label := metricLabel(widget, candidate) + sb.WriteString(fmt.Sprintf(" %s %s\n", dimStyle.Render(label), spark)) + } +} From b0fa065c061f578197f1e0c7538e73e04d65ebbf Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 16:08:39 +0100 Subject: [PATCH 18/32] refactor: split usage view aggregate orchestration --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 3 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 33 +++-- internal/telemetry/usage_view.go | 105 +--------------- internal/telemetry/usage_view_aggregate.go | 118 ++++++++++++++++++ 4 files changed, 145 insertions(+), 114 deletions(-) create mode 100644 internal/telemetry/usage_view_aggregate.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 0cf1446..8ae9f7c 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -58,6 +58,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | R38 | Fixed | Shared hook ingest service | `internal/daemon/hook_ingest.go`, `internal/daemon/hook_ingest_local.go`, `internal/daemon/server_http.go`, `internal/daemon/server_spool.go`, `cmd/openusage/telemetry.go` | Hook request parsing and local ingest/spool fallback now live in shared daemon helpers used by CLI fallback, HTTP ingest, and hook-spool replay. The remaining edge code is transport and user messaging only. | Reuse the same helpers if more hook entrypoints are added. | | R39 | Fixed | Usage-view materialization split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_materialize.go` | Temp-table creation/indexing/cleanup and aggregate initialization moved out of the main usage-view orchestration path. | Continue splitting aggregate query fanout if `usage_view.go` grows again. 
| | R40 | Fixed | Analytics cost fallback extraction | `internal/core/analytics_costs.go`, `internal/tui/analytics_data.go` | Analytics all-time/today/week cost fallback rules now live in shared core logic instead of TUI-owned metric-key decoding. | Continue moving remaining analytics/detail metric decoding into shared extractors. | +| R41 | Fixed | Usage-view aggregate fanout split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_aggregate.go` | Query fanout and aggregate assembly now live in a dedicated helper instead of inline in the main usage-view orchestration path. | Continue splitting only if the aggregate helper grows materially. | ## Action Table @@ -67,7 +68,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/dashboardapp/service.go` | Side effects are injected and some detail logic is split, but TUI state-transition and rendering logic is still concentrated in very large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go` | Composition bars, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. 
| | A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/codex/codex.go` | Cursor and OpenRouter are now materially decomposed, but several other providers still combine transport, parsing, normalization, and projection in single 1900-2600 LOC files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | -| A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go` | The usage-view code is materially smaller after the helper/projection/query/materialization splits, but the orchestration path still owns query fanout and aggregate assembly in one place. | Continue splitting remaining orchestration concerns and consider a typed intermediate aggregation model. | Easier optimization and safer incremental changes. | +| A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. 
| Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. | | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. | | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. 
| Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index 4be6d26..b4292d3 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -23,7 +23,7 @@ These were major concerns in earlier reviews and are now materially addressed: - TUI side-effect leakage into config persistence / integration install / provider validation. - Ollama hot-path `time.Now()` usage in behavioral window/reset logic. - Shared hook ingest parsing/local fallback drift between daemon and CLI. -- Usage-view temp-table materialization living inline in the main orchestration path. +- Usage-view temp-table materialization and aggregate query fanout living inline in the main orchestration path. ## Findings @@ -56,7 +56,21 @@ What to address: - Promote remaining analytics/detail extractors into `internal/core`. - Keep renderers as display adapters over typed sections. -### 3. [P2] Several providers are still large mixed-responsibility units +### 3. [P2] Telemetry usage-view orchestration is smaller, but still centralized + +The usage-view path is much cleaner after helper, projection, query, materialization, and aggregate-fanout splits, but the top-level file still coordinates source selection, cache/application flow, and final snapshot application in one place. 
+ +Refs: +- [usage_view.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view.go) +- [usage_view_materialize.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view_materialize.go) +- [usage_view_aggregate.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view_aggregate.go) +- [usage_view_projection.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view_projection.go) + +What to address: +- Keep future telemetry work inside the split helper units. +- Only split the remaining coordinator path further if new behavior starts coupling unrelated concerns again. + +### 4. [P2] Several providers are still large mixed-responsibility units Cursor and OpenRouter are now in much better shape, but several other providers remain monoliths that mix transport, parsing, normalization, and projection in one place. @@ -75,7 +89,7 @@ What to address: - projection helpers - telemetry-specific collectors -### 4. [P3] Ambiguous shared-path local sources still require explicit account disambiguation +### 5. [P3] Ambiguous shared-path local sources still require explicit account disambiguation The daemon now binds local telemetry to configured accounts when the source/account mapping is unambiguous. If multiple accounts share the same source paths, it intentionally degrades to source-scoped attribution instead of silently guessing. That is the correct behavior today, but it means truly ambiguous local multi-account setups still need an explicit binding mechanism if they become a first-class use case. @@ -88,7 +102,7 @@ What to address: - Add persisted source/account alias mapping only if ambiguous local multi-account setups become common. - Keep ambiguous attribution explicit; do not reintroduce silent account guessing. -### 5. [P3] Account config contract cleanup is not finished +### 6. 
[P3] Account config contract cleanup is not finished The hot-path abuse of `Binary`/`BaseURL` is fixed, but the type still allows path-like runtime hints and canonical provider config to coexist ambiguously. @@ -100,7 +114,7 @@ What to address: - Introduce a dedicated typed runtime-hints structure. - Retire compatibility comments and residual semantic ambiguity in `AccountConfig`. -### 6. [P3] Test suites are strong but still expensive to maintain +### 7. [P3] Test suites are strong but still expensive to maintain Some package tests remain extremely large and inline too much fixture logic. @@ -117,10 +131,11 @@ What to address: ## Recommended Order 1. TUI extractor/decomposition follow-through. -2. Remaining provider monolith splits. -3. Telemetry account identity mapping and daemon follow-through. -4. Account config contract hardening. -5. Test fixture cleanup. +2. Telemetry and TUI decomposition follow-through. +3. Remaining provider monolith splits. +4. Telemetry account identity mapping and daemon follow-through. +5. Account config contract hardening. +6. Test fixture cleanup. 
## Notes diff --git a/internal/telemetry/usage_view.go b/internal/telemetry/usage_view.go index 1c4cca0..877c320 100644 --- a/internal/telemetry/usage_view.go +++ b/internal/telemetry/usage_view.go @@ -313,112 +313,9 @@ func loadUsageViewForFilter(ctx context.Context, db *sql.DB, filter usageFilter) if agg.EventCount == 0 { return agg, nil } - - trace := func(label string) func() { - start := time.Now() - return func() { core.Tracef("[usage_view_perf] %s: %dms", label, time.Since(start).Milliseconds()) } - } - - done := trace("queryModelAgg") - models, err := queryModelAgg(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - done = trace("querySourceAgg") - sources, err := querySourceAgg(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - done = trace("queryProjectAgg") - projects, err := queryProjectAgg(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - done = trace("queryToolAgg") - tools, err := queryToolAgg(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - done = trace("queryProviderAgg") - providers, err := queryProviderAgg(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - done = trace("queryLanguageAgg") - languages, err := queryLanguageAgg(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - done = trace("queryActivityAgg") - activity, err := queryActivityAgg(ctx, db, matFilter) - done() - if err != nil { + if err := loadMaterializedUsageAgg(ctx, db, matFilter, agg); err != nil { return nil, err } - done = trace("queryCodeStatsAgg") - codeStats, err := queryCodeStatsAgg(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - done = trace("queryDailyTotals") - daily, err := queryDailyTotals(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - done = trace("queryDailyByDimension(model)") - modelDaily, err := queryDailyByDimension(ctx, db, matFilter, "model") - done() - if err != nil { - return nil, err - } - done 
= trace("queryDailyByDimension(source)") - sourceDaily, err := queryDailyByDimension(ctx, db, matFilter, "source") - done() - if err != nil { - return nil, err - } - done = trace("queryDailyByDimension(project)") - projectDaily, err := queryDailyByDimension(ctx, db, matFilter, "project") - done() - if err != nil { - return nil, err - } - done = trace("queryDailyByDimension(client)") - clientDaily, err := queryDailyByDimension(ctx, db, matFilter, "client") - done() - if err != nil { - return nil, err - } - done = trace("queryDailyClientTokens") - clientTokens, err := queryDailyClientTokens(ctx, db, matFilter) - done() - if err != nil { - return nil, err - } - - agg.Models = models - agg.Providers = providers - agg.Sources = sources - agg.Projects = projects - agg.Tools = tools - agg.MCPServers = buildMCPAgg(tools) - agg.Languages = languages - agg.Activity = activity - agg.CodeStats = codeStats - agg.Daily = daily - agg.ModelDaily = modelDaily - agg.SourceDaily = sourceDaily - agg.ProjectDaily = projectDaily - agg.ClientDaily = clientDaily - agg.ClientTokens = clientTokens core.Tracef("[usage_view_perf] loadUsageViewForFilter TOTAL: %dms (providers=%v)", time.Since(filterStart).Milliseconds(), filter.ProviderIDs) return agg, nil } diff --git a/internal/telemetry/usage_view_aggregate.go b/internal/telemetry/usage_view_aggregate.go new file mode 100644 index 0000000..3c627dd --- /dev/null +++ b/internal/telemetry/usage_view_aggregate.go @@ -0,0 +1,118 @@ +package telemetry + +import ( + "context" + "database/sql" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func loadMaterializedUsageAgg(ctx context.Context, db *sql.DB, filter usageFilter, agg *telemetryUsageAgg) error { + trace := func(label string) func() { + start := time.Now() + return func() { core.Tracef("[usage_view_perf] %s: %dms", label, time.Since(start).Milliseconds()) } + } + + done := trace("queryModelAgg") + models, err := queryModelAgg(ctx, db, filter) + done() + if err != nil 
{ + return err + } + done = trace("querySourceAgg") + sources, err := querySourceAgg(ctx, db, filter) + done() + if err != nil { + return err + } + done = trace("queryProjectAgg") + projects, err := queryProjectAgg(ctx, db, filter) + done() + if err != nil { + return err + } + done = trace("queryToolAgg") + tools, err := queryToolAgg(ctx, db, filter) + done() + if err != nil { + return err + } + done = trace("queryProviderAgg") + providers, err := queryProviderAgg(ctx, db, filter) + done() + if err != nil { + return err + } + done = trace("queryLanguageAgg") + languages, err := queryLanguageAgg(ctx, db, filter) + done() + if err != nil { + return err + } + done = trace("queryActivityAgg") + activity, err := queryActivityAgg(ctx, db, filter) + done() + if err != nil { + return err + } + done = trace("queryCodeStatsAgg") + codeStats, err := queryCodeStatsAgg(ctx, db, filter) + done() + if err != nil { + return err + } + done = trace("queryDailyTotals") + daily, err := queryDailyTotals(ctx, db, filter) + done() + if err != nil { + return err + } + done = trace("queryDailyByDimension(model)") + modelDaily, err := queryDailyByDimension(ctx, db, filter, "model") + done() + if err != nil { + return err + } + done = trace("queryDailyByDimension(source)") + sourceDaily, err := queryDailyByDimension(ctx, db, filter, "source") + done() + if err != nil { + return err + } + done = trace("queryDailyByDimension(project)") + projectDaily, err := queryDailyByDimension(ctx, db, filter, "project") + done() + if err != nil { + return err + } + done = trace("queryDailyByDimension(client)") + clientDaily, err := queryDailyByDimension(ctx, db, filter, "client") + done() + if err != nil { + return err + } + done = trace("queryDailyClientTokens") + clientTokens, err := queryDailyClientTokens(ctx, db, filter) + done() + if err != nil { + return err + } + + agg.Models = models + agg.Providers = providers + agg.Sources = sources + agg.Projects = projects + agg.Tools = tools + agg.MCPServers = 
buildMCPAgg(tools) + agg.Languages = languages + agg.Activity = activity + agg.CodeStats = codeStats + agg.Daily = daily + agg.ModelDaily = modelDaily + agg.SourceDaily = sourceDaily + agg.ProjectDaily = projectDaily + agg.ClientDaily = clientDaily + agg.ClientTokens = clientTokens + return nil +} From 3868806141c6025ed01e3b912ca6d25d6cbc9cc5 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 16:29:47 +0100 Subject: [PATCH 19/32] refactor: split provider display info and share fallback metrics --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 5 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 6 +- internal/core/dashboard_display_metrics.go | 94 +++ .../core/dashboard_display_metrics_test.go | 38 ++ internal/tui/model.go | 592 ------------------ internal/tui/model_display_info.go | 547 ++++++++++++++++ 6 files changed, 686 insertions(+), 596 deletions(-) create mode 100644 internal/core/dashboard_display_metrics.go create mode 100644 internal/core/dashboard_display_metrics_test.go create mode 100644 internal/tui/model_display_info.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 8ae9f7c..25d12ec 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -59,14 +59,15 @@ This table captures every issue found in this pass. It is broad and high-signal, | R39 | Fixed | Usage-view materialization split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_materialize.go` | Temp-table creation/indexing/cleanup and aggregate initialization moved out of the main usage-view orchestration path. | Continue splitting aggregate query fanout if `usage_view.go` grows again. | | R40 | Fixed | Analytics cost fallback extraction | `internal/core/analytics_costs.go`, `internal/tui/analytics_data.go` | Analytics all-time/today/week cost fallback rules now live in shared core logic instead of TUI-owned metric-key decoding. 
| Continue moving remaining analytics/detail metric decoding into shared extractors. | | R41 | Fixed | Usage-view aggregate fanout split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_aggregate.go` | Query fanout and aggregate assembly now live in a dedicated helper instead of inline in the main usage-view orchestration path. | Continue splitting only if the aggregate helper grows materially. | +| R42 | Fixed | Provider display-info split and shared fallback metric helpers | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/core/dashboard_display_metrics.go` | Provider tile display-summary logic moved out of the main TUI model file, and fallback/rate-limit metric selection now lives in shared core helpers instead of ad hoc TUI parsing. | Continue moving the remaining analytics/detail-specific metric decoding into shared extractors. | ## Action Table | ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | -| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/dashboardapp/service.go` | Side effects are injected and some detail logic is split, but TUI state-transition and rendering logic is still concentrated in very large files. 
| Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | -| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go` | Composition bars, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | +| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, and more detail logic is isolated, but TUI state-transition and rendering logic are still concentrated in very large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | +| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Composition bars, provider tile fallback/rate-limit selection, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. 
| Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | | A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/codex/codex.go` | Cursor and OpenRouter are now materially decomposed, but several other providers still combine transport, parsing, normalization, and projection in single 1900-2600 LOC files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. | Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. 
| | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. | diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index b4292d3..004b09a 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -29,10 +29,11 @@ These were major concerns in earlier reviews and are now materially addressed: ### 1. [P2] TUI rendering and state handling are still concentrated in a few very large files -The TUI is much better than before, but [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go), and [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) are still large enough that unrelated concerns move together. 
+The TUI is much better than before, and provider tile display-summary logic no longer lives inline in `model.go`, but [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go), and [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) are still large enough that unrelated concerns move together. Refs: - [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go) +- [model_display_info.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model_display_info.go) - [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go) - [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go) - [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) @@ -44,13 +45,14 @@ What to address: ### 2. [P2] Some analytics/detail sections still decode raw metric-key conventions in UI code -The major composition and token-table paths now use shared extractors, but analytics/detail still contain pockets of renderer-owned key interpretation. That is better than before, but it is still a drift vector. +The major composition paths, provider tile fallback/rate-limit selection, and token-table paths now use shared extractors, but analytics/detail still contain pockets of renderer-owned key interpretation. That is better than before, but it is still a drift vector. 
Refs: - [analytics.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/analytics.go) - [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go) - [usage_breakdowns.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/usage_breakdowns.go) - [analytics_snapshot.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/analytics_snapshot.go) +- [dashboard_display_metrics.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/dashboard_display_metrics.go) What to address: - Promote remaining analytics/detail extractors into `internal/core`. diff --git a/internal/core/dashboard_display_metrics.go b/internal/core/dashboard_display_metrics.go new file mode 100644 index 0000000..1932ed0 --- /dev/null +++ b/internal/core/dashboard_display_metrics.go @@ -0,0 +1,94 @@ +package core + +import ( + "cmp" + "slices" + "strings" +) + +type RateLimitDisplayMetric struct { + Key string + LabelKey string + UsedPercent float64 + UsesRemainingPercent bool + RemainingPercent float64 +} + +func ExtractRateLimitDisplayMetrics(metrics map[string]Metric) []RateLimitDisplayMetric { + out := make([]RateLimitDisplayMetric, 0, len(metrics)) + for key, metric := range metrics { + labelKey, ok := rateLimitLabelKey(key) + if !ok { + continue + } + usedPercent := MetricUsedPercent(key, metric) + if usedPercent < 0 && strings.HasPrefix(key, "rate_limit_") && metric.Unit == "%" && metric.Remaining != nil { + usedPercent = 100 - *metric.Remaining + } + if usedPercent < 0 { + continue + } + entry := RateLimitDisplayMetric{ + Key: key, + LabelKey: labelKey, + UsedPercent: usedPercent, + } + if strings.HasPrefix(key, "rate_limit_") && metric.Unit == "%" && metric.Remaining != nil { + entry.UsesRemainingPercent = true + entry.RemainingPercent = *metric.Remaining + } + out = append(out, entry) + } + slices.SortFunc(out, func(a, b RateLimitDisplayMetric) int { + return cmp.Compare(a.Key, b.Key) + }) + return out +} + +func 
FallbackDisplayMetricKeys(metrics map[string]Metric) []string { + keys := make([]string, 0, len(metrics)) + for key := range metrics { + keys = append(keys, key) + } + slices.Sort(keys) + if len(keys) == 0 { + return nil + } + + filtered := make([]string, 0, len(keys)) + for _, key := range keys { + if hasDisplayExcludedPrefix(key) { + continue + } + filtered = append(filtered, key) + } + if len(filtered) > 0 { + return filtered + } + return keys +} + +func hasDisplayExcludedPrefix(key string) bool { + for _, prefix := range []string{ + "model_", "client_", "tool_", "source_", + "usage_model_", "usage_source_", "usage_client_", + "tokens_client_", "analytics_", + } { + if strings.HasPrefix(key, prefix) { + return true + } + } + return false +} + +func rateLimitLabelKey(key string) (string, bool) { + switch key { + case "rpm", "tpm", "rpd", "tpd": + return key, true + } + if strings.HasPrefix(key, "rate_limit_") { + labelKey := strings.TrimSpace(strings.TrimPrefix(key, "rate_limit_")) + return labelKey, labelKey != "" + } + return "", false +} diff --git a/internal/core/dashboard_display_metrics_test.go b/internal/core/dashboard_display_metrics_test.go new file mode 100644 index 0000000..67b3b11 --- /dev/null +++ b/internal/core/dashboard_display_metrics_test.go @@ -0,0 +1,38 @@ +package core + +import "testing" + +func TestExtractRateLimitDisplayMetrics(t *testing.T) { + remaining := 60.0 + limit := 100.0 + used := 25.0 + metrics := map[string]Metric{ + "rate_limit_primary": {Remaining: &remaining, Unit: "%"}, + "rpm": {Limit: &limit, Used: &used}, + "tokens_total": {Used: Float64Ptr(12)}, + } + + got := ExtractRateLimitDisplayMetrics(metrics) + if len(got) != 2 { + t.Fatalf("len = %d, want 2", len(got)) + } + if got[0].LabelKey != "primary" || !got[0].UsesRemainingPercent { + t.Fatalf("first = %+v, want primary remaining metric", got[0]) + } + if got[1].LabelKey != "rpm" || got[1].UsedPercent != 25 { + t.Fatalf("second = %+v, want rpm used=25", got[1]) + } +} + 
+func TestFallbackDisplayMetricKeys(t *testing.T) { + metrics := map[string]Metric{ + "usage_model_sonnet": {Used: Float64Ptr(1)}, + "messages_today": {Used: Float64Ptr(2)}, + "analytics_score": {Used: Float64Ptr(3)}, + } + + got := FallbackDisplayMetricKeys(metrics) + if len(got) != 1 || got[0] != "messages_today" { + t.Fatalf("got = %#v, want [messages_today]", got) + } +} diff --git a/internal/tui/model.go b/internal/tui/model.go index 95ff964..4d00d87 100644 --- a/internal/tui/model.go +++ b/internal/tui/model.go @@ -793,598 +793,6 @@ func (m Model) renderListItem(snap core.UsageSnapshot, selected bool, w int) str return result } -type providerDisplayInfo struct { - tagEmoji string // "💰", "⚡", "🔑", "⚠", "◇" - tagLabel string // "Credits", "Usage", "Error", "Auth", "N/A" - summary string // Primary summary (e.g. "$4.23 today · $0.82/h") - detail string // Secondary detail (e.g. "Primary 3% · Secondary 15%") - gaugePercent float64 // 0-100 used %. -1 if not applicable. - reason string // Decision branch name for diagnostics (e.g. "usage_five_hour", "spend_limit") -} - -func computeDisplayInfo(snap core.UsageSnapshot, widget core.DashboardWidget) providerDisplayInfo { - return normalizeProviderDisplayInfoType(computeDisplayInfoRaw(snap, widget)) -} - -func normalizeProviderDisplayInfoType(info providerDisplayInfo) providerDisplayInfo { - switch info.tagLabel { - case "Credits": - info.tagEmoji = "💰" - case "Usage": - info.tagEmoji = "⚡" - case "Error", "Auth", "N/A", "": - // Status and empty labels are allowed as-is. - default: - // Enforce only two billing types for provider tags. 
- info.tagLabel = "Usage" - info.tagEmoji = "⚡" - } - return info -} - -func computeDisplayInfoRaw(snap core.UsageSnapshot, widget core.DashboardWidget) providerDisplayInfo { - info := providerDisplayInfo{gaugePercent: -1} - - switch snap.Status { - case core.StatusError: - info.tagEmoji = "⚠" - info.tagLabel = "Error" - info.reason = "status_error" - msg := snap.Message - if len(msg) > 50 { - msg = msg[:47] + "..." - } - if msg == "" { - msg = "Error" - } - info.summary = msg - core.Tracef("[display] %s: branch=status_error", snap.ProviderID) - return info - case core.StatusAuth: - info.tagEmoji = "🔑" - info.tagLabel = "Auth" - info.reason = "status_auth" - info.summary = "Authentication required" - core.Tracef("[display] %s: branch=status_auth", snap.ProviderID) - return info - case core.StatusUnsupported: - info.tagEmoji = "◇" - info.tagLabel = "N/A" - info.reason = "status_unsupported" - info.summary = "Not supported" - core.Tracef("[display] %s: branch=status_unsupported", snap.ProviderID) - return info - } - - core.Tracef("[display] %s: checking metrics (%d total), has usage_five_hour=%v, has today_api_cost=%v, has spend_limit=%v", - snap.ProviderID, len(snap.Metrics), - snap.Metrics["usage_five_hour"].Used != nil, - snap.Metrics["today_api_cost"].Used != nil, - snap.Metrics["spend_limit"].Limit != nil) - - if m, ok := snap.Metrics["spend_limit"]; ok && m.Limit != nil && m.Used != nil { - remaining := *m.Limit - *m.Used - if m.Remaining != nil { - remaining = *m.Remaining - } - info.tagEmoji = "💰" - info.tagLabel = "Credits" - info.reason = "spend_limit" - info.summary = fmt.Sprintf("$%.0f / $%.0f spent", *m.Used, *m.Limit) - info.detail = fmt.Sprintf("$%.0f remaining", remaining) - // Add self vs team breakdown when individual spend is available - if indiv, ok2 := snap.Metrics["individual_spend"]; ok2 && indiv.Used != nil { - otherSpend := *m.Used - *indiv.Used - if otherSpend < 0 { - otherSpend = 0 - } - info.detail = fmt.Sprintf("you $%.0f · team $%.0f · 
$%.0f remaining", *indiv.Used, otherSpend, remaining) - } - if pct := m.Percent(); pct >= 0 { - info.gaugePercent = 100 - pct - } - core.Tracef("[display] %s: branch=spend_limit used=%.2f limit=%.2f gauge=%.1f", snap.ProviderID, *m.Used, *m.Limit, info.gaugePercent) - return info - } - - if m, ok := snap.Metrics["plan_spend"]; ok && m.Used != nil && m.Limit != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - info.summary = fmt.Sprintf("$%.0f / $%.0f plan", *m.Used, *m.Limit) - if pct := m.Percent(); pct >= 0 { - info.gaugePercent = 100 - pct - } - if pu, ok2 := snap.Metrics["plan_percent_used"]; ok2 && pu.Used != nil { - info.detail = fmt.Sprintf("%.0f%% plan used", *pu.Used) - } - return info - } - - if m, ok := snap.Metrics["plan_total_spend_usd"]; ok && m.Used != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - if lm, ok2 := snap.Metrics["plan_limit_usd"]; ok2 && lm.Limit != nil { - info.summary = fmt.Sprintf("$%.2f / $%.0f plan", *m.Used, *lm.Limit) - } else { - info.summary = fmt.Sprintf("$%.2f spent", *m.Used) - } - return info - } - - // Style hooks for richer credit summaries. 
- if widget.DisplayStyle == core.DashboardDisplayStyleDetailedCredits { - return computeDetailedCreditsDisplayInfo(snap, info) - } - - if m, ok := snap.Metrics["credits"]; ok { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - if m.Remaining != nil && m.Limit != nil { - info.summary = fmt.Sprintf("$%.2f / $%.2f credits", *m.Remaining, *m.Limit) - if pct := m.Percent(); pct >= 0 { - info.gaugePercent = 100 - pct - } - } else if m.Used != nil { - info.summary = fmt.Sprintf("$%.4f used", *m.Used) - } else { - info.summary = "Credits available" - } - return info - } - if m, ok := snap.Metrics["credit_balance"]; ok && m.Remaining != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - if m.Limit != nil { - info.summary = fmt.Sprintf("$%.2f / $%.2f", *m.Remaining, *m.Limit) - if pct := m.Percent(); pct >= 0 { - info.gaugePercent = 100 - pct - } - } else { - info.summary = fmt.Sprintf("$%.2f balance", *m.Remaining) - } - return info - } - if m, ok := snap.Metrics["total_balance"]; ok && m.Remaining != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - info.summary = fmt.Sprintf("%.2f %s available", *m.Remaining, m.Unit) - return info - } - - quotaKey := "" - for _, key := range []string{"quota_pro", "quota", "quota_flash"} { - if _, ok := snap.Metrics[key]; ok { - quotaKey = key - break - } - } - if quotaKey != "" { - m := snap.Metrics[quotaKey] - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - if pct := core.MetricUsedPercent(quotaKey, m); pct >= 0 { - info.gaugePercent = pct - info.summary = fmt.Sprintf("%.0f%% usage used", pct) - } - if m.Remaining != nil { - info.detail = fmt.Sprintf("%.0f%% usage left", *m.Remaining) - } - return info - } - - if m, ok := snap.Metrics["context_window"]; ok && m.Used != nil && m.Limit != nil { - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - if pct := m.Percent(); pct >= 0 { - info.gaugePercent = pct - info.summary = fmt.Sprintf("%.0f%% usage used", pct) - } - info.detail = fmt.Sprintf("%s / %s tokens", 
shortCompact(*m.Used), shortCompact(*m.Limit)) - return info - } - - hasRateLimits := false - worstRatePct := float64(100) - var rateParts []string - for key, m := range snap.Metrics { - isRate := strings.HasPrefix(key, "rate_limit_") || - key == "rpm" || key == "tpm" || key == "rpd" || key == "tpd" - if !isRate { - continue - } - hasRateLimits = true - pct := m.Percent() - if pct >= 0 && pct < worstRatePct { - worstRatePct = pct - } - if m.Unit == "%" && m.Remaining != nil { - label := metricLabel(widget, strings.TrimPrefix(key, "rate_limit_")) - rateParts = append(rateParts, fmt.Sprintf("%s %.0f%%", label, 100-*m.Remaining)) - } else if pct >= 0 { - label := strings.ToUpper(key) - rateParts = append(rateParts, fmt.Sprintf("%s %.0f%%", label, 100-pct)) - } - } - if hasRateLimits { - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - info.gaugePercent = 100 - worstRatePct - info.summary = fmt.Sprintf("%.0f%% used", 100-worstRatePct) - if len(rateParts) > 0 { - sort.Strings(rateParts) - info.detail = strings.Join(rateParts, " · ") - } - return info - } - - if fh, ok := snap.Metrics["usage_five_hour"]; ok && fh.Used != nil { - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - info.reason = "usage_five_hour" - - info.gaugePercent = *fh.Used - parts := []string{fmt.Sprintf("5h %.0f%%", *fh.Used)} - - if sd, ok2 := snap.Metrics["usage_seven_day"]; ok2 && sd.Used != nil { - parts = append(parts, fmt.Sprintf("7d %.0f%%", *sd.Used)) - if *sd.Used > info.gaugePercent { - info.gaugePercent = *sd.Used - } - } - info.summary = strings.Join(parts, " · ") - - var detailParts []string - if dc, ok2 := snap.Metrics["today_api_cost"]; ok2 && dc.Used != nil { - tag := metricWindowTag(dc) - if tag != "" { - detailParts = append(detailParts, fmt.Sprintf("~$%.2f %s", *dc.Used, tag)) - } else { - detailParts = append(detailParts, fmt.Sprintf("~$%.2f", *dc.Used)) - } - } - if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { - detailParts = append(detailParts, 
fmt.Sprintf("$%.2f/h", *br.Used)) - } - info.detail = strings.Join(detailParts, " · ") - core.Tracef("[display] %s: branch=usage_five_hour used=%.1f gauge=%.1f → tag=Usage", snap.ProviderID, *fh.Used, info.gaugePercent) - return info - } - - // Billing block fallback: JSONL data confirms a 5h billing block exists - // but Usage API percentage is unavailable. Classify as "Usage" (not "Credits"). - if _, hasBillingBlock := snap.Resets["billing_block"]; hasBillingBlock { - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - info.reason = "billing_block_fallback" - - var parts []string - if dc, ok2 := snap.Metrics["today_api_cost"]; ok2 && dc.Used != nil { - tag := metricWindowTag(dc) - if tag != "" { - parts = append(parts, fmt.Sprintf("~$%.2f %s", *dc.Used, tag)) - } else { - parts = append(parts, fmt.Sprintf("~$%.2f", *dc.Used)) - } - } - if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { - parts = append(parts, fmt.Sprintf("$%.2f/h", *br.Used)) - } - info.summary = strings.Join(parts, " · ") - - var detailParts []string - if bc, ok2 := snap.Metrics["5h_block_cost"]; ok2 && bc.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("~$%.2f 5h block", *bc.Used)) - } - if wc, ok2 := snap.Metrics["7d_api_cost"]; ok2 && wc.Used != nil { - tag := metricWindowTag(wc) - if tag != "" { - detailParts = append(detailParts, fmt.Sprintf("~$%.2f/%s", *wc.Used, tag)) - } else { - detailParts = append(detailParts, fmt.Sprintf("~$%.2f", *wc.Used)) - } - } - if msgs, ok2 := snap.Metrics["messages_today"]; ok2 && msgs.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("%.0f msgs", *msgs.Used)) - } - if sess, ok2 := snap.Metrics["sessions_today"]; ok2 && sess.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("%.0f sessions", *sess.Used)) - } - info.detail = strings.Join(detailParts, " · ") - core.Tracef("[display] %s: branch=billing_block_fallback → tag=Usage", snap.ProviderID) - return info - } - - if m, ok := snap.Metrics["today_api_cost"]; ok 
&& m.Used != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - info.reason = "today_api_cost" - core.Tracef("[display] %s: branch=today_api_cost used=%.2f → tag=Credits", snap.ProviderID, *m.Used) - tag := metricWindowTag(m) - var costLabel string - if tag != "" { - costLabel = fmt.Sprintf("~$%.2f %s", *m.Used, tag) - } else { - costLabel = fmt.Sprintf("~$%.2f", *m.Used) - } - parts := []string{costLabel} - if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { - parts = append(parts, fmt.Sprintf("$%.2f/h", *br.Used)) - } - info.summary = strings.Join(parts, " · ") - - var detailParts []string - if bc, ok2 := snap.Metrics["5h_block_cost"]; ok2 && bc.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("~$%.2f 5h block", *bc.Used)) - } - if wc, ok2 := snap.Metrics["7d_api_cost"]; ok2 && wc.Used != nil { - wcTag := metricWindowTag(wc) - if wcTag != "" { - detailParts = append(detailParts, fmt.Sprintf("~$%.2f/%s", *wc.Used, wcTag)) - } else { - detailParts = append(detailParts, fmt.Sprintf("~$%.2f", *wc.Used)) - } - } - if msgs, ok2 := snap.Metrics["messages_today"]; ok2 && msgs.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("%.0f msgs", *msgs.Used)) - } - if sess, ok2 := snap.Metrics["sessions_today"]; ok2 && sess.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("%.0f sessions", *sess.Used)) - } - info.detail = strings.Join(detailParts, " · ") - return info - } - - if m, ok := snap.Metrics["5h_block_cost"]; ok && m.Used != nil { - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - info.summary = fmt.Sprintf("~$%.2f / 5h block", *m.Used) - if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { - info.detail = fmt.Sprintf("$%.2f/h burn rate", *br.Used) - } - return info - } - - hasUsage := false - worstUsagePct := float64(100) - var usageKey string - usageKeys := sortedMetricKeys(snap.Metrics) - for _, key := range usageKeys { - m := snap.Metrics[key] - pct := m.Percent() - if pct >= 0 { - hasUsage = true - 
if pct < worstUsagePct { - worstUsagePct = pct - usageKey = key - } - } - } - if hasUsage { - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - info.gaugePercent = 100 - worstUsagePct - info.summary = fmt.Sprintf("%.0f%% used", 100-worstUsagePct) - if snap.ProviderID == "gemini_cli" { - if m, ok := snap.Metrics["total_conversations"]; ok && m.Used != nil { - info.detail = fmt.Sprintf("%.0f conversations", *m.Used) - return info - } - if m, ok := snap.Metrics["messages_today"]; ok && m.Used != nil { - info.detail = fmt.Sprintf("%.0f msgs today", *m.Used) - return info - } - return info - } - if usageKey != "" { - qm := snap.Metrics[usageKey] - parts := []string{metricLabel(widget, usageKey)} - if qm.Window != "" && qm.Window != "all_time" && qm.Window != "current_period" { - parts = append(parts, qm.Window) - } - info.detail = strings.Join(parts, " · ") - } - return info - } - - if m, ok := snap.Metrics["total_cost_usd"]; ok && m.Used != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - info.summary = fmt.Sprintf("$%.2f total", *m.Used) - return info - } - if m, ok := snap.Metrics["all_time_api_cost"]; ok && m.Used != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - info.summary = fmt.Sprintf("~$%.2f total (API est.)", *m.Used) - return info - } - - if m, ok := snap.Metrics["messages_today"]; ok && m.Used != nil { - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - info.summary = fmt.Sprintf("%.0f msgs today", *m.Used) - var detailParts []string - if tc, ok2 := snap.Metrics["tool_calls_today"]; ok2 && tc.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("%.0f tools", *tc.Used)) - } - if sc, ok2 := snap.Metrics["sessions_today"]; ok2 && sc.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("%.0f sessions", *sc.Used)) - } - info.detail = strings.Join(detailParts, " · ") - return info - } - - for _, key := range fallbackDisplayMetricKeys(snap.Metrics) { - m := snap.Metrics[key] - if m.Used != nil { - info.tagEmoji = "⚡" - 
info.tagLabel = "Usage" - info.summary = fmt.Sprintf("%s: %s %s", metricLabel(widget, key), formatNumber(*m.Used), m.Unit) - return info - } - } - - if snap.Message != "" { - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - msg := snap.Message - if len(msg) > 50 { - msg = msg[:47] + "..." - } - info.summary = msg - return info - } - - info.tagEmoji = "⚡" - info.tagLabel = "Usage" - if snap.Status == core.StatusUnknown { - info.summary = "Syncing telemetry..." - } else { - info.summary = string(snap.Status) - } - return info -} - -func fallbackDisplayMetricKeys(metrics map[string]core.Metric) []string { - keys := sortedMetricKeys(metrics) - if len(keys) == 0 { - return nil - } - - excludePrefixes := []string{ - "model_", "client_", "tool_", "source_", - "usage_model_", "usage_source_", "usage_client_", - "tokens_client_", "analytics_", - } - filtered := lo.Filter(keys, func(key string, _ int) bool { - return !lo.SomeBy(excludePrefixes, func(prefix string) bool { - return strings.HasPrefix(key, prefix) - }) - }) - if len(filtered) > 0 { - return filtered - } - return keys -} - -// computeDetailedCreditsDisplayInfo renders a richer credits summary/detail view -// for providers that expose both balance and usage dimensions. -func computeDetailedCreditsDisplayInfo(snap core.UsageSnapshot, info providerDisplayInfo) providerDisplayInfo { - // Prefer account-level purchased credits when available. 
- if m, ok := snap.Metrics["credit_balance"]; ok && m.Limit != nil && m.Remaining != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - spent := *m.Limit - *m.Remaining - if m.Used != nil { - spent = *m.Used - } - info.summary = fmt.Sprintf("$%.2f / $%.2f spent", spent, *m.Limit) - if pct := m.Percent(); pct >= 0 { - info.gaugePercent = 100 - pct - } - - detailParts := []string{fmt.Sprintf("$%.2f remaining", *m.Remaining)} - if dc, ok2 := snap.Metrics["today_cost"]; ok2 && dc.Used != nil { - tag := metricWindowTag(dc) - if tag != "" { - detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *dc.Used)) - } else { - detailParts = append(detailParts, fmt.Sprintf("$%.2f", *dc.Used)) - } - } else if dc, ok2 := snap.Metrics["usage_daily"]; ok2 && dc.Used != nil { - tag := metricWindowTag(dc) - if tag != "" { - detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *dc.Used)) - } else { - detailParts = append(detailParts, fmt.Sprintf("$%.2f", *dc.Used)) - } - } - if wc, ok2 := snap.Metrics["7d_api_cost"]; ok2 && wc.Used != nil { - tag := metricWindowTag(wc) - if tag != "" { - detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *wc.Used)) - } else { - detailParts = append(detailParts, fmt.Sprintf("$%.2f", *wc.Used)) - } - } else if wc, ok2 := snap.Metrics["usage_weekly"]; ok2 && wc.Used != nil { - tag := metricWindowTag(wc) - if tag != "" { - detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *wc.Used)) - } else { - detailParts = append(detailParts, fmt.Sprintf("$%.2f", *wc.Used)) - } - } - if models := snapshotMeta(snap, "activity_models"); models != "" { - detailParts = append(detailParts, fmt.Sprintf("%s models", models)) - } - info.detail = strings.Join(detailParts, " · ") - return info - } - - // Fallback to key-level credits/usage. 
- if m, ok := snap.Metrics["credits"]; ok && m.Used != nil { - info.tagEmoji = "💰" - info.tagLabel = "Credits" - info.summary = fmt.Sprintf("$%.4f used", *m.Used) - - var detailParts []string - if daily, ok := snap.Metrics["usage_daily"]; ok && daily.Used != nil { - tag := metricWindowTag(daily) - if tag != "" { - detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *daily.Used)) - } else { - detailParts = append(detailParts, fmt.Sprintf("$%.2f", *daily.Used)) - } - } - if byok, ok := snap.Metrics["byok_daily"]; ok && byok.Used != nil && *byok.Used > 0 { - detailParts = append(detailParts, fmt.Sprintf("BYOK $%.2f", *byok.Used)) - } - if burn, ok := snap.Metrics["burn_rate"]; ok && burn.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("$%.2f/h", *burn.Used)) - } - if models := snapshotMeta(snap, "activity_models"); models != "" { - detailParts = append(detailParts, fmt.Sprintf("%s models", models)) - } - info.detail = strings.Join(detailParts, " · ") - return info - } - - // Fallback to generic - info.tagEmoji = "💰" - info.tagLabel = "Credits" - info.summary = "Connected" - return info -} - -// windowActivityLine returns a subtle summary of time-windowed telemetry activity. -// Returns "" when there is no telemetry data for the current window. -func windowActivityLine(snap core.UsageSnapshot, tw core.TimeWindow) string { - var parts []string - if m, ok := snap.Metrics["window_requests"]; ok && m.Used != nil && *m.Used > 0 { - parts = append(parts, fmt.Sprintf("%.0f reqs", *m.Used)) - } - if m, ok := snap.Metrics["window_cost"]; ok && m.Used != nil && *m.Used > 0.001 { - parts = append(parts, fmt.Sprintf("$%.2f", *m.Used)) - } - if m, ok := snap.Metrics["window_tokens"]; ok && m.Used != nil && *m.Used > 0 { - parts = append(parts, shortCompact(*m.Used)+" tok") - } - if len(parts) == 0 { - return "" - } - return strings.Join(parts, " · ") + " in " + tw.Label() -} - -// metricWindowTag returns a short display label from a metric's Window field. 
-// For example "1d" → "1d", "7d" → "7d", "30d" → "30d", "all" → "all", "" → "". -func metricWindowTag(met core.Metric) string { - w := strings.TrimSpace(met.Window) - if w == "" { - return "" - } - return w -} - func (m Model) renderDetailPanel(w, h int) string { ids := m.filteredIDs() if len(ids) == 0 || m.cursor >= len(ids) { diff --git a/internal/tui/model_display_info.go b/internal/tui/model_display_info.go new file mode 100644 index 0000000..c0dea22 --- /dev/null +++ b/internal/tui/model_display_info.go @@ -0,0 +1,547 @@ +package tui + +import ( + "fmt" + "sort" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +type providerDisplayInfo struct { + tagEmoji string + tagLabel string + summary string + detail string + gaugePercent float64 + reason string +} + +func computeDisplayInfo(snap core.UsageSnapshot, widget core.DashboardWidget) providerDisplayInfo { + return normalizeProviderDisplayInfoType(computeDisplayInfoRaw(snap, widget)) +} + +func normalizeProviderDisplayInfoType(info providerDisplayInfo) providerDisplayInfo { + switch info.tagLabel { + case "Credits": + info.tagEmoji = "💰" + case "Usage": + info.tagEmoji = "⚡" + case "Error", "Auth", "N/A", "": + default: + info.tagLabel = "Usage" + info.tagEmoji = "⚡" + } + return info +} + +func computeDisplayInfoRaw(snap core.UsageSnapshot, widget core.DashboardWidget) providerDisplayInfo { + info := providerDisplayInfo{gaugePercent: -1} + + switch snap.Status { + case core.StatusError: + info.tagEmoji = "⚠" + info.tagLabel = "Error" + info.reason = "status_error" + msg := snap.Message + if len(msg) > 50 { + msg = msg[:47] + "..." 
+ } + if msg == "" { + msg = "Error" + } + info.summary = msg + core.Tracef("[display] %s: branch=status_error", snap.ProviderID) + return info + case core.StatusAuth: + info.tagEmoji = "🔑" + info.tagLabel = "Auth" + info.reason = "status_auth" + info.summary = "Authentication required" + core.Tracef("[display] %s: branch=status_auth", snap.ProviderID) + return info + case core.StatusUnsupported: + info.tagEmoji = "◇" + info.tagLabel = "N/A" + info.reason = "status_unsupported" + info.summary = "Not supported" + core.Tracef("[display] %s: branch=status_unsupported", snap.ProviderID) + return info + } + + core.Tracef("[display] %s: checking metrics (%d total), has usage_five_hour=%v, has today_api_cost=%v, has spend_limit=%v", + snap.ProviderID, len(snap.Metrics), + snap.Metrics["usage_five_hour"].Used != nil, + snap.Metrics["today_api_cost"].Used != nil, + snap.Metrics["spend_limit"].Limit != nil) + + if m, ok := snap.Metrics["spend_limit"]; ok && m.Limit != nil && m.Used != nil { + remaining := *m.Limit - *m.Used + if m.Remaining != nil { + remaining = *m.Remaining + } + info.tagEmoji = "💰" + info.tagLabel = "Credits" + info.reason = "spend_limit" + info.summary = fmt.Sprintf("$%.0f / $%.0f spent", *m.Used, *m.Limit) + info.detail = fmt.Sprintf("$%.0f remaining", remaining) + if indiv, ok2 := snap.Metrics["individual_spend"]; ok2 && indiv.Used != nil { + otherSpend := *m.Used - *indiv.Used + if otherSpend < 0 { + otherSpend = 0 + } + info.detail = fmt.Sprintf("you $%.0f · team $%.0f · $%.0f remaining", *indiv.Used, otherSpend, remaining) + } + if pct := m.Percent(); pct >= 0 { + info.gaugePercent = 100 - pct + } + core.Tracef("[display] %s: branch=spend_limit used=%.2f limit=%.2f gauge=%.1f", snap.ProviderID, *m.Used, *m.Limit, info.gaugePercent) + return info + } + + if m, ok := snap.Metrics["plan_spend"]; ok && m.Used != nil && m.Limit != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + info.summary = fmt.Sprintf("$%.0f / $%.0f plan", *m.Used, *m.Limit) 
+ if pct := m.Percent(); pct >= 0 { + info.gaugePercent = 100 - pct + } + if pu, ok2 := snap.Metrics["plan_percent_used"]; ok2 && pu.Used != nil { + info.detail = fmt.Sprintf("%.0f%% plan used", *pu.Used) + } + return info + } + + if m, ok := snap.Metrics["plan_total_spend_usd"]; ok && m.Used != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + if lm, ok2 := snap.Metrics["plan_limit_usd"]; ok2 && lm.Limit != nil { + info.summary = fmt.Sprintf("$%.2f / $%.0f plan", *m.Used, *lm.Limit) + } else { + info.summary = fmt.Sprintf("$%.2f spent", *m.Used) + } + return info + } + + if widget.DisplayStyle == core.DashboardDisplayStyleDetailedCredits { + return computeDetailedCreditsDisplayInfo(snap, info) + } + + if m, ok := snap.Metrics["credits"]; ok { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + if m.Remaining != nil && m.Limit != nil { + info.summary = fmt.Sprintf("$%.2f / $%.2f credits", *m.Remaining, *m.Limit) + if pct := m.Percent(); pct >= 0 { + info.gaugePercent = 100 - pct + } + } else if m.Used != nil { + info.summary = fmt.Sprintf("$%.4f used", *m.Used) + } else { + info.summary = "Credits available" + } + return info + } + if m, ok := snap.Metrics["credit_balance"]; ok && m.Remaining != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + if m.Limit != nil { + info.summary = fmt.Sprintf("$%.2f / $%.2f", *m.Remaining, *m.Limit) + if pct := m.Percent(); pct >= 0 { + info.gaugePercent = 100 - pct + } + } else { + info.summary = fmt.Sprintf("$%.2f balance", *m.Remaining) + } + return info + } + if m, ok := snap.Metrics["total_balance"]; ok && m.Remaining != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + info.summary = fmt.Sprintf("%.2f %s available", *m.Remaining, m.Unit) + return info + } + + quotaKey := "" + for _, key := range []string{"quota_pro", "quota", "quota_flash"} { + if _, ok := snap.Metrics[key]; ok { + quotaKey = key + break + } + } + if quotaKey != "" { + m := snap.Metrics[quotaKey] + info.tagEmoji = "⚡" + info.tagLabel = 
"Usage" + if pct := core.MetricUsedPercent(quotaKey, m); pct >= 0 { + info.gaugePercent = pct + info.summary = fmt.Sprintf("%.0f%% usage used", pct) + } + if m.Remaining != nil { + info.detail = fmt.Sprintf("%.0f%% usage left", *m.Remaining) + } + return info + } + + if m, ok := snap.Metrics["context_window"]; ok && m.Used != nil && m.Limit != nil { + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + if pct := m.Percent(); pct >= 0 { + info.gaugePercent = pct + info.summary = fmt.Sprintf("%.0f%% usage used", pct) + } + info.detail = fmt.Sprintf("%s / %s tokens", shortCompact(*m.Used), shortCompact(*m.Limit)) + return info + } + + rateLimits := core.ExtractRateLimitDisplayMetrics(snap.Metrics) + if len(rateLimits) > 0 { + worstRatePct := float64(100) + rateParts := make([]string, 0, len(rateLimits)) + for _, rate := range rateLimits { + if rate.UsedPercent < worstRatePct { + worstRatePct = rate.UsedPercent + } + if rate.UsesRemainingPercent { + label := metricLabel(widget, rate.LabelKey) + rateParts = append(rateParts, fmt.Sprintf("%s %.0f%%", label, 100-rate.RemainingPercent)) + continue + } + rateParts = append(rateParts, fmt.Sprintf("%s %.0f%%", strings.ToUpper(rate.LabelKey), 100-rate.UsedPercent)) + } + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + info.gaugePercent = 100 - worstRatePct + info.summary = fmt.Sprintf("%.0f%% used", 100-worstRatePct) + if len(rateParts) > 0 { + sort.Strings(rateParts) + info.detail = strings.Join(rateParts, " · ") + } + return info + } + + if fh, ok := snap.Metrics["usage_five_hour"]; ok && fh.Used != nil { + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + info.reason = "usage_five_hour" + info.gaugePercent = *fh.Used + parts := []string{fmt.Sprintf("5h %.0f%%", *fh.Used)} + if sd, ok2 := snap.Metrics["usage_seven_day"]; ok2 && sd.Used != nil { + parts = append(parts, fmt.Sprintf("7d %.0f%%", *sd.Used)) + if *sd.Used > info.gaugePercent { + info.gaugePercent = *sd.Used + } + } + info.summary = strings.Join(parts, " · ") + + var 
detailParts []string + if dc, ok2 := snap.Metrics["today_api_cost"]; ok2 && dc.Used != nil { + tag := metricWindowTag(dc) + if tag != "" { + detailParts = append(detailParts, fmt.Sprintf("~$%.2f %s", *dc.Used, tag)) + } else { + detailParts = append(detailParts, fmt.Sprintf("~$%.2f", *dc.Used)) + } + } + if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("$%.2f/h", *br.Used)) + } + info.detail = strings.Join(detailParts, " · ") + core.Tracef("[display] %s: branch=usage_five_hour used=%.1f gauge=%.1f -> tag=Usage", snap.ProviderID, *fh.Used, info.gaugePercent) + return info + } + + if _, hasBillingBlock := snap.Resets["billing_block"]; hasBillingBlock { + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + info.reason = "billing_block_fallback" + + var parts []string + if dc, ok2 := snap.Metrics["today_api_cost"]; ok2 && dc.Used != nil { + tag := metricWindowTag(dc) + if tag != "" { + parts = append(parts, fmt.Sprintf("~$%.2f %s", *dc.Used, tag)) + } else { + parts = append(parts, fmt.Sprintf("~$%.2f", *dc.Used)) + } + } + if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { + parts = append(parts, fmt.Sprintf("$%.2f/h", *br.Used)) + } + info.summary = strings.Join(parts, " · ") + + var detailParts []string + if bc, ok2 := snap.Metrics["5h_block_cost"]; ok2 && bc.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("~$%.2f 5h block", *bc.Used)) + } + if wc, ok2 := snap.Metrics["7d_api_cost"]; ok2 && wc.Used != nil { + tag := metricWindowTag(wc) + if tag != "" { + detailParts = append(detailParts, fmt.Sprintf("~$%.2f/%s", *wc.Used, tag)) + } else { + detailParts = append(detailParts, fmt.Sprintf("~$%.2f", *wc.Used)) + } + } + if msgs, ok2 := snap.Metrics["messages_today"]; ok2 && msgs.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("%.0f msgs", *msgs.Used)) + } + if sess, ok2 := snap.Metrics["sessions_today"]; ok2 && sess.Used != nil { + detailParts = append(detailParts, 
fmt.Sprintf("%.0f sessions", *sess.Used)) + } + info.detail = strings.Join(detailParts, " · ") + core.Tracef("[display] %s: branch=billing_block_fallback -> tag=Usage", snap.ProviderID) + return info + } + + if m, ok := snap.Metrics["today_api_cost"]; ok && m.Used != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + info.reason = "today_api_cost" + core.Tracef("[display] %s: branch=today_api_cost used=%.2f -> tag=Credits", snap.ProviderID, *m.Used) + tag := metricWindowTag(m) + costLabel := fmt.Sprintf("~$%.2f", *m.Used) + if tag != "" { + costLabel = fmt.Sprintf("~$%.2f %s", *m.Used, tag) + } + parts := []string{costLabel} + if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { + parts = append(parts, fmt.Sprintf("$%.2f/h", *br.Used)) + } + info.summary = strings.Join(parts, " · ") + + var detailParts []string + if bc, ok2 := snap.Metrics["5h_block_cost"]; ok2 && bc.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("~$%.2f 5h block", *bc.Used)) + } + if wc, ok2 := snap.Metrics["7d_api_cost"]; ok2 && wc.Used != nil { + wcTag := metricWindowTag(wc) + if wcTag != "" { + detailParts = append(detailParts, fmt.Sprintf("~$%.2f/%s", *wc.Used, wcTag)) + } else { + detailParts = append(detailParts, fmt.Sprintf("~$%.2f", *wc.Used)) + } + } + if msgs, ok2 := snap.Metrics["messages_today"]; ok2 && msgs.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("%.0f msgs", *msgs.Used)) + } + if sess, ok2 := snap.Metrics["sessions_today"]; ok2 && sess.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("%.0f sessions", *sess.Used)) + } + info.detail = strings.Join(detailParts, " · ") + return info + } + + if m, ok := snap.Metrics["5h_block_cost"]; ok && m.Used != nil { + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + info.summary = fmt.Sprintf("~$%.2f / 5h block", *m.Used) + if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { + info.detail = fmt.Sprintf("$%.2f/h burn rate", *br.Used) + } + return info + } + + hasUsage := 
false + worstUsagePct := float64(100) + var usageKey string + for _, key := range sortedMetricKeys(snap.Metrics) { + m := snap.Metrics[key] + pct := m.Percent() + if pct >= 0 { + hasUsage = true + if pct < worstUsagePct { + worstUsagePct = pct + usageKey = key + } + } + } + if hasUsage { + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + info.gaugePercent = 100 - worstUsagePct + info.summary = fmt.Sprintf("%.0f%% used", 100-worstUsagePct) + if snap.ProviderID == "gemini_cli" { + if m, ok := snap.Metrics["total_conversations"]; ok && m.Used != nil { + info.detail = fmt.Sprintf("%.0f conversations", *m.Used) + return info + } + if m, ok := snap.Metrics["messages_today"]; ok && m.Used != nil { + info.detail = fmt.Sprintf("%.0f msgs today", *m.Used) + return info + } + return info + } + if usageKey != "" { + qm := snap.Metrics[usageKey] + parts := []string{metricLabel(widget, usageKey)} + if qm.Window != "" && qm.Window != "all_time" && qm.Window != "current_period" { + parts = append(parts, qm.Window) + } + info.detail = strings.Join(parts, " · ") + } + return info + } + + if m, ok := snap.Metrics["total_cost_usd"]; ok && m.Used != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + info.summary = fmt.Sprintf("$%.2f total", *m.Used) + return info + } + if m, ok := snap.Metrics["all_time_api_cost"]; ok && m.Used != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + info.summary = fmt.Sprintf("~$%.2f total (API est.)", *m.Used) + return info + } + + if m, ok := snap.Metrics["messages_today"]; ok && m.Used != nil { + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + info.summary = fmt.Sprintf("%.0f msgs today", *m.Used) + var detailParts []string + if tc, ok2 := snap.Metrics["tool_calls_today"]; ok2 && tc.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("%.0f tools", *tc.Used)) + } + if sc, ok2 := snap.Metrics["sessions_today"]; ok2 && sc.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("%.0f sessions", *sc.Used)) + } + info.detail = 
strings.Join(detailParts, " · ") + return info + } + + for _, key := range core.FallbackDisplayMetricKeys(snap.Metrics) { + m := snap.Metrics[key] + if m.Used != nil { + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + info.summary = fmt.Sprintf("%s: %s %s", metricLabel(widget, key), formatNumber(*m.Used), m.Unit) + return info + } + } + + if snap.Message != "" { + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + msg := snap.Message + if len(msg) > 50 { + msg = msg[:47] + "..." + } + info.summary = msg + return info + } + + info.tagEmoji = "⚡" + info.tagLabel = "Usage" + if snap.Status == core.StatusUnknown { + info.summary = "Syncing telemetry..." + } else { + info.summary = string(snap.Status) + } + return info +} + +func computeDetailedCreditsDisplayInfo(snap core.UsageSnapshot, info providerDisplayInfo) providerDisplayInfo { + if m, ok := snap.Metrics["credit_balance"]; ok && m.Limit != nil && m.Remaining != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + spent := *m.Limit - *m.Remaining + if m.Used != nil { + spent = *m.Used + } + info.summary = fmt.Sprintf("$%.2f / $%.2f spent", spent, *m.Limit) + if pct := m.Percent(); pct >= 0 { + info.gaugePercent = 100 - pct + } + + detailParts := []string{fmt.Sprintf("$%.2f remaining", *m.Remaining)} + if dc, ok2 := snap.Metrics["today_cost"]; ok2 && dc.Used != nil { + tag := metricWindowTag(dc) + if tag != "" { + detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *dc.Used)) + } else { + detailParts = append(detailParts, fmt.Sprintf("$%.2f", *dc.Used)) + } + } else if dc, ok2 := snap.Metrics["usage_daily"]; ok2 && dc.Used != nil { + tag := metricWindowTag(dc) + if tag != "" { + detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *dc.Used)) + } else { + detailParts = append(detailParts, fmt.Sprintf("$%.2f", *dc.Used)) + } + } + if wc, ok2 := snap.Metrics["7d_api_cost"]; ok2 && wc.Used != nil { + tag := metricWindowTag(wc) + if tag != "" { + detailParts = append(detailParts, fmt.Sprintf("%s 
$%.2f", tag, *wc.Used)) + } else { + detailParts = append(detailParts, fmt.Sprintf("$%.2f", *wc.Used)) + } + } else if wc, ok2 := snap.Metrics["usage_weekly"]; ok2 && wc.Used != nil { + tag := metricWindowTag(wc) + if tag != "" { + detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *wc.Used)) + } else { + detailParts = append(detailParts, fmt.Sprintf("$%.2f", *wc.Used)) + } + } + if models := snapshotMeta(snap, "activity_models"); models != "" { + detailParts = append(detailParts, fmt.Sprintf("%s models", models)) + } + info.detail = strings.Join(detailParts, " · ") + return info + } + + if m, ok := snap.Metrics["credits"]; ok && m.Used != nil { + info.tagEmoji = "💰" + info.tagLabel = "Credits" + info.summary = fmt.Sprintf("$%.4f used", *m.Used) + + var detailParts []string + if daily, ok := snap.Metrics["usage_daily"]; ok && daily.Used != nil { + tag := metricWindowTag(daily) + if tag != "" { + detailParts = append(detailParts, fmt.Sprintf("%s $%.2f", tag, *daily.Used)) + } else { + detailParts = append(detailParts, fmt.Sprintf("$%.2f", *daily.Used)) + } + } + if byok, ok := snap.Metrics["byok_daily"]; ok && byok.Used != nil && *byok.Used > 0 { + detailParts = append(detailParts, fmt.Sprintf("BYOK $%.2f", *byok.Used)) + } + if burn, ok := snap.Metrics["burn_rate"]; ok && burn.Used != nil { + detailParts = append(detailParts, fmt.Sprintf("$%.2f/h", *burn.Used)) + } + if models := snapshotMeta(snap, "activity_models"); models != "" { + detailParts = append(detailParts, fmt.Sprintf("%s models", models)) + } + info.detail = strings.Join(detailParts, " · ") + return info + } + + info.tagEmoji = "💰" + info.tagLabel = "Credits" + info.summary = "Connected" + return info +} + +func windowActivityLine(snap core.UsageSnapshot, tw core.TimeWindow) string { + var parts []string + if m, ok := snap.Metrics["window_requests"]; ok && m.Used != nil && *m.Used > 0 { + parts = append(parts, fmt.Sprintf("%.0f reqs", *m.Used)) + } + if m, ok := snap.Metrics["window_cost"]; 
ok && m.Used != nil && *m.Used > 0.001 { + parts = append(parts, fmt.Sprintf("$%.2f", *m.Used)) + } + if m, ok := snap.Metrics["window_tokens"]; ok && m.Used != nil && *m.Used > 0 { + parts = append(parts, shortCompact(*m.Used)+" tok") + } + if len(parts) == 0 { + return "" + } + return strings.Join(parts, " · ") + " in " + tw.Label() +} + +func metricWindowTag(met core.Metric) string { + return strings.TrimSpace(met.Window) +} From 8342a6b9ffede2373586827f7813407bc1c50770 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 17:31:33 +0100 Subject: [PATCH 20/32] refactor: split codex live and session usage helpers --- internal/providers/codex/codex.go | 1512 --------------------- internal/providers/codex/live_usage.go | 382 ++++++ internal/providers/codex/session_usage.go | 1083 +++++++++++++++ 3 files changed, 1465 insertions(+), 1512 deletions(-) create mode 100644 internal/providers/codex/live_usage.go create mode 100644 internal/providers/codex/session_usage.go diff --git a/internal/providers/codex/codex.go b/internal/providers/codex/codex.go index ddf65c8..e404788 100644 --- a/internal/providers/codex/codex.go +++ b/internal/providers/codex/codex.go @@ -1,25 +1,17 @@ package codex import ( - "bufio" - "bytes" "context" "encoding/json" "errors" - "fmt" - "io" - "net/http" "os" "path/filepath" - "sort" "strconv" "strings" "time" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" - "github.com/janekbaraniewski/openusage/internal/providers/shared" - "github.com/samber/lo" ) const ( @@ -293,1404 +285,6 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa return snap, nil } -func (p *Provider) fetchLiveUsage(ctx context.Context, acct core.AccountConfig, configDir string, snap *core.UsageSnapshot) (bool, error) { - authPath := filepath.Join(configDir, "auth.json") - if acct.ExtraData != nil && acct.ExtraData["auth_file"] != "" { - authPath = 
acct.ExtraData["auth_file"] - } - - data, err := os.ReadFile(authPath) - if err != nil { - return false, nil - } - - var auth authFile - if err := json.Unmarshal(data, &auth); err != nil { - return false, nil - } - - if strings.TrimSpace(auth.Tokens.AccessToken) == "" { - return false, nil - } - - baseURL := resolveChatGPTBaseURL(acct, configDir) - usageURL := usageURLForBase(baseURL) - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, usageURL, nil) - if err != nil { - return false, fmt.Errorf("codex: creating live usage request: %w", err) - } - req.Header.Set("Authorization", "Bearer "+auth.Tokens.AccessToken) - req.Header.Set("Accept", "application/json") - - accountID := core.FirstNonEmpty(auth.Tokens.AccountID, auth.AccountID) - if accountID == "" && acct.ExtraData != nil { - accountID = acct.ExtraData["account_id"] - } - if accountID != "" { - req.Header.Set("ChatGPT-Account-Id", accountID) - } - - if cliVersion := snap.Raw["cli_version"]; cliVersion != "" { - req.Header.Set("User-Agent", "codex-cli/"+cliVersion) - } else { - req.Header.Set("User-Agent", "codex-cli") - } - - resp, err := p.Client().Do(req) - if err != nil { - return false, fmt.Errorf("codex: live usage request failed: %w", err) - } - defer resp.Body.Close() - - body, _ := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden { - return false, fmt.Errorf("%w: HTTP %d", errLiveUsageAuth, resp.StatusCode) - } - if resp.StatusCode != http.StatusOK { - return false, fmt.Errorf("codex: live usage HTTP %d: %s", resp.StatusCode, truncateForError(string(body), maxHTTPErrorBodySize)) - } - - var payload usagePayload - if err := json.Unmarshal(body, &payload); err != nil { - return false, fmt.Errorf("codex: parsing live usage response: %w", err) - } - - summary := applyUsagePayload(&payload, snap) - if summary.limitMetricsApplied > 0 { - snap.Raw["rate_limit_source"] = "live" - } else { - 
clearRateLimitMetrics(snap) - snap.Raw["rate_limit_source"] = "live_unavailable" - snap.Raw["rate_limit_warning"] = "live usage payload did not include limit windows" - } - snap.Raw["quota_api"] = "live" - return true, nil -} - -func applyUsagePayload(payload *usagePayload, snap *core.UsageSnapshot) usageApplySummary { - var summary usageApplySummary - if payload == nil { - return summary - } - - if payload.Email != "" { - snap.Raw["account_email"] = payload.Email - } - if payload.AccountID != "" { - snap.Raw["account_id"] = payload.AccountID - } - if payload.PlanType != "" { - snap.Raw["plan_type"] = payload.PlanType - } - - summary.limitMetricsApplied += applyUsageLimitDetails(payload.RateLimit, "rate_limit_primary", "rate_limit_secondary", snap) - summary.limitMetricsApplied += applyUsageLimitDetails(payload.CodeReviewRateLimit, "rate_limit_code_review_primary", "rate_limit_code_review_secondary", snap) - summary.limitMetricsApplied += applyUsageAdditionalLimits(payload.AdditionalRateLimits, snap) - - if payload.RateLimitStatus != nil { - status := payload.RateLimitStatus - if payload.PlanType == "" && status.PlanType != "" { - snap.Raw["plan_type"] = status.PlanType - } - summary.limitMetricsApplied += applyUsageLimitDetails(status.RateLimit, "rate_limit_primary", "rate_limit_secondary", snap) - summary.limitMetricsApplied += applyUsageLimitDetails(status.CodeReviewRateLimit, "rate_limit_code_review_primary", "rate_limit_code_review_secondary", snap) - summary.limitMetricsApplied += applyUsageAdditionalLimits(status.AdditionalRateLimits, snap) - if payload.Credits == nil { - applyUsageCredits(status.Credits, snap) - } - } - - applyUsageCredits(payload.Credits, snap) - return summary -} - -func applyUsageAdditionalLimits(additional []usageAdditionalLimit, snap *core.UsageSnapshot) int { - applied := 0 - for _, extra := range additional { - limitID := sanitizeMetricName(core.FirstNonEmpty(extra.MeteredFeature, extra.LimitName)) - if limitID == "" || limitID == 
"codex" { - continue - } - - primaryKey := "rate_limit_" + limitID + "_primary" - secondaryKey := "rate_limit_" + limitID + "_secondary" - applied += applyUsageLimitDetails(extra.RateLimit, primaryKey, secondaryKey, snap) - if extra.LimitName != "" { - snap.Raw["rate_limit_"+limitID+"_name"] = extra.LimitName - } - } - return applied -} - -func applyUsageCredits(credits *usageCredits, snap *core.UsageSnapshot) { - if credits == nil { - return - } - - switch { - case credits.Unlimited: - snap.Raw["credits"] = "unlimited" - case credits.HasCredits: - snap.Raw["credits"] = "available" - if formatted := formatCreditsBalance(credits.Balance); formatted != "" { - snap.Raw["credit_balance"] = formatted - } - default: - snap.Raw["credits"] = "none" - } -} - -func formatCreditsBalance(balance any) string { - switch v := balance.(type) { - case nil: - return "" - case string: - if strings.TrimSpace(v) == "" { - return "" - } - if f, err := strconv.ParseFloat(v, 64); err == nil { - return fmt.Sprintf("$%.2f", f) - } - return v - case float64: - return fmt.Sprintf("$%.2f", v) - case json.Number: - if f, err := v.Float64(); err == nil { - return fmt.Sprintf("$%.2f", f) - } - } - return "" -} - -func applyUsageLimitDetails(details *usageLimitDetails, primaryKey, secondaryKey string, snap *core.UsageSnapshot) int { - if details == nil { - return 0 - } - applied := 0 - primary := details.PrimaryWindow - if primary == nil { - primary = details.Primary - } - secondary := details.SecondaryWindow - if secondary == nil { - secondary = details.Secondary - } - if applyUsageWindowMetric(primary, primaryKey, snap) { - applied++ - } - if applyUsageWindowMetric(secondary, secondaryKey, snap) { - applied++ - } - return applied -} - -func applyUsageWindowMetric(window *usageWindowInfo, key string, snap *core.UsageSnapshot) bool { - if window == nil || key == "" { - return false - } - - used, ok := resolveWindowUsedPercent(window) - if !ok { - return false - } - - limit := float64(100) - 
remaining := 100 - used - windowLabel := formatWindow(resolveWindowMinutes(window)) - - snap.Metrics[key] = core.Metric{ - Limit: &limit, - Used: &used, - Remaining: &remaining, - Unit: "%", - Window: windowLabel, - } - - if resetAt := resolveWindowResetAt(window); resetAt > 0 { - snap.Resets[key] = time.Unix(resetAt, 0) - } - return true -} - -func resolveWindowUsedPercent(window *usageWindowInfo) (float64, bool) { - if window == nil { - return 0, false - } - if window.UsedPercent != nil { - return clampPercent(*window.UsedPercent), true - } - if window.RemainingPercent != nil { - return clampPercent(100 - *window.RemainingPercent), true - } - return 0, false -} - -func resolveWindowMinutes(window *usageWindowInfo) int { - if window == nil { - return 0 - } - if window.LimitWindowSeconds > 0 { - return secondsToMinutes(window.LimitWindowSeconds) - } - if window.WindowMinutes > 0 { - return window.WindowMinutes - } - return 0 -} - -func resolveWindowResetAt(window *usageWindowInfo) int64 { - if window == nil { - return 0 - } - switch { - case window.ResetAt > 0: - return window.ResetAt - case window.ResetsAt > 0: - return window.ResetsAt - case window.ResetAfterSeconds > 0: - return time.Now().UTC().Add(time.Duration(window.ResetAfterSeconds) * time.Second).Unix() - default: - return 0 - } -} - -func clearRateLimitMetrics(snap *core.UsageSnapshot) { - for key := range snap.Metrics { - if strings.HasPrefix(key, "rate_limit_") { - delete(snap.Metrics, key) - } - } - for key := range snap.Resets { - if strings.HasPrefix(key, "rate_limit_") { - delete(snap.Resets, key) - } - } -} - -func clampPercent(v float64) float64 { - if v < 0 { - return 0 - } - if v > 100 { - return 100 - } - return v -} - -func secondsToMinutes(seconds int) int { - if seconds <= 0 { - return 0 - } - return (seconds + 59) / 60 -} - -func resolveChatGPTBaseURL(acct core.AccountConfig, configDir string) string { - switch { - case strings.TrimSpace(acct.BaseURL) != "": - return 
normalizeChatGPTBaseURL(acct.BaseURL) - case acct.ExtraData != nil && strings.TrimSpace(acct.ExtraData["chatgpt_base_url"]) != "": - return normalizeChatGPTBaseURL(acct.ExtraData["chatgpt_base_url"]) - default: - if fromConfig := readChatGPTBaseURLFromConfig(configDir); fromConfig != "" { - return normalizeChatGPTBaseURL(fromConfig) - } - } - return normalizeChatGPTBaseURL(defaultChatGPTBaseURL) -} - -func readChatGPTBaseURLFromConfig(configDir string) string { - if strings.TrimSpace(configDir) == "" { - return "" - } - - configPath := filepath.Join(configDir, "config.toml") - data, err := os.ReadFile(configPath) - if err != nil { - return "" - } - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if line == "" || strings.HasPrefix(line, "#") || !strings.Contains(line, "=") { - continue - } - if !strings.HasPrefix(line, "chatgpt_base_url") { - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) != 2 { - continue - } - val := strings.TrimSpace(parts[1]) - val = strings.Trim(val, "\"'") - if val != "" { - return val - } - } - - return "" -} - -func normalizeChatGPTBaseURL(baseURL string) string { - baseURL = strings.TrimSpace(baseURL) - baseURL = strings.TrimRight(baseURL, "/") - if baseURL == "" { - return defaultChatGPTBaseURL - } - if (strings.HasPrefix(baseURL, "https://chatgpt.com") || strings.HasPrefix(baseURL, "https://chat.openai.com")) && - !strings.Contains(baseURL, "/backend-api") { - baseURL += "/backend-api" - } - return baseURL -} - -func usageURLForBase(baseURL string) string { - if strings.Contains(baseURL, "/backend-api") { - return baseURL + "/wham/usage" - } - return baseURL + "/api/codex/usage" -} - -func truncateForError(value string, max int) string { - return shared.Truncate(strings.TrimSpace(value), max) -} - -func (p *Provider) readLatestSession(sessionsDir string, snap *core.UsageSnapshot) error { - latestFile, err := findLatestSessionFile(sessionsDir) - 
if err != nil { - return fmt.Errorf("finding latest session: %w", err) - } - - snap.Raw["latest_session_file"] = filepath.Base(latestFile) - - lastPayload, err := findLastTokenCount(latestFile) - if err != nil { - return fmt.Errorf("reading session: %w", err) - } - - if lastPayload == nil { - return fmt.Errorf("no token_count events in latest session") - } - - if lastPayload.Info != nil { - info := lastPayload.Info - total := info.TotalTokenUsage - - inputTokens := float64(total.InputTokens) - snap.Metrics["session_input_tokens"] = core.Metric{ - Used: &inputTokens, - Unit: "tokens", - Window: "session", - } - - outputTokens := float64(total.OutputTokens) - snap.Metrics["session_output_tokens"] = core.Metric{ - Used: &outputTokens, - Unit: "tokens", - Window: "session", - } - - cachedTokens := float64(total.CachedInputTokens) - snap.Metrics["session_cached_tokens"] = core.Metric{ - Used: &cachedTokens, - Unit: "tokens", - Window: "session", - } - - if total.ReasoningOutputTokens > 0 { - reasoning := float64(total.ReasoningOutputTokens) - snap.Metrics["session_reasoning_tokens"] = core.Metric{ - Used: &reasoning, - Unit: "tokens", - Window: "session", - } - } - - totalTokens := float64(total.TotalTokens) - snap.Metrics["session_total_tokens"] = core.Metric{ - Used: &totalTokens, - Unit: "tokens", - Window: "session", - } - - if info.ModelContextWindow > 0 { - ctxWindow := float64(info.ModelContextWindow) - ctxUsed := float64(total.InputTokens) - snap.Metrics["context_window"] = core.Metric{ - Limit: &ctxWindow, - Used: &ctxUsed, - Unit: "tokens", - } - } - } - - if lastPayload.RateLimits != nil { - rl := lastPayload.RateLimits - rateLimitSet := false - - if rl.Primary != nil { - limit := float64(100) - used := rl.Primary.UsedPercent - remaining := 100 - used - windowStr := formatWindow(rl.Primary.WindowMinutes) - snap.Metrics["rate_limit_primary"] = core.Metric{ - Limit: &limit, - Used: &used, - Remaining: &remaining, - Unit: "%", - Window: windowStr, - } - - if 
rl.Primary.ResetsAt > 0 { - resetTime := time.Unix(rl.Primary.ResetsAt, 0) - snap.Resets["rate_limit_primary"] = resetTime - } - rateLimitSet = true - } - - if rl.Secondary != nil { - limit := float64(100) - used := rl.Secondary.UsedPercent - remaining := 100 - used - windowStr := formatWindow(rl.Secondary.WindowMinutes) - snap.Metrics["rate_limit_secondary"] = core.Metric{ - Limit: &limit, - Used: &used, - Remaining: &remaining, - Unit: "%", - Window: windowStr, - } - - if rl.Secondary.ResetsAt > 0 { - resetTime := time.Unix(rl.Secondary.ResetsAt, 0) - snap.Resets["rate_limit_secondary"] = resetTime - } - rateLimitSet = true - } - - if rl.Credits != nil { - if rl.Credits.Unlimited { - snap.Raw["credits"] = "unlimited" - } else if rl.Credits.HasCredits { - snap.Raw["credits"] = "available" - if rl.Credits.Balance != nil { - snap.Raw["credit_balance"] = fmt.Sprintf("$%.2f", *rl.Credits.Balance) - } - } else { - snap.Raw["credits"] = "none" - } - } - - if rl.PlanType != nil { - snap.Raw["plan_type"] = *rl.PlanType - } - if rateLimitSet && snap.Raw["rate_limit_source"] == "" { - snap.Raw["rate_limit_source"] = "session" - } - } - - return nil -} - -func (p *Provider) readSessionUsageBreakdowns(sessionsDir string, snap *core.UsageSnapshot) error { - modelTotals := make(map[string]tokenUsage) - clientTotals := make(map[string]tokenUsage) - modelDaily := make(map[string]map[string]float64) - clientDaily := make(map[string]map[string]float64) - interfaceDaily := make(map[string]map[string]float64) - dailyTokenTotals := make(map[string]float64) - dailyRequestTotals := make(map[string]float64) - clientSessions := make(map[string]int) - clientRequests := make(map[string]int) - toolCalls := make(map[string]int) - langRequests := make(map[string]int) - callTool := make(map[string]string) - callOutcome := make(map[string]int) - stats := patchStats{ - Files: make(map[string]struct{}), - Deleted: make(map[string]struct{}), - } - today := time.Now().UTC().Format("2006-01-02") - 
totalRequests := 0 - requestsToday := 0 - promptCount := 0 - commits := 0 - completedWithoutCallID := 0 - - walkErr := filepath.Walk(sessionsDir, func(path string, info os.FileInfo, err error) error { - if err != nil || info == nil || info.IsDir() || !strings.HasSuffix(path, ".jsonl") { - return nil - } - - defaultDay := dayFromSessionPath(path, sessionsDir) - sessionClient := "Other" - currentModel := "unknown" - var previous tokenUsage - var hasPrevious bool - var countedSession bool - return walkSessionFile(path, func(record sessionLine) error { - switch { - case record.SessionMeta != nil: - sessionClient = classifyClient(record.SessionMeta.Source, record.SessionMeta.Originator) - if record.SessionMeta.Model != "" { - currentModel = record.SessionMeta.Model - } - case record.TurnContext != nil: - if strings.TrimSpace(record.TurnContext.Model) != "" { - currentModel = record.TurnContext.Model - } - case record.EventPayload != nil: - payload := record.EventPayload - if payload.Type == "user_message" { - promptCount++ - return nil - } - if payload.Type != "token_count" || payload.Info == nil { - return nil - } - - total := payload.Info.TotalTokenUsage - delta := total - if hasPrevious { - delta = usageDelta(total, previous) - if !validUsageDelta(delta) { - delta = total - } - } - previous = total - hasPrevious = true - - if delta.TotalTokens <= 0 { - return nil - } - - modelName := normalizeModelName(currentModel) - clientName := normalizeClientName(sessionClient) - day := dayFromTimestamp(record.Timestamp) - if day == "" { - day = defaultDay - } - - addUsage(modelTotals, modelName, delta) - addUsage(clientTotals, clientName, delta) - addDailyUsage(modelDaily, modelName, day, float64(delta.TotalTokens)) - addDailyUsage(clientDaily, clientName, day, float64(delta.TotalTokens)) - addDailyUsage(interfaceDaily, clientInterfaceBucket(clientName), day, 1) - dailyTokenTotals[day] += float64(delta.TotalTokens) - dailyRequestTotals[day]++ - clientRequests[clientName]++ - 
totalRequests++ - if day == today { - requestsToday++ - } - - if !countedSession { - clientSessions[clientName]++ - countedSession = true - } - case record.ResponseItem != nil: - item := record.ResponseItem - switch item.Type { - case "function_call": - tool := normalizeToolName(item.Name) - recordToolCall(toolCalls, callTool, item.CallID, tool) - if strings.EqualFold(tool, "exec_command") { - var args commandArgs - if json.Unmarshal(item.Arguments, &args) == nil { - recordCommandLanguage(args.Cmd, langRequests) - if commandContainsGitCommit(args.Cmd) { - commits++ - } - } - } - case "custom_tool_call": - tool := normalizeToolName(item.Name) - recordToolCall(toolCalls, callTool, item.CallID, tool) - if strings.EqualFold(tool, "apply_patch") { - stats.PatchCalls++ - accumulatePatchStats(item.Input, &stats, langRequests) - } - case "web_search_call": - recordToolCall(toolCalls, callTool, "", "web_search") - completedWithoutCallID++ - case "function_call_output", "custom_tool_call_output": - setToolCallOutcome(item.CallID, item.Output, callOutcome) - } - } - - return nil - }) - }) - if walkErr != nil { - return fmt.Errorf("walking session files: %w", walkErr) - } - - emitBreakdownMetrics("model", modelTotals, modelDaily, snap) - emitBreakdownMetrics("client", clientTotals, clientDaily, snap) - emitClientSessionMetrics(clientSessions, snap) - emitClientRequestMetrics(clientRequests, snap) - emitToolMetrics(toolCalls, callTool, callOutcome, completedWithoutCallID, snap) - emitLanguageMetrics(langRequests, snap) - emitProductivityMetrics(stats, promptCount, commits, totalRequests, requestsToday, clientSessions, snap) - emitDailyUsageSeries(dailyTokenTotals, dailyRequestTotals, interfaceDaily, snap) - - return nil -} - -func recordToolCall(toolCalls map[string]int, callTool map[string]string, callID, tool string) { - tool = normalizeToolName(tool) - toolCalls[tool]++ - if strings.TrimSpace(callID) != "" { - callTool[callID] = tool - } -} - -func normalizeToolName(tool 
string) string { - tool = strings.TrimSpace(tool) - if tool == "" { - return "unknown" - } - return tool -} - -func setToolCallOutcome(callID, output string, outcomes map[string]int) { - callID = strings.TrimSpace(callID) - if callID == "" { - return - } - outcomes[callID] = inferToolCallOutcome(output) -} - -func inferToolCallOutcome(output string) int { - lower := strings.ToLower(strings.TrimSpace(output)) - if lower == "" { - return 1 - } - if strings.Contains(lower, `"exit_code":0`) || strings.Contains(lower, "process exited with code 0") { - return 1 - } - if strings.Contains(lower, "cancelled") || strings.Contains(lower, "canceled") || strings.Contains(lower, "aborted") { - return 3 - } - if idx := strings.Index(lower, "process exited with code "); idx >= 0 { - rest := lower[idx+len("process exited with code "):] - n := 0 - for _, r := range rest { - if r < '0' || r > '9' { - break - } - n = n*10 + int(r-'0') - } - if n == 0 { - return 1 - } - return 2 - } - if idx := strings.Index(lower, "exit code "); idx >= 0 { - rest := lower[idx+len("exit code "):] - n := 0 - foundDigit := false - for _, r := range rest { - if r < '0' || r > '9' { - if foundDigit { - break - } - continue - } - foundDigit = true - n = n*10 + int(r-'0') - } - if !foundDigit || n == 0 { - return 1 - } - return 2 - } - if strings.Contains(lower, `"exit_code":`) && !strings.Contains(lower, `"exit_code":0`) { - return 2 - } - if strings.Contains(lower, "error") || strings.Contains(lower, "failed") { - return 2 - } - return 1 -} - -func recordCommandLanguage(cmd string, langs map[string]int) { - language := detectCommandLanguage(cmd) - if language != "" { - langs[language]++ - } -} - -func detectCommandLanguage(cmd string) string { - trimmed := strings.TrimSpace(strings.ToLower(cmd)) - if trimmed == "" { - return "" - } - switch { - case strings.Contains(trimmed, " go ") || strings.HasPrefix(trimmed, "go ") || strings.Contains(trimmed, "gofmt ") || strings.Contains(trimmed, "golangci-lint"): - 
return "go" - case strings.Contains(trimmed, " terraform ") || strings.HasPrefix(trimmed, "terraform "): - return "terraform" - case strings.Contains(trimmed, " python ") || strings.HasPrefix(trimmed, "python ") || strings.HasPrefix(trimmed, "python3 "): - return "python" - case strings.Contains(trimmed, " npm ") || strings.HasPrefix(trimmed, "npm ") || strings.Contains(trimmed, " yarn ") || strings.HasPrefix(trimmed, "pnpm ") || strings.Contains(trimmed, " node "): - return "ts" - case strings.Contains(trimmed, " cargo ") || strings.HasPrefix(trimmed, "cargo ") || strings.Contains(trimmed, " rustc "): - return "rust" - case strings.Contains(trimmed, " java ") || strings.HasPrefix(trimmed, "java ") || strings.Contains(trimmed, " gradle ") || strings.Contains(trimmed, " mvn "): - return "java" - case strings.Contains(trimmed, ".log"): - return "log" - case strings.Contains(trimmed, ".txt"): - return "txt" - default: - return "shell" - } -} - -func commandContainsGitCommit(cmd string) bool { - normalized := " " + strings.ToLower(cmd) + " " - return strings.Contains(normalized, " git commit ") -} - -func accumulatePatchStats(input string, stats *patchStats, langs map[string]int) { - if stats == nil { - return - } - lines := strings.Split(input, "\n") - for _, line := range lines { - switch { - case strings.HasPrefix(line, "*** Update File: "): - path := strings.TrimSpace(strings.TrimPrefix(line, "*** Update File: ")) - if path != "" { - stats.Files[path] = struct{}{} - if language := languageFromPath(path); language != "" { - langs[language]++ - } - } - case strings.HasPrefix(line, "*** Add File: "): - path := strings.TrimSpace(strings.TrimPrefix(line, "*** Add File: ")) - if path != "" { - stats.Files[path] = struct{}{} - if language := languageFromPath(path); language != "" { - langs[language]++ - } - } - case strings.HasPrefix(line, "*** Delete File: "): - path := strings.TrimSpace(strings.TrimPrefix(line, "*** Delete File: ")) - if path != "" { - stats.Files[path] 
= struct{}{} - stats.Deleted[path] = struct{}{} - if language := languageFromPath(path); language != "" { - langs[language]++ - } - } - case strings.HasPrefix(line, "*** Move to: "): - path := strings.TrimSpace(strings.TrimPrefix(line, "*** Move to: ")) - if path != "" { - stats.Files[path] = struct{}{} - if language := languageFromPath(path); language != "" { - langs[language]++ - } - } - case strings.HasPrefix(line, "+++ "), strings.HasPrefix(line, "--- "), strings.HasPrefix(line, "***"): - continue - case strings.HasPrefix(line, "+"): - stats.Added++ - case strings.HasPrefix(line, "-"): - stats.Removed++ - } - } -} - -func languageFromPath(path string) string { - lower := strings.ToLower(strings.TrimSpace(path)) - switch { - case strings.HasSuffix(lower, ".go"): - return "go" - case strings.HasSuffix(lower, ".tf"): - return "terraform" - case strings.HasSuffix(lower, ".ts"), strings.HasSuffix(lower, ".tsx"), strings.HasSuffix(lower, ".js"), strings.HasSuffix(lower, ".jsx"): - return "ts" - case strings.HasSuffix(lower, ".py"): - return "python" - case strings.HasSuffix(lower, ".rs"): - return "rust" - case strings.HasSuffix(lower, ".java"): - return "java" - case strings.HasSuffix(lower, ".yaml"), strings.HasSuffix(lower, ".yml"): - return "yaml" - case strings.HasSuffix(lower, ".json"): - return "json" - case strings.HasSuffix(lower, ".md"): - return "md" - case strings.HasSuffix(lower, ".tpl"): - return "tpl" - case strings.HasSuffix(lower, ".txt"): - return "txt" - case strings.HasSuffix(lower, ".log"): - return "log" - case strings.HasSuffix(lower, ".sh"), strings.HasSuffix(lower, ".zsh"), strings.HasSuffix(lower, ".bash"): - return "shell" - default: - return "" - } -} - -func emitClientRequestMetrics(clientRequests map[string]int, snap *core.UsageSnapshot) { - type entry struct { - name string - count int - } - var all []entry - interfaceTotals := make(map[string]float64) - for name, count := range clientRequests { - if count > 0 { - all = append(all, 
entry{name: name, count: count}) - interfaceTotals[clientInterfaceBucket(name)] += float64(count) - } - } - sort.Slice(all, func(i, j int) bool { - if all[i].count == all[j].count { - return all[i].name < all[j].name - } - return all[i].count > all[j].count - }) - for i, item := range all { - if i >= maxBreakdownMetrics { - break - } - value := float64(item.count) - snap.Metrics["client_"+sanitizeMetricName(item.name)+"_requests"] = core.Metric{ - Used: &value, - Unit: "requests", - Window: defaultUsageWindowLabel, - } - } - for bucket, value := range interfaceTotals { - v := value - snap.Metrics["interface_"+sanitizeMetricName(bucket)] = core.Metric{ - Used: &v, - Unit: "requests", - Window: defaultUsageWindowLabel, - } - } -} - -func clientInterfaceBucket(name string) string { - lower := strings.ToLower(strings.TrimSpace(name)) - switch { - case strings.Contains(lower, "desktop"): - return "desktop_app" - case strings.Contains(lower, "cli"), strings.Contains(lower, "exec"), strings.Contains(lower, "terminal"): - return "cli_agents" - case strings.Contains(lower, "ide"), strings.Contains(lower, "vscode"), strings.Contains(lower, "editor"): - return "ide" - case strings.Contains(lower, "cloud"), strings.Contains(lower, "web"): - return "cloud_agents" - case strings.Contains(lower, "human"), strings.Contains(lower, "other"): - return "human" - default: - return sanitizeMetricName(name) - } -} - -func emitToolMetrics(toolCalls map[string]int, callTool map[string]string, callOutcome map[string]int, completedWithoutCallID int, snap *core.UsageSnapshot) { - var all []countEntry - totalCalls := 0 - for name, count := range toolCalls { - if count <= 0 { - continue - } - all = append(all, countEntry{name: name, count: count}) - totalCalls += count - v := float64(count) - snap.Metrics["tool_"+sanitizeMetricName(name)] = core.Metric{ - Used: &v, - Unit: "calls", - Window: defaultUsageWindowLabel, - } - } - if totalCalls <= 0 { - return - } - - sort.Slice(all, func(i, j int) 
bool { - if all[i].count == all[j].count { - return all[i].name < all[j].name - } - return all[i].count > all[j].count - }) - - completed := completedWithoutCallID - errored := 0 - cancelled := 0 - for callID := range callTool { - switch callOutcome[callID] { - case 2: - errored++ - case 3: - cancelled++ - default: - completed++ - } - } - if completed+errored+cancelled < totalCalls { - completed += totalCalls - (completed + errored + cancelled) - } - - totalV := float64(totalCalls) - snap.Metrics["tool_calls_total"] = core.Metric{Used: &totalV, Unit: "calls", Window: defaultUsageWindowLabel} - if completed > 0 { - v := float64(completed) - snap.Metrics["tool_completed"] = core.Metric{Used: &v, Unit: "calls", Window: defaultUsageWindowLabel} - } - if errored > 0 { - v := float64(errored) - snap.Metrics["tool_errored"] = core.Metric{Used: &v, Unit: "calls", Window: defaultUsageWindowLabel} - } - if cancelled > 0 { - v := float64(cancelled) - snap.Metrics["tool_cancelled"] = core.Metric{Used: &v, Unit: "calls", Window: defaultUsageWindowLabel} - } - if totalCalls > 0 { - success := float64(completed) / float64(totalCalls) * 100 - snap.Metrics["tool_success_rate"] = core.Metric{ - Used: &success, - Unit: "%", - Window: defaultUsageWindowLabel, - } - } - snap.Raw["tool_usage"] = formatCountSummary(all, maxBreakdownRaw) -} - -func emitLanguageMetrics(langRequests map[string]int, snap *core.UsageSnapshot) { - var all []countEntry - for language, count := range langRequests { - if count <= 0 { - continue - } - all = append(all, countEntry{name: language, count: count}) - v := float64(count) - snap.Metrics["lang_"+sanitizeMetricName(language)] = core.Metric{ - Used: &v, - Unit: "requests", - Window: defaultUsageWindowLabel, - } - } - if len(all) == 0 { - return - } - sort.Slice(all, func(i, j int) bool { - if all[i].count == all[j].count { - return all[i].name < all[j].name - } - return all[i].count > all[j].count - }) - snap.Raw["language_usage"] = formatCountSummary(all, 
maxBreakdownRaw) -} - -func emitProductivityMetrics(stats patchStats, promptCount, commits, totalRequests, requestsToday int, clientSessions map[string]int, snap *core.UsageSnapshot) { - if totalRequests > 0 { - v := float64(totalRequests) - snap.Metrics["total_ai_requests"] = core.Metric{Used: &v, Unit: "requests", Window: defaultUsageWindowLabel} - snap.Metrics["composer_requests"] = core.Metric{Used: &v, Unit: "requests", Window: defaultUsageWindowLabel} - } - if requestsToday > 0 { - v := float64(requestsToday) - snap.Metrics["requests_today"] = core.Metric{Used: &v, Unit: "requests", Window: "today"} - snap.Metrics["today_composer_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "today"} - } - - totalSessions := 0 - for _, count := range clientSessions { - totalSessions += count - } - if totalSessions > 0 { - v := float64(totalSessions) - snap.Metrics["composer_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: defaultUsageWindowLabel} - } - - if metric, ok := snap.Metrics["context_window"]; ok && metric.Used != nil && metric.Limit != nil && *metric.Limit > 0 { - pct := *metric.Used / *metric.Limit * 100 - if pct > 100 { - pct = 100 - } - if pct < 0 { - pct = 0 - } - snap.Metrics["composer_context_pct"] = core.Metric{ - Used: &pct, - Unit: "%", - Window: metric.Window, - } - } - - if stats.Added > 0 { - v := float64(stats.Added) - snap.Metrics["composer_lines_added"] = core.Metric{Used: &v, Unit: "lines", Window: defaultUsageWindowLabel} - } - if stats.Removed > 0 { - v := float64(stats.Removed) - snap.Metrics["composer_lines_removed"] = core.Metric{Used: &v, Unit: "lines", Window: defaultUsageWindowLabel} - } - if filesChanged := len(stats.Files); filesChanged > 0 { - v := float64(filesChanged) - snap.Metrics["composer_files_changed"] = core.Metric{Used: &v, Unit: "files", Window: defaultUsageWindowLabel} - snap.Metrics["ai_tracked_files"] = core.Metric{Used: &v, Unit: "files", Window: defaultUsageWindowLabel} - } - if deleted := 
len(stats.Deleted); deleted > 0 { - v := float64(deleted) - snap.Metrics["ai_deleted_files"] = core.Metric{Used: &v, Unit: "files", Window: defaultUsageWindowLabel} - } - if commits > 0 { - v := float64(commits) - snap.Metrics["scored_commits"] = core.Metric{Used: &v, Unit: "commits", Window: defaultUsageWindowLabel} - } - if promptCount > 0 { - v := float64(promptCount) - snap.Metrics["total_prompts"] = core.Metric{Used: &v, Unit: "prompts", Window: defaultUsageWindowLabel} - } - if stats.PatchCalls > 0 { - base := totalRequests - if base < stats.PatchCalls { - base = stats.PatchCalls - } - if base > 0 { - pct := float64(stats.PatchCalls) / float64(base) * 100 - snap.Metrics["ai_code_percentage"] = core.Metric{Used: &pct, Unit: "%", Window: defaultUsageWindowLabel} - } - } -} - -func emitDailyUsageSeries(dailyTokenTotals, dailyRequestTotals map[string]float64, interfaceDaily map[string]map[string]float64, snap *core.UsageSnapshot) { - if len(dailyTokenTotals) > 0 { - points := core.SortedTimePoints(dailyTokenTotals) - snap.DailySeries["analytics_tokens"] = points - snap.DailySeries["tokens_total"] = points - } - if len(dailyRequestTotals) > 0 { - points := core.SortedTimePoints(dailyRequestTotals) - snap.DailySeries["analytics_requests"] = points - snap.DailySeries["requests"] = points - } - for name, byDay := range interfaceDaily { - if len(byDay) == 0 { - continue - } - key := sanitizeMetricName(name) - snap.DailySeries["usage_client_"+key] = core.SortedTimePoints(byDay) - snap.DailySeries["usage_source_"+key] = core.SortedTimePoints(byDay) - } -} - -func formatCountSummary(entries []countEntry, max int) string { - if len(entries) == 0 || max <= 0 { - return "" - } - total := 0 - for _, entry := range entries { - total += entry.count - } - if total <= 0 { - return "" - } - limit := max - if limit > len(entries) { - limit = len(entries) - } - parts := make([]string, 0, limit+1) - for i := 0; i < limit; i++ { - pct := float64(entries[i].count) / float64(total) * 
100 - parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entries[i].name, shared.FormatTokenCount(entries[i].count), pct)) - } - if len(entries) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) - } - return strings.Join(parts, ", ") -} - -func emitBreakdownMetrics(prefix string, totals map[string]tokenUsage, daily map[string]map[string]float64, snap *core.UsageSnapshot) { - entries := sortUsageEntries(totals) - if len(entries) == 0 { - return - } - - for i, entry := range entries { - if i >= maxBreakdownMetrics { - break - } - keyPrefix := prefix + "_" + sanitizeMetricName(entry.Name) - setUsageMetric(snap, keyPrefix+"_total_tokens", float64(entry.Data.TotalTokens)) - setUsageMetric(snap, keyPrefix+"_input_tokens", float64(entry.Data.InputTokens)) - setUsageMetric(snap, keyPrefix+"_output_tokens", float64(entry.Data.OutputTokens)) - - if entry.Data.CachedInputTokens > 0 { - setUsageMetric(snap, keyPrefix+"_cached_tokens", float64(entry.Data.CachedInputTokens)) - } - if entry.Data.ReasoningOutputTokens > 0 { - setUsageMetric(snap, keyPrefix+"_reasoning_tokens", float64(entry.Data.ReasoningOutputTokens)) - } - - if byDay, ok := daily[entry.Name]; ok { - series := core.SortedTimePoints(byDay) - snap.DailySeries["tokens_"+prefix+"_"+sanitizeMetricName(entry.Name)] = series - snap.DailySeries["usage_"+prefix+"_"+sanitizeMetricName(entry.Name)] = series - } - - if prefix == "model" { - rec := core.ModelUsageRecord{ - RawModelID: entry.Name, - RawSource: "jsonl", - Window: defaultUsageWindowLabel, - InputTokens: core.Float64Ptr(float64(entry.Data.InputTokens)), - OutputTokens: core.Float64Ptr(float64(entry.Data.OutputTokens)), - TotalTokens: core.Float64Ptr(float64(entry.Data.TotalTokens)), - } - if entry.Data.CachedInputTokens > 0 { - rec.CachedTokens = core.Float64Ptr(float64(entry.Data.CachedInputTokens)) - } - if entry.Data.ReasoningOutputTokens > 0 { - rec.ReasoningTokens = core.Float64Ptr(float64(entry.Data.ReasoningOutputTokens)) - } 
- snap.AppendModelUsage(rec) - } - } - - rawKey := prefix + "_usage" - snap.Raw[rawKey] = formatUsageSummary(entries, maxBreakdownRaw) -} - -func emitClientSessionMetrics(clientSessions map[string]int, snap *core.UsageSnapshot) { - type entry struct { - name string - count int - } - var all []entry - for name, count := range clientSessions { - if count > 0 { - all = append(all, entry{name: name, count: count}) - } - } - sort.Slice(all, func(i, j int) bool { - if all[i].count == all[j].count { - return all[i].name < all[j].name - } - return all[i].count > all[j].count - }) - - for i, item := range all { - if i >= maxBreakdownMetrics { - break - } - value := float64(item.count) - snap.Metrics["client_"+sanitizeMetricName(item.name)+"_sessions"] = core.Metric{ - Used: &value, - Unit: "sessions", - Window: defaultUsageWindowLabel, - } - } -} - -func setUsageMetric(snap *core.UsageSnapshot, key string, value float64) { - if value <= 0 { - return - } - snap.Metrics[key] = core.Metric{ - Used: &value, - Unit: "tokens", - Window: defaultUsageWindowLabel, - } -} - -func addUsage(target map[string]tokenUsage, name string, delta tokenUsage) { - current := target[name] - current.InputTokens += delta.InputTokens - current.CachedInputTokens += delta.CachedInputTokens - current.OutputTokens += delta.OutputTokens - current.ReasoningOutputTokens += delta.ReasoningOutputTokens - current.TotalTokens += delta.TotalTokens - target[name] = current -} - -func addDailyUsage(target map[string]map[string]float64, name, day string, value float64) { - if day == "" || value <= 0 { - return - } - if target[name] == nil { - target[name] = make(map[string]float64) - } - target[name][day] += value -} - -func sortUsageEntries(values map[string]tokenUsage) []usageEntry { - out := make([]usageEntry, 0, len(values)) - for name, data := range values { - out = append(out, usageEntry{Name: name, Data: data}) - } - sort.Slice(out, func(i, j int) bool { - if out[i].Data.TotalTokens == 
out[j].Data.TotalTokens { - return out[i].Name < out[j].Name - } - return out[i].Data.TotalTokens > out[j].Data.TotalTokens - }) - return out -} - -func formatUsageSummary(entries []usageEntry, max int) string { - total := 0 - for _, entry := range entries { - total += entry.Data.TotalTokens - } - if total <= 0 { - return "" - } - - limit := max - if limit > len(entries) { - limit = len(entries) - } - - parts := make([]string, 0, limit+1) - for i := 0; i < limit; i++ { - entry := entries[i] - pct := float64(entry.Data.TotalTokens) / float64(total) * 100 - parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, shared.FormatTokenCount(entry.Data.TotalTokens), pct)) - } - - if len(entries) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) - } - return strings.Join(parts, ", ") -} - -func usageDelta(current, previous tokenUsage) tokenUsage { - return tokenUsage{ - InputTokens: current.InputTokens - previous.InputTokens, - CachedInputTokens: current.CachedInputTokens - previous.CachedInputTokens, - OutputTokens: current.OutputTokens - previous.OutputTokens, - ReasoningOutputTokens: current.ReasoningOutputTokens - previous.ReasoningOutputTokens, - TotalTokens: current.TotalTokens - previous.TotalTokens, - } -} - -func validUsageDelta(delta tokenUsage) bool { - return delta.InputTokens >= 0 && - delta.CachedInputTokens >= 0 && - delta.OutputTokens >= 0 && - delta.ReasoningOutputTokens >= 0 && - delta.TotalTokens >= 0 -} - -func normalizeModelName(name string) string { - name = strings.TrimSpace(name) - if name == "" { - return "unknown" - } - return name -} - -func classifyClient(source, originator string) string { - src := strings.ToLower(strings.TrimSpace(source)) - org := strings.ToLower(strings.TrimSpace(originator)) - - switch { - case src == "openusage" || src == "codex": - return "CLI" - case strings.Contains(org, "desktop"): - return "Desktop App" - case strings.Contains(org, "exec") || src == "exec": - return "Exec" - case 
strings.Contains(org, "cli") || src == "cli": - return "CLI" - case src == "vscode" || src == "ide": - return "IDE" - case src == "": - return "Other" - default: - return strings.ToUpper(src) - } -} - -func normalizeClientName(name string) string { - name = strings.TrimSpace(name) - if name == "" { - return "Other" - } - return name -} - -func sanitizeMetricName(name string) string { - name = strings.ToLower(strings.TrimSpace(name)) - if name == "" { - return "unknown" - } - - var b strings.Builder - lastUnderscore := false - for _, r := range name { - switch { - case r >= 'a' && r <= 'z': - b.WriteRune(r) - lastUnderscore = false - case r >= '0' && r <= '9': - b.WriteRune(r) - lastUnderscore = false - default: - if !lastUnderscore { - b.WriteByte('_') - lastUnderscore = true - } - } - } - - out := strings.Trim(b.String(), "_") - if out == "" { - return "unknown" - } - return out -} - -func dayFromTimestamp(timestamp string) string { - if timestamp == "" { - return "" - } - - for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} { - if parsed, err := time.Parse(layout, timestamp); err == nil { - return parsed.Format("2006-01-02") - } - } - - if len(timestamp) >= 10 { - candidate := timestamp[:10] - if _, err := time.Parse("2006-01-02", candidate); err == nil { - return candidate - } - } - return "" -} - -func dayFromSessionPath(path, sessionsDir string) string { - rel, err := filepath.Rel(sessionsDir, path) - if err != nil { - return "" - } - - parts := strings.Split(filepath.ToSlash(rel), "/") - if len(parts) < 3 { - return "" - } - - candidate := fmt.Sprintf("%s-%s-%s", parts[0], parts[1], parts[2]) - if _, err := time.Parse("2006-01-02", candidate); err != nil { - return "" - } - return candidate -} - func (p *Provider) applyRateLimitStatus(snap *core.UsageSnapshot) { if snap.Status == core.StatusAuth || snap.Status == core.StatusError || snap.Status == core.StatusUnknown || snap.Status == core.StatusUnsupported { return @@ 
-1799,109 +393,3 @@ func parseCurrencyValue(raw string) (float64, bool) { } return value, true } - -func findLatestSessionFile(sessionsDir string) (string, error) { - var files []string - - err := filepath.Walk(sessionsDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return nil // skip errors - } - if !info.IsDir() && strings.HasSuffix(path, ".jsonl") { - files = append(files, path) - } - return nil - }) - if err != nil { - return "", fmt.Errorf("walking sessions dir: %w", err) - } - - if len(files) == 0 { - return "", fmt.Errorf("no session files found in %s", sessionsDir) - } - - sort.Slice(files, func(i, j int) bool { - si, _ := os.Stat(files[i]) - sj, _ := os.Stat(files[j]) - if si == nil || sj == nil { - return false - } - return si.ModTime().After(sj.ModTime()) - }) - - return files[0], nil -} - -func findLastTokenCount(path string) (*eventPayload, error) { - var lastPayload *eventPayload - if err := walkSessionFile(path, func(record sessionLine) error { - if record.EventPayload == nil || record.EventPayload.Type != "token_count" { - return nil - } - payload := *record.EventPayload - lastPayload = &payload - return nil - }); err != nil { - return nil, err - } - return lastPayload, nil -} - -func (p *Provider) readDailySessionCounts(sessionsDir string, snap *core.UsageSnapshot) { - dayCounts := make(map[string]int) // "2025-01-15" → count - - _ = filepath.Walk(sessionsDir, func(path string, info os.FileInfo, err error) error { - if err != nil || info.IsDir() || !strings.HasSuffix(path, ".jsonl") { - return nil - } - rel, relErr := filepath.Rel(sessionsDir, path) - if relErr != nil { - return nil - } - parts := strings.Split(filepath.ToSlash(rel), "/") - if len(parts) >= 3 { - dateStr := fmt.Sprintf("%s-%s-%s", parts[0], parts[1], parts[2]) - if _, parseErr := time.Parse("2006-01-02", dateStr); parseErr == nil { - dayCounts[dateStr]++ - } - } - return nil - }) - - if len(dayCounts) == 0 { - return - } - - dates := 
lo.Keys(dayCounts) - sort.Strings(dates) - - for _, d := range dates { - snap.DailySeries["sessions"] = append(snap.DailySeries["sessions"], core.TimePoint{ - Date: d, - Value: float64(dayCounts[d]), - }) - } -} - -func formatWindow(minutes int) string { - if minutes <= 0 { - return "" - } - if minutes < 60 { - return fmt.Sprintf("%dm", minutes) - } - hours := minutes / 60 - remaining := minutes % 60 - if remaining == 0 { - if hours >= 24 { - days := hours / 24 - leftover := hours % 24 - if leftover == 0 { - return fmt.Sprintf("%dd", days) - } - return fmt.Sprintf("%dd%dh", days, leftover) - } - return fmt.Sprintf("%dh", hours) - } - return fmt.Sprintf("%dh%dm", hours, remaining) -} diff --git a/internal/providers/codex/live_usage.go b/internal/providers/codex/live_usage.go new file mode 100644 index 0000000..4af80f6 --- /dev/null +++ b/internal/providers/codex/live_usage.go @@ -0,0 +1,382 @@ +package codex + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +func (p *Provider) fetchLiveUsage(ctx context.Context, acct core.AccountConfig, configDir string, snap *core.UsageSnapshot) (bool, error) { + authPath := filepath.Join(configDir, "auth.json") + if acct.ExtraData != nil && acct.ExtraData["auth_file"] != "" { + authPath = acct.ExtraData["auth_file"] + } + + data, err := os.ReadFile(authPath) + if err != nil { + return false, nil + } + + var auth authFile + if err := json.Unmarshal(data, &auth); err != nil { + return false, nil + } + + if strings.TrimSpace(auth.Tokens.AccessToken) == "" { + return false, nil + } + + baseURL := resolveChatGPTBaseURL(acct, configDir) + usageURL := usageURLForBase(baseURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, usageURL, nil) + if err != nil { + return false, fmt.Errorf("codex: creating live 
usage request: %w", err) + } + req.Header.Set("Authorization", "Bearer "+auth.Tokens.AccessToken) + req.Header.Set("Accept", "application/json") + + accountID := core.FirstNonEmpty(auth.Tokens.AccountID, auth.AccountID) + if accountID == "" && acct.ExtraData != nil { + accountID = acct.ExtraData["account_id"] + } + if accountID != "" { + req.Header.Set("ChatGPT-Account-Id", accountID) + } + + if cliVersion := snap.Raw["cli_version"]; cliVersion != "" { + req.Header.Set("User-Agent", "codex-cli/"+cliVersion) + } else { + req.Header.Set("User-Agent", "codex-cli") + } + + resp, err := p.Client().Do(req) + if err != nil { + return false, fmt.Errorf("codex: live usage request failed: %w", err) + } + defer resp.Body.Close() + + body, _ := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden { + return false, fmt.Errorf("%w: HTTP %d", errLiveUsageAuth, resp.StatusCode) + } + if resp.StatusCode != http.StatusOK { + return false, fmt.Errorf("codex: live usage HTTP %d: %s", resp.StatusCode, truncateForError(string(body), maxHTTPErrorBodySize)) + } + + var payload usagePayload + if err := json.Unmarshal(body, &payload); err != nil { + return false, fmt.Errorf("codex: parsing live usage response: %w", err) + } + + summary := applyUsagePayload(&payload, snap) + if summary.limitMetricsApplied > 0 { + snap.Raw["rate_limit_source"] = "live" + } else { + clearRateLimitMetrics(snap) + snap.Raw["rate_limit_source"] = "live_unavailable" + snap.Raw["rate_limit_warning"] = "live usage payload did not include limit windows" + } + snap.Raw["quota_api"] = "live" + return true, nil +} + +func applyUsagePayload(payload *usagePayload, snap *core.UsageSnapshot) usageApplySummary { + var summary usageApplySummary + if payload == nil { + return summary + } + + if payload.Email != "" { + snap.Raw["account_email"] = payload.Email + } + if payload.AccountID != "" { + snap.Raw["account_id"] = payload.AccountID + } + 
if payload.PlanType != "" { + snap.Raw["plan_type"] = payload.PlanType + } + + summary.limitMetricsApplied += applyUsageLimitDetails(payload.RateLimit, "rate_limit_primary", "rate_limit_secondary", snap) + summary.limitMetricsApplied += applyUsageLimitDetails(payload.CodeReviewRateLimit, "rate_limit_code_review_primary", "rate_limit_code_review_secondary", snap) + summary.limitMetricsApplied += applyUsageAdditionalLimits(payload.AdditionalRateLimits, snap) + + if payload.RateLimitStatus != nil { + status := payload.RateLimitStatus + if payload.PlanType == "" && status.PlanType != "" { + snap.Raw["plan_type"] = status.PlanType + } + summary.limitMetricsApplied += applyUsageLimitDetails(status.RateLimit, "rate_limit_primary", "rate_limit_secondary", snap) + summary.limitMetricsApplied += applyUsageLimitDetails(status.CodeReviewRateLimit, "rate_limit_code_review_primary", "rate_limit_code_review_secondary", snap) + summary.limitMetricsApplied += applyUsageAdditionalLimits(status.AdditionalRateLimits, snap) + if payload.Credits == nil { + applyUsageCredits(status.Credits, snap) + } + } + + applyUsageCredits(payload.Credits, snap) + return summary +} + +func applyUsageAdditionalLimits(additional []usageAdditionalLimit, snap *core.UsageSnapshot) int { + applied := 0 + for _, extra := range additional { + limitID := sanitizeMetricName(core.FirstNonEmpty(extra.MeteredFeature, extra.LimitName)) + if limitID == "" || limitID == "codex" { + continue + } + + primaryKey := "rate_limit_" + limitID + "_primary" + secondaryKey := "rate_limit_" + limitID + "_secondary" + applied += applyUsageLimitDetails(extra.RateLimit, primaryKey, secondaryKey, snap) + if extra.LimitName != "" { + snap.Raw["rate_limit_"+limitID+"_name"] = extra.LimitName + } + } + return applied +} + +func applyUsageCredits(credits *usageCredits, snap *core.UsageSnapshot) { + if credits == nil { + return + } + + switch { + case credits.Unlimited: + snap.Raw["credits"] = "unlimited" + case credits.HasCredits: + 
snap.Raw["credits"] = "available" + if formatted := formatCreditsBalance(credits.Balance); formatted != "" { + snap.Raw["credit_balance"] = formatted + } + default: + snap.Raw["credits"] = "none" + } +} + +func formatCreditsBalance(balance any) string { + switch v := balance.(type) { + case nil: + return "" + case string: + if strings.TrimSpace(v) == "" { + return "" + } + if f, err := strconv.ParseFloat(v, 64); err == nil { + return fmt.Sprintf("$%.2f", f) + } + return v + case float64: + return fmt.Sprintf("$%.2f", v) + case json.Number: + if f, err := v.Float64(); err == nil { + return fmt.Sprintf("$%.2f", f) + } + } + return "" +} + +func applyUsageLimitDetails(details *usageLimitDetails, primaryKey, secondaryKey string, snap *core.UsageSnapshot) int { + if details == nil { + return 0 + } + applied := 0 + primary := details.PrimaryWindow + if primary == nil { + primary = details.Primary + } + secondary := details.SecondaryWindow + if secondary == nil { + secondary = details.Secondary + } + if applyUsageWindowMetric(primary, primaryKey, snap) { + applied++ + } + if applyUsageWindowMetric(secondary, secondaryKey, snap) { + applied++ + } + return applied +} + +func applyUsageWindowMetric(window *usageWindowInfo, key string, snap *core.UsageSnapshot) bool { + if window == nil || key == "" { + return false + } + + used, ok := resolveWindowUsedPercent(window) + if !ok { + return false + } + + limit := float64(100) + remaining := 100 - used + windowLabel := formatWindow(resolveWindowMinutes(window)) + + snap.Metrics[key] = core.Metric{ + Limit: &limit, + Used: &used, + Remaining: &remaining, + Unit: "%", + Window: windowLabel, + } + + if resetAt := resolveWindowResetAt(window); resetAt > 0 { + snap.Resets[key] = time.Unix(resetAt, 0) + } + return true +} + +func resolveWindowUsedPercent(window *usageWindowInfo) (float64, bool) { + if window == nil { + return 0, false + } + if window.UsedPercent != nil { + return clampPercent(*window.UsedPercent), true + } + if 
window.RemainingPercent != nil { + return clampPercent(100 - *window.RemainingPercent), true + } + return 0, false +} + +func resolveWindowMinutes(window *usageWindowInfo) int { + if window == nil { + return 0 + } + if window.LimitWindowSeconds > 0 { + return secondsToMinutes(window.LimitWindowSeconds) + } + if window.WindowMinutes > 0 { + return window.WindowMinutes + } + return 0 +} + +func resolveWindowResetAt(window *usageWindowInfo) int64 { + if window == nil { + return 0 + } + switch { + case window.ResetAt > 0: + return window.ResetAt + case window.ResetsAt > 0: + return window.ResetsAt + case window.ResetAfterSeconds > 0: + return time.Now().UTC().Add(time.Duration(window.ResetAfterSeconds) * time.Second).Unix() + default: + return 0 + } +} + +func clearRateLimitMetrics(snap *core.UsageSnapshot) { + for key := range snap.Metrics { + if strings.HasPrefix(key, "rate_limit_") { + delete(snap.Metrics, key) + } + } + for key := range snap.Resets { + if strings.HasPrefix(key, "rate_limit_") { + delete(snap.Resets, key) + } + } +} + +func clampPercent(v float64) float64 { + if v < 0 { + return 0 + } + if v > 100 { + return 100 + } + return v +} + +func secondsToMinutes(seconds int) int { + if seconds <= 0 { + return 0 + } + return (seconds + 59) / 60 +} + +func resolveChatGPTBaseURL(acct core.AccountConfig, configDir string) string { + switch { + case strings.TrimSpace(acct.BaseURL) != "": + return normalizeChatGPTBaseURL(acct.BaseURL) + case acct.ExtraData != nil && strings.TrimSpace(acct.ExtraData["chatgpt_base_url"]) != "": + return normalizeChatGPTBaseURL(acct.ExtraData["chatgpt_base_url"]) + default: + if fromConfig := readChatGPTBaseURLFromConfig(configDir); fromConfig != "" { + return normalizeChatGPTBaseURL(fromConfig) + } + } + return normalizeChatGPTBaseURL(defaultChatGPTBaseURL) +} + +func readChatGPTBaseURLFromConfig(configDir string) string { + if strings.TrimSpace(configDir) == "" { + return "" + } + + configPath := filepath.Join(configDir, 
"config.toml") + data, err := os.ReadFile(configPath) + if err != nil { + return "" + } + + scanner := bufio.NewScanner(strings.NewReader(string(data))) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" || strings.HasPrefix(line, "#") || !strings.Contains(line, "=") { + continue + } + if !strings.HasPrefix(line, "chatgpt_base_url") { + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + continue + } + val := strings.TrimSpace(parts[1]) + val = strings.Trim(val, "\"'") + if val != "" { + return val + } + } + + return "" +} + +func normalizeChatGPTBaseURL(baseURL string) string { + baseURL = strings.TrimSpace(baseURL) + baseURL = strings.TrimRight(baseURL, "/") + if baseURL == "" { + return defaultChatGPTBaseURL + } + if (strings.HasPrefix(baseURL, "https://chatgpt.com") || strings.HasPrefix(baseURL, "https://chat.openai.com")) && + !strings.Contains(baseURL, "/backend-api") { + baseURL += "/backend-api" + } + return baseURL +} + +func usageURLForBase(baseURL string) string { + if strings.Contains(baseURL, "/backend-api") { + return baseURL + "/wham/usage" + } + return baseURL + "/api/codex/usage" +} + +func truncateForError(value string, max int) string { + return shared.Truncate(strings.TrimSpace(value), max) +} diff --git a/internal/providers/codex/session_usage.go b/internal/providers/codex/session_usage.go new file mode 100644 index 0000000..17817cc --- /dev/null +++ b/internal/providers/codex/session_usage.go @@ -0,0 +1,1083 @@ +package codex + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" + "github.com/samber/lo" +) + +func (p *Provider) readLatestSession(sessionsDir string, snap *core.UsageSnapshot) error { + latestFile, err := findLatestSessionFile(sessionsDir) + if err != nil { + return fmt.Errorf("finding latest session: %w", err) 
+ } + + snap.Raw["latest_session_file"] = filepath.Base(latestFile) + + lastPayload, err := findLastTokenCount(latestFile) + if err != nil { + return fmt.Errorf("reading session: %w", err) + } + + if lastPayload == nil { + return fmt.Errorf("no token_count events in latest session") + } + + if lastPayload.Info != nil { + info := lastPayload.Info + total := info.TotalTokenUsage + + inputTokens := float64(total.InputTokens) + snap.Metrics["session_input_tokens"] = core.Metric{Used: &inputTokens, Unit: "tokens", Window: "session"} + + outputTokens := float64(total.OutputTokens) + snap.Metrics["session_output_tokens"] = core.Metric{Used: &outputTokens, Unit: "tokens", Window: "session"} + + cachedTokens := float64(total.CachedInputTokens) + snap.Metrics["session_cached_tokens"] = core.Metric{Used: &cachedTokens, Unit: "tokens", Window: "session"} + + if total.ReasoningOutputTokens > 0 { + reasoning := float64(total.ReasoningOutputTokens) + snap.Metrics["session_reasoning_tokens"] = core.Metric{Used: &reasoning, Unit: "tokens", Window: "session"} + } + + totalTokens := float64(total.TotalTokens) + snap.Metrics["session_total_tokens"] = core.Metric{Used: &totalTokens, Unit: "tokens", Window: "session"} + + if info.ModelContextWindow > 0 { + ctxWindow := float64(info.ModelContextWindow) + ctxUsed := float64(total.InputTokens) + snap.Metrics["context_window"] = core.Metric{Limit: &ctxWindow, Used: &ctxUsed, Unit: "tokens"} + } + } + + if lastPayload.RateLimits != nil { + rl := lastPayload.RateLimits + rateLimitSet := false + + if rl.Primary != nil { + limit := float64(100) + used := rl.Primary.UsedPercent + remaining := 100 - used + windowStr := formatWindow(rl.Primary.WindowMinutes) + snap.Metrics["rate_limit_primary"] = core.Metric{Limit: &limit, Used: &used, Remaining: &remaining, Unit: "%", Window: windowStr} + + if rl.Primary.ResetsAt > 0 { + snap.Resets["rate_limit_primary"] = time.Unix(rl.Primary.ResetsAt, 0) + } + rateLimitSet = true + } + + if rl.Secondary != nil 
{ + limit := float64(100) + used := rl.Secondary.UsedPercent + remaining := 100 - used + windowStr := formatWindow(rl.Secondary.WindowMinutes) + snap.Metrics["rate_limit_secondary"] = core.Metric{Limit: &limit, Used: &used, Remaining: &remaining, Unit: "%", Window: windowStr} + + if rl.Secondary.ResetsAt > 0 { + snap.Resets["rate_limit_secondary"] = time.Unix(rl.Secondary.ResetsAt, 0) + } + rateLimitSet = true + } + + if rl.Credits != nil { + if rl.Credits.Unlimited { + snap.Raw["credits"] = "unlimited" + } else if rl.Credits.HasCredits { + snap.Raw["credits"] = "available" + if rl.Credits.Balance != nil { + snap.Raw["credit_balance"] = fmt.Sprintf("$%.2f", *rl.Credits.Balance) + } + } else { + snap.Raw["credits"] = "none" + } + } + + if rl.PlanType != nil { + snap.Raw["plan_type"] = *rl.PlanType + } + if rateLimitSet && snap.Raw["rate_limit_source"] == "" { + snap.Raw["rate_limit_source"] = "session" + } + } + + return nil +} + +func (p *Provider) readSessionUsageBreakdowns(sessionsDir string, snap *core.UsageSnapshot) error { + modelTotals := make(map[string]tokenUsage) + clientTotals := make(map[string]tokenUsage) + modelDaily := make(map[string]map[string]float64) + clientDaily := make(map[string]map[string]float64) + interfaceDaily := make(map[string]map[string]float64) + dailyTokenTotals := make(map[string]float64) + dailyRequestTotals := make(map[string]float64) + clientSessions := make(map[string]int) + clientRequests := make(map[string]int) + toolCalls := make(map[string]int) + langRequests := make(map[string]int) + callTool := make(map[string]string) + callOutcome := make(map[string]int) + stats := patchStats{ + Files: make(map[string]struct{}), + Deleted: make(map[string]struct{}), + } + today := time.Now().UTC().Format("2006-01-02") + totalRequests := 0 + requestsToday := 0 + promptCount := 0 + commits := 0 + completedWithoutCallID := 0 + + walkErr := filepath.Walk(sessionsDir, func(path string, info os.FileInfo, err error) error { + if err != nil || 
info == nil || info.IsDir() || !strings.HasSuffix(path, ".jsonl") { + return nil + } + + defaultDay := dayFromSessionPath(path, sessionsDir) + sessionClient := "Other" + currentModel := "unknown" + var previous tokenUsage + var hasPrevious bool + var countedSession bool + return walkSessionFile(path, func(record sessionLine) error { + switch { + case record.SessionMeta != nil: + sessionClient = classifyClient(record.SessionMeta.Source, record.SessionMeta.Originator) + if record.SessionMeta.Model != "" { + currentModel = record.SessionMeta.Model + } + case record.TurnContext != nil: + if strings.TrimSpace(record.TurnContext.Model) != "" { + currentModel = record.TurnContext.Model + } + case record.EventPayload != nil: + payload := record.EventPayload + if payload.Type == "user_message" { + promptCount++ + return nil + } + if payload.Type != "token_count" || payload.Info == nil { + return nil + } + + total := payload.Info.TotalTokenUsage + delta := total + if hasPrevious { + delta = usageDelta(total, previous) + if !validUsageDelta(delta) { + delta = total + } + } + previous = total + hasPrevious = true + + if delta.TotalTokens <= 0 { + return nil + } + + modelName := normalizeModelName(currentModel) + clientName := normalizeClientName(sessionClient) + day := dayFromTimestamp(record.Timestamp) + if day == "" { + day = defaultDay + } + + addUsage(modelTotals, modelName, delta) + addUsage(clientTotals, clientName, delta) + addDailyUsage(modelDaily, modelName, day, float64(delta.TotalTokens)) + addDailyUsage(clientDaily, clientName, day, float64(delta.TotalTokens)) + addDailyUsage(interfaceDaily, clientInterfaceBucket(clientName), day, 1) + dailyTokenTotals[day] += float64(delta.TotalTokens) + dailyRequestTotals[day]++ + clientRequests[clientName]++ + totalRequests++ + if day == today { + requestsToday++ + } + + if !countedSession { + clientSessions[clientName]++ + countedSession = true + } + case record.ResponseItem != nil: + item := record.ResponseItem + switch 
item.Type { + case "function_call": + tool := normalizeToolName(item.Name) + recordToolCall(toolCalls, callTool, item.CallID, tool) + if strings.EqualFold(tool, "exec_command") { + var args commandArgs + if json.Unmarshal(item.Arguments, &args) == nil { + recordCommandLanguage(args.Cmd, langRequests) + if commandContainsGitCommit(args.Cmd) { + commits++ + } + } + } + case "custom_tool_call": + tool := normalizeToolName(item.Name) + recordToolCall(toolCalls, callTool, item.CallID, tool) + if strings.EqualFold(tool, "apply_patch") { + stats.PatchCalls++ + accumulatePatchStats(item.Input, &stats, langRequests) + } + case "web_search_call": + recordToolCall(toolCalls, callTool, "", "web_search") + completedWithoutCallID++ + case "function_call_output", "custom_tool_call_output": + setToolCallOutcome(item.CallID, item.Output, callOutcome) + } + } + + return nil + }) + }) + if walkErr != nil { + return fmt.Errorf("walking session files: %w", walkErr) + } + + emitBreakdownMetrics("model", modelTotals, modelDaily, snap) + emitBreakdownMetrics("client", clientTotals, clientDaily, snap) + emitClientSessionMetrics(clientSessions, snap) + emitClientRequestMetrics(clientRequests, snap) + emitToolMetrics(toolCalls, callTool, callOutcome, completedWithoutCallID, snap) + emitLanguageMetrics(langRequests, snap) + emitProductivityMetrics(stats, promptCount, commits, totalRequests, requestsToday, clientSessions, snap) + emitDailyUsageSeries(dailyTokenTotals, dailyRequestTotals, interfaceDaily, snap) + + return nil +} + +func recordToolCall(toolCalls map[string]int, callTool map[string]string, callID, tool string) { + tool = normalizeToolName(tool) + toolCalls[tool]++ + if strings.TrimSpace(callID) != "" { + callTool[callID] = tool + } +} + +func normalizeToolName(tool string) string { + tool = strings.TrimSpace(tool) + if tool == "" { + return "unknown" + } + return tool +} + +func setToolCallOutcome(callID, output string, outcomes map[string]int) { + callID = 
strings.TrimSpace(callID) + if callID == "" { + return + } + outcomes[callID] = inferToolCallOutcome(output) +} + +func inferToolCallOutcome(output string) int { + lower := strings.ToLower(strings.TrimSpace(output)) + if lower == "" { + return 1 + } + if strings.Contains(lower, `"exit_code":0`) || strings.Contains(lower, "process exited with code 0") { + return 1 + } + if strings.Contains(lower, "cancelled") || strings.Contains(lower, "canceled") || strings.Contains(lower, "aborted") { + return 3 + } + if idx := strings.Index(lower, "process exited with code "); idx >= 0 { + rest := lower[idx+len("process exited with code "):] + n := 0 + for _, r := range rest { + if r < '0' || r > '9' { + break + } + n = n*10 + int(r-'0') + } + if n == 0 { + return 1 + } + return 2 + } + if idx := strings.Index(lower, "exit code "); idx >= 0 { + rest := lower[idx+len("exit code "):] + n := 0 + foundDigit := false + for _, r := range rest { + if r < '0' || r > '9' { + if foundDigit { + break + } + continue + } + foundDigit = true + n = n*10 + int(r-'0') + } + if !foundDigit || n == 0 { + return 1 + } + return 2 + } + if strings.Contains(lower, `"exit_code":`) && !strings.Contains(lower, `"exit_code":0`) { + return 2 + } + if strings.Contains(lower, "error") || strings.Contains(lower, "failed") { + return 2 + } + return 1 +} + +func recordCommandLanguage(cmd string, langs map[string]int) { + if language := detectCommandLanguage(cmd); language != "" { + langs[language]++ + } +} + +func detectCommandLanguage(cmd string) string { + trimmed := strings.TrimSpace(strings.ToLower(cmd)) + if trimmed == "" { + return "" + } + switch { + case strings.Contains(trimmed, " go ") || strings.HasPrefix(trimmed, "go ") || strings.Contains(trimmed, "gofmt ") || strings.Contains(trimmed, "golangci-lint"): + return "go" + case strings.Contains(trimmed, " terraform ") || strings.HasPrefix(trimmed, "terraform "): + return "terraform" + case strings.Contains(trimmed, " python ") || 
strings.HasPrefix(trimmed, "python ") || strings.HasPrefix(trimmed, "python3 "): + return "python" + case strings.Contains(trimmed, " npm ") || strings.HasPrefix(trimmed, "npm ") || strings.Contains(trimmed, " yarn ") || strings.HasPrefix(trimmed, "pnpm ") || strings.Contains(trimmed, " node "): + return "ts" + case strings.Contains(trimmed, " cargo ") || strings.HasPrefix(trimmed, "cargo ") || strings.Contains(trimmed, " rustc "): + return "rust" + case strings.Contains(trimmed, " java ") || strings.HasPrefix(trimmed, "java ") || strings.Contains(trimmed, " gradle ") || strings.Contains(trimmed, " mvn "): + return "java" + case strings.Contains(trimmed, ".log"): + return "log" + case strings.Contains(trimmed, ".txt"): + return "txt" + default: + return "shell" + } +} + +func commandContainsGitCommit(cmd string) bool { + normalized := " " + strings.ToLower(cmd) + " " + return strings.Contains(normalized, " git commit ") +} + +func accumulatePatchStats(input string, stats *patchStats, langs map[string]int) { + if stats == nil { + return + } + lines := strings.Split(input, "\n") + for _, line := range lines { + switch { + case strings.HasPrefix(line, "*** Update File: "): + path := strings.TrimSpace(strings.TrimPrefix(line, "*** Update File: ")) + if path != "" { + stats.Files[path] = struct{}{} + if language := languageFromPath(path); language != "" { + langs[language]++ + } + } + case strings.HasPrefix(line, "*** Add File: "): + path := strings.TrimSpace(strings.TrimPrefix(line, "*** Add File: ")) + if path != "" { + stats.Files[path] = struct{}{} + if language := languageFromPath(path); language != "" { + langs[language]++ + } + } + case strings.HasPrefix(line, "*** Delete File: "): + path := strings.TrimSpace(strings.TrimPrefix(line, "*** Delete File: ")) + if path != "" { + stats.Files[path] = struct{}{} + stats.Deleted[path] = struct{}{} + if language := languageFromPath(path); language != "" { + langs[language]++ + } + } + case strings.HasPrefix(line, "*** 
Move to: "): + path := strings.TrimSpace(strings.TrimPrefix(line, "*** Move to: ")) + if path != "" { + stats.Files[path] = struct{}{} + if language := languageFromPath(path); language != "" { + langs[language]++ + } + } + case strings.HasPrefix(line, "+++ "), strings.HasPrefix(line, "--- "), strings.HasPrefix(line, "***"): + continue + case strings.HasPrefix(line, "+"): + stats.Added++ + case strings.HasPrefix(line, "-"): + stats.Removed++ + } + } +} + +func languageFromPath(path string) string { + lower := strings.ToLower(strings.TrimSpace(path)) + switch { + case strings.HasSuffix(lower, ".go"): + return "go" + case strings.HasSuffix(lower, ".tf"): + return "terraform" + case strings.HasSuffix(lower, ".ts"), strings.HasSuffix(lower, ".tsx"), strings.HasSuffix(lower, ".js"), strings.HasSuffix(lower, ".jsx"): + return "ts" + case strings.HasSuffix(lower, ".py"): + return "python" + case strings.HasSuffix(lower, ".rs"): + return "rust" + case strings.HasSuffix(lower, ".java"): + return "java" + case strings.HasSuffix(lower, ".yaml"), strings.HasSuffix(lower, ".yml"): + return "yaml" + case strings.HasSuffix(lower, ".json"): + return "json" + case strings.HasSuffix(lower, ".md"): + return "md" + case strings.HasSuffix(lower, ".tpl"): + return "tpl" + case strings.HasSuffix(lower, ".txt"): + return "txt" + case strings.HasSuffix(lower, ".log"): + return "log" + case strings.HasSuffix(lower, ".sh"), strings.HasSuffix(lower, ".zsh"), strings.HasSuffix(lower, ".bash"): + return "shell" + default: + return "" + } +} + +func emitClientRequestMetrics(clientRequests map[string]int, snap *core.UsageSnapshot) { + type entry struct { + name string + count int + } + var all []entry + interfaceTotals := make(map[string]float64) + for name, count := range clientRequests { + if count > 0 { + all = append(all, entry{name: name, count: count}) + interfaceTotals[clientInterfaceBucket(name)] += float64(count) + } + } + sort.Slice(all, func(i, j int) bool { + if all[i].count == 
all[j].count { + return all[i].name < all[j].name + } + return all[i].count > all[j].count + }) + for i, item := range all { + if i >= maxBreakdownMetrics { + break + } + value := float64(item.count) + snap.Metrics["client_"+sanitizeMetricName(item.name)+"_requests"] = core.Metric{Used: &value, Unit: "requests", Window: defaultUsageWindowLabel} + } + for bucket, value := range interfaceTotals { + v := value + snap.Metrics["interface_"+sanitizeMetricName(bucket)] = core.Metric{Used: &v, Unit: "requests", Window: defaultUsageWindowLabel} + } +} + +func clientInterfaceBucket(name string) string { + lower := strings.ToLower(strings.TrimSpace(name)) + switch { + case strings.Contains(lower, "desktop"): + return "desktop_app" + case strings.Contains(lower, "cli"), strings.Contains(lower, "exec"), strings.Contains(lower, "terminal"): + return "cli_agents" + case strings.Contains(lower, "ide"), strings.Contains(lower, "vscode"), strings.Contains(lower, "editor"): + return "ide" + case strings.Contains(lower, "cloud"), strings.Contains(lower, "web"): + return "cloud_agents" + case strings.Contains(lower, "human"), strings.Contains(lower, "other"): + return "human" + default: + return sanitizeMetricName(name) + } +} + +func emitToolMetrics(toolCalls map[string]int, callTool map[string]string, callOutcome map[string]int, completedWithoutCallID int, snap *core.UsageSnapshot) { + var all []countEntry + totalCalls := 0 + for name, count := range toolCalls { + if count <= 0 { + continue + } + all = append(all, countEntry{name: name, count: count}) + totalCalls += count + v := float64(count) + snap.Metrics["tool_"+sanitizeMetricName(name)] = core.Metric{Used: &v, Unit: "calls", Window: defaultUsageWindowLabel} + } + if totalCalls <= 0 { + return + } + + sort.Slice(all, func(i, j int) bool { + if all[i].count == all[j].count { + return all[i].name < all[j].name + } + return all[i].count > all[j].count + }) + + completed := completedWithoutCallID + errored := 0 + cancelled := 0 + 
for callID := range callTool { + switch callOutcome[callID] { + case 2: + errored++ + case 3: + cancelled++ + default: + completed++ + } + } + if completed+errored+cancelled < totalCalls { + completed += totalCalls - (completed + errored + cancelled) + } + + totalV := float64(totalCalls) + snap.Metrics["tool_calls_total"] = core.Metric{Used: &totalV, Unit: "calls", Window: defaultUsageWindowLabel} + if completed > 0 { + v := float64(completed) + snap.Metrics["tool_completed"] = core.Metric{Used: &v, Unit: "calls", Window: defaultUsageWindowLabel} + } + if errored > 0 { + v := float64(errored) + snap.Metrics["tool_errored"] = core.Metric{Used: &v, Unit: "calls", Window: defaultUsageWindowLabel} + } + if cancelled > 0 { + v := float64(cancelled) + snap.Metrics["tool_cancelled"] = core.Metric{Used: &v, Unit: "calls", Window: defaultUsageWindowLabel} + } + if totalCalls > 0 { + success := float64(completed) / float64(totalCalls) * 100 + snap.Metrics["tool_success_rate"] = core.Metric{Used: &success, Unit: "%", Window: defaultUsageWindowLabel} + } + snap.Raw["tool_usage"] = formatCountSummary(all, maxBreakdownRaw) +} + +func emitLanguageMetrics(langRequests map[string]int, snap *core.UsageSnapshot) { + var all []countEntry + for language, count := range langRequests { + if count <= 0 { + continue + } + all = append(all, countEntry{name: language, count: count}) + v := float64(count) + snap.Metrics["lang_"+sanitizeMetricName(language)] = core.Metric{Used: &v, Unit: "requests", Window: defaultUsageWindowLabel} + } + if len(all) == 0 { + return + } + sort.Slice(all, func(i, j int) bool { + if all[i].count == all[j].count { + return all[i].name < all[j].name + } + return all[i].count > all[j].count + }) + snap.Raw["language_usage"] = formatCountSummary(all, maxBreakdownRaw) +} + +func emitProductivityMetrics(stats patchStats, promptCount, commits, totalRequests, requestsToday int, clientSessions map[string]int, snap *core.UsageSnapshot) { + if totalRequests > 0 { + v := 
float64(totalRequests) + snap.Metrics["total_ai_requests"] = core.Metric{Used: &v, Unit: "requests", Window: defaultUsageWindowLabel} + snap.Metrics["composer_requests"] = core.Metric{Used: &v, Unit: "requests", Window: defaultUsageWindowLabel} + } + if requestsToday > 0 { + v := float64(requestsToday) + snap.Metrics["requests_today"] = core.Metric{Used: &v, Unit: "requests", Window: "today"} + snap.Metrics["today_composer_requests"] = core.Metric{Used: &v, Unit: "requests", Window: "today"} + } + + totalSessions := 0 + for _, count := range clientSessions { + totalSessions += count + } + if totalSessions > 0 { + v := float64(totalSessions) + snap.Metrics["composer_sessions"] = core.Metric{Used: &v, Unit: "sessions", Window: defaultUsageWindowLabel} + } + + if metric, ok := snap.Metrics["context_window"]; ok && metric.Used != nil && metric.Limit != nil && *metric.Limit > 0 { + pct := *metric.Used / *metric.Limit * 100 + if pct < 0 { + pct = 0 + } + if pct > 100 { + pct = 100 + } + snap.Metrics["composer_context_pct"] = core.Metric{Used: &pct, Unit: "%", Window: metric.Window} + } + + if stats.Added > 0 { + v := float64(stats.Added) + snap.Metrics["composer_lines_added"] = core.Metric{Used: &v, Unit: "lines", Window: defaultUsageWindowLabel} + } + if stats.Removed > 0 { + v := float64(stats.Removed) + snap.Metrics["composer_lines_removed"] = core.Metric{Used: &v, Unit: "lines", Window: defaultUsageWindowLabel} + } + if filesChanged := len(stats.Files); filesChanged > 0 { + v := float64(filesChanged) + snap.Metrics["composer_files_changed"] = core.Metric{Used: &v, Unit: "files", Window: defaultUsageWindowLabel} + snap.Metrics["ai_tracked_files"] = core.Metric{Used: &v, Unit: "files", Window: defaultUsageWindowLabel} + } + if deleted := len(stats.Deleted); deleted > 0 { + v := float64(deleted) + snap.Metrics["ai_deleted_files"] = core.Metric{Used: &v, Unit: "files", Window: defaultUsageWindowLabel} + } + if commits > 0 { + v := float64(commits) + 
snap.Metrics["scored_commits"] = core.Metric{Used: &v, Unit: "commits", Window: defaultUsageWindowLabel} + } + if promptCount > 0 { + v := float64(promptCount) + snap.Metrics["total_prompts"] = core.Metric{Used: &v, Unit: "prompts", Window: defaultUsageWindowLabel} + } + if stats.PatchCalls > 0 { + base := totalRequests + if base < stats.PatchCalls { + base = stats.PatchCalls + } + if base > 0 { + pct := float64(stats.PatchCalls) / float64(base) * 100 + snap.Metrics["ai_code_percentage"] = core.Metric{Used: &pct, Unit: "%", Window: defaultUsageWindowLabel} + } + } +} + +func emitDailyUsageSeries(dailyTokenTotals, dailyRequestTotals map[string]float64, interfaceDaily map[string]map[string]float64, snap *core.UsageSnapshot) { + if len(dailyTokenTotals) > 0 { + points := core.SortedTimePoints(dailyTokenTotals) + snap.DailySeries["analytics_tokens"] = points + snap.DailySeries["tokens_total"] = points + } + if len(dailyRequestTotals) > 0 { + points := core.SortedTimePoints(dailyRequestTotals) + snap.DailySeries["analytics_requests"] = points + snap.DailySeries["requests"] = points + } + for name, byDay := range interfaceDaily { + if len(byDay) == 0 { + continue + } + key := sanitizeMetricName(name) + snap.DailySeries["usage_client_"+key] = core.SortedTimePoints(byDay) + snap.DailySeries["usage_source_"+key] = core.SortedTimePoints(byDay) + } +} + +func formatCountSummary(entries []countEntry, max int) string { + if len(entries) == 0 || max <= 0 { + return "" + } + total := 0 + for _, entry := range entries { + total += entry.count + } + if total <= 0 { + return "" + } + limit := max + if limit > len(entries) { + limit = len(entries) + } + parts := make([]string, 0, limit+1) + for i := 0; i < limit; i++ { + pct := float64(entries[i].count) / float64(total) * 100 + parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entries[i].name, shared.FormatTokenCount(entries[i].count), pct)) + } + if len(entries) > limit { + parts = append(parts, fmt.Sprintf("+%d more", 
len(entries)-limit)) + } + return strings.Join(parts, ", ") +} + +func emitBreakdownMetrics(prefix string, totals map[string]tokenUsage, daily map[string]map[string]float64, snap *core.UsageSnapshot) { + entries := sortUsageEntries(totals) + if len(entries) == 0 { + return + } + + for i, entry := range entries { + if i >= maxBreakdownMetrics { + break + } + keyPrefix := prefix + "_" + sanitizeMetricName(entry.Name) + setUsageMetric(snap, keyPrefix+"_total_tokens", float64(entry.Data.TotalTokens)) + setUsageMetric(snap, keyPrefix+"_input_tokens", float64(entry.Data.InputTokens)) + setUsageMetric(snap, keyPrefix+"_output_tokens", float64(entry.Data.OutputTokens)) + + if entry.Data.CachedInputTokens > 0 { + setUsageMetric(snap, keyPrefix+"_cached_tokens", float64(entry.Data.CachedInputTokens)) + } + if entry.Data.ReasoningOutputTokens > 0 { + setUsageMetric(snap, keyPrefix+"_reasoning_tokens", float64(entry.Data.ReasoningOutputTokens)) + } + + if byDay, ok := daily[entry.Name]; ok { + series := core.SortedTimePoints(byDay) + snap.DailySeries["tokens_"+prefix+"_"+sanitizeMetricName(entry.Name)] = series + snap.DailySeries["usage_"+prefix+"_"+sanitizeMetricName(entry.Name)] = series + } + + if prefix == "model" { + rec := core.ModelUsageRecord{ + RawModelID: entry.Name, + RawSource: "jsonl", + Window: defaultUsageWindowLabel, + InputTokens: core.Float64Ptr(float64(entry.Data.InputTokens)), + OutputTokens: core.Float64Ptr(float64(entry.Data.OutputTokens)), + TotalTokens: core.Float64Ptr(float64(entry.Data.TotalTokens)), + } + if entry.Data.CachedInputTokens > 0 { + rec.CachedTokens = core.Float64Ptr(float64(entry.Data.CachedInputTokens)) + } + if entry.Data.ReasoningOutputTokens > 0 { + rec.ReasoningTokens = core.Float64Ptr(float64(entry.Data.ReasoningOutputTokens)) + } + snap.AppendModelUsage(rec) + } + } + + snap.Raw[prefix+"_usage"] = formatUsageSummary(entries, maxBreakdownRaw) +} + +func emitClientSessionMetrics(clientSessions map[string]int, snap 
*core.UsageSnapshot) { + type entry struct { + name string + count int + } + var all []entry + for name, count := range clientSessions { + if count > 0 { + all = append(all, entry{name: name, count: count}) + } + } + sort.Slice(all, func(i, j int) bool { + if all[i].count == all[j].count { + return all[i].name < all[j].name + } + return all[i].count > all[j].count + }) + + for i, item := range all { + if i >= maxBreakdownMetrics { + break + } + value := float64(item.count) + snap.Metrics["client_"+sanitizeMetricName(item.name)+"_sessions"] = core.Metric{Used: &value, Unit: "sessions", Window: defaultUsageWindowLabel} + } +} + +func setUsageMetric(snap *core.UsageSnapshot, key string, value float64) { + if value <= 0 { + return + } + snap.Metrics[key] = core.Metric{Used: &value, Unit: "tokens", Window: defaultUsageWindowLabel} +} + +func addUsage(target map[string]tokenUsage, name string, delta tokenUsage) { + current := target[name] + current.InputTokens += delta.InputTokens + current.CachedInputTokens += delta.CachedInputTokens + current.OutputTokens += delta.OutputTokens + current.ReasoningOutputTokens += delta.ReasoningOutputTokens + current.TotalTokens += delta.TotalTokens + target[name] = current +} + +func addDailyUsage(target map[string]map[string]float64, name, day string, value float64) { + if day == "" || value <= 0 { + return + } + if target[name] == nil { + target[name] = make(map[string]float64) + } + target[name][day] += value +} + +func sortUsageEntries(values map[string]tokenUsage) []usageEntry { + out := make([]usageEntry, 0, len(values)) + for name, data := range values { + out = append(out, usageEntry{Name: name, Data: data}) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Data.TotalTokens == out[j].Data.TotalTokens { + return out[i].Name < out[j].Name + } + return out[i].Data.TotalTokens > out[j].Data.TotalTokens + }) + return out +} + +func formatUsageSummary(entries []usageEntry, max int) string { + total := 0 + for _, entry := range 
entries { + total += entry.Data.TotalTokens + } + if total <= 0 { + return "" + } + + limit := max + if limit > len(entries) { + limit = len(entries) + } + + parts := make([]string, 0, limit+1) + for i := 0; i < limit; i++ { + entry := entries[i] + pct := float64(entry.Data.TotalTokens) / float64(total) * 100 + parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, shared.FormatTokenCount(entry.Data.TotalTokens), pct)) + } + + if len(entries) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) + } + return strings.Join(parts, ", ") +} + +func usageDelta(current, previous tokenUsage) tokenUsage { + return tokenUsage{ + InputTokens: current.InputTokens - previous.InputTokens, + CachedInputTokens: current.CachedInputTokens - previous.CachedInputTokens, + OutputTokens: current.OutputTokens - previous.OutputTokens, + ReasoningOutputTokens: current.ReasoningOutputTokens - previous.ReasoningOutputTokens, + TotalTokens: current.TotalTokens - previous.TotalTokens, + } +} + +func validUsageDelta(delta tokenUsage) bool { + return delta.InputTokens >= 0 && + delta.CachedInputTokens >= 0 && + delta.OutputTokens >= 0 && + delta.ReasoningOutputTokens >= 0 && + delta.TotalTokens >= 0 +} + +func normalizeModelName(name string) string { + name = strings.TrimSpace(name) + if name == "" { + return "unknown" + } + return name +} + +func classifyClient(source, originator string) string { + src := strings.ToLower(strings.TrimSpace(source)) + org := strings.ToLower(strings.TrimSpace(originator)) + + switch { + case src == "openusage" || src == "codex": + return "CLI" + case strings.Contains(org, "desktop"): + return "Desktop App" + case strings.Contains(org, "exec") || src == "exec": + return "Exec" + case strings.Contains(org, "cli") || src == "cli": + return "CLI" + case src == "vscode" || src == "ide": + return "IDE" + case src == "": + return "Other" + default: + return strings.ToUpper(src) + } +} + +func normalizeClientName(name string) string { + 
name = strings.TrimSpace(name) + if name == "" { + return "Other" + } + return name +} + +func sanitizeMetricName(name string) string { + name = strings.ToLower(strings.TrimSpace(name)) + if name == "" { + return "unknown" + } + + var b strings.Builder + lastUnderscore := false + for _, r := range name { + switch { + case r >= 'a' && r <= 'z': + b.WriteRune(r) + lastUnderscore = false + case r >= '0' && r <= '9': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore { + b.WriteByte('_') + lastUnderscore = true + } + } + } + + out := strings.Trim(b.String(), "_") + if out == "" { + return "unknown" + } + return out +} + +func dayFromTimestamp(timestamp string) string { + if timestamp == "" { + return "" + } + + for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} { + if parsed, err := time.Parse(layout, timestamp); err == nil { + return parsed.Format("2006-01-02") + } + } + + if len(timestamp) >= 10 { + candidate := timestamp[:10] + if _, err := time.Parse("2006-01-02", candidate); err == nil { + return candidate + } + } + return "" +} + +func dayFromSessionPath(path, sessionsDir string) string { + rel, err := filepath.Rel(sessionsDir, path) + if err != nil { + return "" + } + + parts := strings.Split(filepath.ToSlash(rel), "/") + if len(parts) < 3 { + return "" + } + + candidate := fmt.Sprintf("%s-%s-%s", parts[0], parts[1], parts[2]) + if _, err := time.Parse("2006-01-02", candidate); err != nil { + return "" + } + return candidate +} + +func findLatestSessionFile(sessionsDir string) (string, error) { + var files []string + + err := filepath.Walk(sessionsDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + if !info.IsDir() && strings.HasSuffix(path, ".jsonl") { + files = append(files, path) + } + return nil + }) + if err != nil { + return "", fmt.Errorf("walking sessions dir: %w", err) + } + + if len(files) == 0 { + return "", fmt.Errorf("no session files found in 
%s", sessionsDir) + } + + sort.Slice(files, func(i, j int) bool { + si, _ := os.Stat(files[i]) + sj, _ := os.Stat(files[j]) + if si == nil || sj == nil { + return false + } + return si.ModTime().After(sj.ModTime()) + }) + + return files[0], nil +} + +func findLastTokenCount(path string) (*eventPayload, error) { + var lastPayload *eventPayload + if err := walkSessionFile(path, func(record sessionLine) error { + if record.EventPayload == nil || record.EventPayload.Type != "token_count" { + return nil + } + payload := *record.EventPayload + lastPayload = &payload + return nil + }); err != nil { + return nil, err + } + return lastPayload, nil +} + +func (p *Provider) readDailySessionCounts(sessionsDir string, snap *core.UsageSnapshot) { + dayCounts := make(map[string]int) + + _ = filepath.Walk(sessionsDir, func(path string, info os.FileInfo, err error) error { + if err != nil || info.IsDir() || !strings.HasSuffix(path, ".jsonl") { + return nil + } + rel, relErr := filepath.Rel(sessionsDir, path) + if relErr != nil { + return nil + } + parts := strings.Split(filepath.ToSlash(rel), "/") + if len(parts) >= 3 { + dateStr := fmt.Sprintf("%s-%s-%s", parts[0], parts[1], parts[2]) + if _, parseErr := time.Parse("2006-01-02", dateStr); parseErr == nil { + dayCounts[dateStr]++ + } + } + return nil + }) + + if len(dayCounts) == 0 { + return + } + + dates := lo.Keys(dayCounts) + sort.Strings(dates) + + for _, d := range dates { + snap.DailySeries["sessions"] = append(snap.DailySeries["sessions"], core.TimePoint{ + Date: d, + Value: float64(dayCounts[d]), + }) + } +} + +func formatWindow(minutes int) string { + if minutes <= 0 { + return "" + } + if minutes < 60 { + return fmt.Sprintf("%dm", minutes) + } + hours := minutes / 60 + remaining := minutes % 60 + if remaining == 0 { + if hours >= 24 { + days := hours / 24 + leftover := hours % 24 + if leftover == 0 { + return fmt.Sprintf("%dd", days) + } + return fmt.Sprintf("%dd%dh", days, leftover) + } + return fmt.Sprintf("%dh", 
hours) + } + return fmt.Sprintf("%dh%dm", hours, remaining) +} From d866d3639395ed16b099b857f338a92f56417ead Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 17:43:14 +0100 Subject: [PATCH 21/32] refactor: split claude code helpers and settings modal layout --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 8 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 13 +- internal/providers/claude_code/claude_code.go | 1090 ----------------- internal/providers/claude_code/local_files.go | 378 ++++++ .../providers/claude_code/local_helpers.go | 687 +++++++++++ internal/tui/settings_modal.go | 232 ---- internal/tui/settings_modal_layout.go | 239 ++++ 7 files changed, 1318 insertions(+), 1329 deletions(-) create mode 100644 internal/providers/claude_code/local_files.go create mode 100644 internal/providers/claude_code/local_helpers.go create mode 100644 internal/tui/settings_modal_layout.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 25d12ec..a42eaf7 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -60,20 +60,22 @@ This table captures every issue found in this pass. It is broad and high-signal, | R40 | Fixed | Analytics cost fallback extraction | `internal/core/analytics_costs.go`, `internal/tui/analytics_data.go` | Analytics all-time/today/week cost fallback rules now live in shared core logic instead of TUI-owned metric-key decoding. | Continue moving remaining analytics/detail metric decoding into shared extractors. | | R41 | Fixed | Usage-view aggregate fanout split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_aggregate.go` | Query fanout and aggregate assembly now live in a dedicated helper instead of inline in the main usage-view orchestration path. | Continue splitting only if the aggregate helper grows materially. 
| | R42 | Fixed | Provider display-info split and shared fallback metric helpers | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/core/dashboard_display_metrics.go` | Provider tile display-summary logic moved out of the main TUI model file, and fallback/rate-limit metric selection now lives in shared core helpers instead of ad hoc TUI parsing. | Continue moving the remaining analytics/detail-specific metric decoding into shared extractors. | +| R43 | Fixed | Codex live/session split | `internal/providers/codex/codex.go`, `internal/providers/codex/live_usage.go`, `internal/providers/codex/session_usage.go` | Codex now keeps provider wiring in the main file while live usage fetching and local session projection live in dedicated helpers. | Continue the same concern-based split for the remaining large providers. | +| R44 | Fixed | Claude Code local file/helper split and settings modal layout split | `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/local_files.go`, `internal/providers/claude_code/local_helpers.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go` | Claude Code local readers and generic helper logic are split out of the main provider file, and the settings modal layout/render wrapper no longer lives inline with all modal state/input handling. | Continue with deeper conversation-aggregation extraction in Claude Code and more TUI render-section splits. | ## Action Table | ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. 
| Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | -| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, and more detail logic is isolated, but TUI state-transition and rendering logic are still concentrated in very large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | +| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, and settings modal layout is separated, but TUI state-transition and render-heavy flows are still concentrated in a few large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. 
| | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Composition bars, provider tile fallback/rate-limit selection, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | -| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/codex/codex.go` | Cursor and OpenRouter are now materially decomposed, but several other providers still combine transport, parsing, normalization, and projection in single 1900-2600 LOC files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | +| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/claude_code/claude_code.go` | Cursor, OpenRouter, and Codex are now materially decomposed, and Claude Code has local-reader/helper splits, but several providers still combine transport, parsing, normalization, and projection in single very large files. 
| Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, telemetry helpers, and Claude Code conversation aggregation. | Smaller diffs, less drift risk, and easier provider-specific testing. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. | Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. | | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. 
| | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | -| A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/tiles_composition.go` | TUI logic is split across files, but the files are still individually very large and mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. 
| +| A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/tui/tiles_composition.go` | TUI logic is split across more focused files now, but several files are still individually very large and still mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | | A15 | P3 | Performance optimization opportunity in render path | `internal/tui/model.go:441-450`, `internal/tui/tiles_composition.go:302-322`, `internal/tui/detail.go:752-1046`, `internal/tui/analytics.go:663-729` | The UI recomputes display/composition structures from raw metric maps repeatedly during rendering. It is correct, but the work is duplicated across views and frames. | Cache derived display/composition sections per snapshot update instead of rebuilding them in each view path. | Lower render cost and less duplicated parsing logic. | ## Suggested Execution Order diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index 004b09a..39afb07 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -5,7 +5,7 @@ Repository: `/Users/janekbaraniewski/Workspace/priv/openusage` ## Scope -This is a refreshed architecture review after the dashboard race fix, daemon/read-model cleanup, provider parser consolidation, and the recent Cursor/OpenRouter/Ollama/TUI refactors on branch `feat/dashboard-race-parser-cleanups`. 
+This is a refreshed architecture review after the dashboard race fix, daemon/read-model cleanup, provider parser consolidation, and the recent Cursor/OpenRouter/Ollama/Codex/Claude Code/TUI refactors on branch `feat/dashboard-race-parser-cleanups`. The goal of this report is not to restate already-fixed issues. It documents the meaningful problems still left in the current tree. @@ -19,8 +19,11 @@ These were major concerns in earlier reviews and are now materially addressed: - Telemetry source account binding for unambiguous local collectors and hooks. - Cursor parser/SQLite duplication across dashboard and telemetry paths. - Codex and Claude Code raw parser duplication. +- Codex live/session flow concentrated in one provider file. +- Claude Code local file readers and model-summary helpers concentrated in one provider file. - OpenRouter provider-resolution, analytics, generation, projection, and account-path monolith sprawl. - TUI side-effect leakage into config persistence / integration install / provider validation. +- Settings modal layout/render wrapper living inline with settings state/input handling. - Ollama hot-path `time.Now()` usage in behavioral window/reset logic. - Shared hook ingest parsing/local fallback drift between daemon and CLI. - Usage-view temp-table materialization and aggregate query fanout living inline in the main orchestration path. @@ -29,7 +32,7 @@ These were major concerns in earlier reviews and are now materially addressed: ### 1. 
[P2] TUI rendering and state handling are still concentrated in a few very large files -The TUI is much better than before, and provider tile display-summary logic no longer lives inline in `model.go`, but [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go), and [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) are still large enough that unrelated concerns move together. +The TUI is much better than before, and provider tile display-summary logic no longer lives inline in `model.go`, while the settings modal layout wrapper now lives in its own file. But [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go), and the remaining settings modal render sections are still large enough that unrelated concerns move together. Refs: - [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go) @@ -37,6 +40,7 @@ Refs: - [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go) - [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go) - [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) +- [settings_modal_layout.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal_layout.go) What to address: - Continue section-level file extraction from `detail.go`. @@ -74,7 +78,7 @@ What to address: ### 4. 
[P2] Several providers are still large mixed-responsibility units -Cursor and OpenRouter are now in much better shape, but several other providers remain monoliths that mix transport, parsing, normalization, and projection in one place. +Cursor, OpenRouter, and Codex are now in much better shape, and Claude Code has started the same split, but several providers still remain monoliths that mix transport, parsing, normalization, and projection in one place. Refs: - [ollama.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/ollama.go) @@ -82,7 +86,8 @@ Refs: - [gemini_cli.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/gemini_cli.go) - [copilot.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/copilot.go) - [claude_code.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/claude_code.go) -- [codex.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/codex/codex.go) +- [local_files.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_files.go) +- [local_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_helpers.go) What to address: - Split by concern, not by arbitrary line count: diff --git a/internal/providers/claude_code/claude_code.go b/internal/providers/claude_code/claude_code.go index 2fa21e7..fe3359b 100644 --- a/internal/providers/claude_code/claude_code.go +++ b/internal/providers/claude_code/claude_code.go @@ -1,22 +1,18 @@ package claude_code import ( - "bufio" "context" - "encoding/json" "fmt" "math" "os" "path/filepath" "sort" - "strconv" "strings" "sync" "time" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" - "github.com/janekbaraniewski/openusage/internal/providers/shared" "github.com/samber/lo" ) @@ -266,36 +262,6 @@ const ( maxModelUsageSummaryItems = 
6 ) -func floorToHour(t time.Time) time.Time { - return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location()) -} - -func buildStatsCandidates(explicitPath, claudeDir, home string) []string { - if explicitPath != "" { - return []string{explicitPath} - } - - candidates := []string{ - filepath.Join(claudeDir, "stats-cache.json"), - filepath.Join(claudeDir, ".claude-backup", "stats-cache.json"), - filepath.Join(home, ".claude-backup", "stats-cache.json"), - } - - seen := make(map[string]struct{}, len(candidates)) - out := make([]string, 0, len(candidates)) - for _, candidate := range candidates { - if candidate == "" { - continue - } - if _, ok := seen[candidate]; ok { - continue - } - seen[candidate] = struct{}{} - out = append(out, candidate) - } - return out -} - func (p *Provider) DetailWidget() core.DetailWidget { return core.CodingToolDetailWidget(true) } @@ -429,388 +395,6 @@ func (p *Provider) setCachedUsage(u *usageResponse) { p.usageAPICache = u } -func applyUsageResponse(usage *usageResponse, snap *core.UsageSnapshot, now time.Time) { - applyUsageBucket := func(metricKey, window, resetKey string, bucket *usageBucket) { - if bucket == nil { - return - } - - util := bucket.Utilization - limit := float64(100) - if t, ok := parseReset(bucket.ResetsAt); ok { - // Prevent stale "100%" (or other pre-reset values) from persisting - // after reset boundary has already passed. 
- if !t.After(now) { - util = 0 - } - if resetKey != "" { - snap.Resets[resetKey] = t - } - } - - snap.Metrics[metricKey] = core.Metric{ - Used: &util, - Limit: &limit, - Unit: "%", - Window: window, - } - } - - applyUsageBucket("usage_five_hour", "5h", "usage_five_hour", usage.FiveHour) - applyUsageBucket("usage_seven_day", "7d", "usage_seven_day", usage.SevenDay) - applyUsageBucket("usage_seven_day_sonnet", "7d-sonnet", "", usage.SevenDaySonnet) - applyUsageBucket("usage_seven_day_opus", "7d-opus", "", usage.SevenDayOpus) - applyUsageBucket("usage_seven_day_cowork", "7d-cowork", "", usage.SevenDayCowork) -} - -func parseReset(raw string) (time.Time, bool) { - if raw == "" { - return time.Time{}, false - } - t, err := time.Parse(time.RFC3339, raw) - if err != nil { - return time.Time{}, false - } - return t, true -} - -func (p *Provider) readStats(path string, snap *core.UsageSnapshot) error { - data, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("reading stats cache: %w", err) - } - - var stats statsCache - if err := json.Unmarshal(data, &stats); err != nil { - return fmt.Errorf("parsing stats cache: %w", err) - } - - if stats.TotalMessages > 0 { - total := float64(stats.TotalMessages) - snap.Metrics["total_messages"] = core.Metric{ - Used: &total, - Unit: "messages", - Window: "all-time", - } - } - - if stats.TotalSessions > 0 { - total := float64(stats.TotalSessions) - snap.Metrics["total_sessions"] = core.Metric{ - Used: &total, - Unit: "sessions", - Window: "all-time", - } - } - - if stats.TotalSpeculationTimeSavedMs > 0 { - hoursSaved := float64(stats.TotalSpeculationTimeSavedMs) / float64(time.Hour/time.Millisecond) - snap.Metrics["speculation_time_saved_hours"] = core.Metric{ - Used: &hoursSaved, - Unit: "hours", - Window: "all-time", - } - } - - now := time.Now() - today := now.Format("2006-01-02") - weekStart := now.Add(-7 * 24 * time.Hour) - var weeklyMessages int - var weeklyToolCalls int - var weeklySessions int - for _, da := range 
stats.DailyActivity { - snap.DailySeries["messages"] = append(snap.DailySeries["messages"], core.TimePoint{ - Date: da.Date, Value: float64(da.MessageCount), - }) - snap.DailySeries["sessions"] = append(snap.DailySeries["sessions"], core.TimePoint{ - Date: da.Date, Value: float64(da.SessionCount), - }) - snap.DailySeries["tool_calls"] = append(snap.DailySeries["tool_calls"], core.TimePoint{ - Date: da.Date, Value: float64(da.ToolCallCount), - }) - - if da.Date == today { - msgs := float64(da.MessageCount) - snap.Metrics["messages_today"] = core.Metric{ - Used: &msgs, - Unit: "messages", - Window: "1d", - } - tools := float64(da.ToolCallCount) - snap.Metrics["tool_calls_today"] = core.Metric{ - Used: &tools, - Unit: "calls", - Window: "1d", - } - sessions := float64(da.SessionCount) - snap.Metrics["sessions_today"] = core.Metric{ - Used: &sessions, - Unit: "sessions", - Window: "1d", - } - } - - if day, err := time.Parse("2006-01-02", da.Date); err == nil && (day.After(weekStart) || day.Equal(weekStart)) { - weeklyMessages += da.MessageCount - weeklyToolCalls += da.ToolCallCount - weeklySessions += da.SessionCount - } - } - - if weeklyMessages > 0 { - wm := float64(weeklyMessages) - snap.Metrics["7d_messages"] = core.Metric{ - Used: &wm, - Unit: "messages", - Window: "rolling 7 days", - } - } - if weeklyToolCalls > 0 { - wt := float64(weeklyToolCalls) - snap.Metrics["7d_tool_calls"] = core.Metric{ - Used: &wt, - Unit: "calls", - Window: "rolling 7 days", - } - } - if weeklySessions > 0 { - ws := float64(weeklySessions) - snap.Metrics["7d_sessions"] = core.Metric{ - Used: &ws, - Unit: "sessions", - Window: "rolling 7 days", - } - } - - for _, dt := range stats.DailyModelTokens { - totalDayTokens := float64(0) - for model, tokens := range dt.TokensByModel { - name := sanitizeModelName(model) - key := fmt.Sprintf("tokens_%s", name) - snap.DailySeries[key] = append(snap.DailySeries[key], core.TimePoint{ - Date: dt.Date, Value: float64(tokens), - }) - totalDayTokens += 
float64(tokens) - } - snap.DailySeries["tokens_total"] = append(snap.DailySeries["tokens_total"], core.TimePoint{ - Date: dt.Date, Value: totalDayTokens, - }) - - if dt.Date == today { - for model, tokens := range dt.TokensByModel { - t := float64(tokens) - key := fmt.Sprintf("tokens_today_%s", sanitizeModelName(model)) - snap.Metrics[key] = core.Metric{ - Used: &t, - Unit: "tokens", - Window: "1d", - } - } - } - } - - var totalCostUSD float64 - for model, usage := range stats.ModelUsage { - outTokens := float64(usage.OutputTokens) - inTokens := float64(usage.InputTokens) - name := sanitizeModelName(model) - modelPrefix := "model_" + name - - setMetricMax(snap, modelPrefix+"_input_tokens", inTokens, "tokens", "all-time") - setMetricMax(snap, modelPrefix+"_output_tokens", outTokens, "tokens", "all-time") - setMetricMax(snap, modelPrefix+"_cached_tokens", float64(usage.CacheReadInputTokens), "tokens", "all-time") - setMetricMax(snap, modelPrefix+"_cache_creation_tokens", float64(usage.CacheCreationInputTokens), "tokens", "all-time") - setMetricMax(snap, modelPrefix+"_web_search_requests", float64(usage.WebSearchRequests), "requests", "all-time") - setMetricMax(snap, modelPrefix+"_context_window_tokens", float64(usage.ContextWindow), "tokens", "all-time") - setMetricMax(snap, modelPrefix+"_max_output_tokens", float64(usage.MaxOutputTokens), "tokens", "all-time") - - snap.Raw[fmt.Sprintf("model_%s_cache_read", name)] = fmt.Sprintf("%d tokens", usage.CacheReadInputTokens) - snap.Raw[fmt.Sprintf("model_%s_cache_create", name)] = fmt.Sprintf("%d tokens", usage.CacheCreationInputTokens) - if usage.WebSearchRequests > 0 { - snap.Raw[fmt.Sprintf("model_%s_web_search_requests", name)] = fmt.Sprintf("%d", usage.WebSearchRequests) - } - if usage.ContextWindow > 0 { - snap.Raw[fmt.Sprintf("model_%s_context_window", name)] = fmt.Sprintf("%d", usage.ContextWindow) - } - if usage.MaxOutputTokens > 0 { - snap.Raw[fmt.Sprintf("model_%s_max_output_tokens", name)] = fmt.Sprintf("%d", 
usage.MaxOutputTokens) - } - - if usage.CostUSD > 0 { - totalCostUSD += usage.CostUSD - setMetricMax(snap, modelPrefix+"_cost_usd", usage.CostUSD, "USD", "all-time") - } - - rec := core.ModelUsageRecord{ - RawModelID: model, - RawSource: "stats_cache", - Window: "all-time", - InputTokens: core.Float64Ptr(inTokens), - OutputTokens: core.Float64Ptr(outTokens), - TotalTokens: core.Float64Ptr(inTokens + outTokens), - } - if usage.CacheReadInputTokens > 0 || usage.CacheCreationInputTokens > 0 { - rec.CachedTokens = core.Float64Ptr(float64(usage.CacheReadInputTokens + usage.CacheCreationInputTokens)) - } - if usage.CostUSD > 0 { - rec.CostUSD = core.Float64Ptr(usage.CostUSD) - } - snap.AppendModelUsage(rec) - } - - if totalCostUSD > 0 { - cost := totalCostUSD - snap.Metrics["total_cost_usd"] = core.Metric{ - Used: &cost, - Unit: "USD", - Window: "all-time", - } - } - - snap.Raw["stats_last_computed"] = stats.LastComputedDate - if stats.FirstSessionDate != "" { - snap.Raw["first_session"] = stats.FirstSessionDate - } - if stats.LongestSession != nil { - if stats.LongestSession.Duration > 0 { - minutes := float64(stats.LongestSession.Duration) / float64(time.Minute/time.Millisecond) - snap.Metrics["longest_session_minutes"] = core.Metric{ - Used: &minutes, - Unit: "minutes", - Window: "all-time", - } - } - if stats.LongestSession.MessageCount > 0 { - msgs := float64(stats.LongestSession.MessageCount) - snap.Metrics["longest_session_messages"] = core.Metric{ - Used: &msgs, - Unit: "messages", - Window: "all-time", - } - } - if stats.LongestSession.SessionID != "" { - snap.Raw["longest_session_id"] = stats.LongestSession.SessionID - } - if stats.LongestSession.Timestamp != "" { - snap.Raw["longest_session_timestamp"] = stats.LongestSession.Timestamp - } - } - if len(stats.HourCounts) > 0 { - peakHour := "" - peakCount := 0 - for h, c := range stats.HourCounts { - if c > peakCount { - peakHour = h - peakCount = c - } - } - if peakHour != "" { - snap.Raw["peak_hour"] = 
peakHour - snap.Raw["peak_hour_messages"] = fmt.Sprintf("%d", peakCount) - } - } - - return nil -} - -func (p *Provider) readAccount(path string, snap *core.UsageSnapshot) error { - data, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("reading account config: %w", err) - } - - var acct accountConfig - if err := json.Unmarshal(data, &acct); err != nil { - return fmt.Errorf("parsing account config: %w", err) - } - - if acct.OAuthAccount != nil { - if acct.OAuthAccount.EmailAddress != "" { - snap.Raw["account_email"] = acct.OAuthAccount.EmailAddress - } - if acct.OAuthAccount.DisplayName != "" { - snap.Raw["account_name"] = acct.OAuthAccount.DisplayName - } - if acct.OAuthAccount.BillingType != "" { - snap.Raw["billing_type"] = acct.OAuthAccount.BillingType - } - if acct.OAuthAccount.HasExtraUsageEnabled { - snap.Raw["extra_usage_enabled"] = "true" - } - if acct.OAuthAccount.AccountCreatedAt != "" { - snap.Raw["account_created_at"] = acct.OAuthAccount.AccountCreatedAt - } - if acct.OAuthAccount.SubscriptionCreatedAt != "" { - snap.Raw["subscription_created_at"] = acct.OAuthAccount.SubscriptionCreatedAt - } - if acct.OAuthAccount.OrganizationUUID != "" { - snap.Raw["organization_uuid"] = acct.OAuthAccount.OrganizationUUID - } - } - - if acct.HasAvailableSubscription { - snap.Raw["subscription"] = "active" - } else { - snap.Raw["subscription"] = "none" - } - - if acct.ClaudeCodeFirstTokenDate != "" { - snap.Raw["claude_code_first_token_date"] = acct.ClaudeCodeFirstTokenDate - } - - if acct.PenguinModeOrgEnabled { - snap.Raw["penguin_mode_enabled"] = "true" - } - - for orgID, access := range acct.S1MAccessCache { - if access.HasAccess { - shortID := orgID - if len(shortID) > 8 { - shortID = shortID[:8] - } - snap.Raw[fmt.Sprintf("s1m_access_%s", shortID)] = "true" - } - } - - snap.Raw["num_startups"] = fmt.Sprintf("%d", acct.NumStartups) - if acct.InstallMethod != "" { - snap.Raw["install_method"] = acct.InstallMethod - } - if acct.ClientDataCache != nil 
&& acct.ClientDataCache.Timestamp > 0 { - snap.Raw["client_data_cache_ts"] = strconv.FormatInt(acct.ClientDataCache.Timestamp, 10) - } - if len(acct.SkillUsage) > 0 { - counts := make(map[string]int, len(acct.SkillUsage)) - for skill, usage := range acct.SkillUsage { - counts[sanitizeModelName(skill)] = usage.UsageCount - } - snap.Raw["skill_usage"] = summarizeCountMap(counts, 6) - } - - return nil -} - -func (p *Provider) readSettings(path string, snap *core.UsageSnapshot) error { - data, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("reading settings: %w", err) - } - - var settings settingsConfig - if err := json.Unmarshal(data, &settings); err != nil { - return fmt.Errorf("parsing settings: %w", err) - } - - if settings.Model != "" { - snap.Raw["active_model"] = settings.Model - } - if settings.AlwaysThinkingEnabled { - snap.Raw["always_thinking"] = "true" - } - - return nil -} - func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, snap *core.UsageSnapshot) error { jsonlFiles := collectJSONLFiles(projectsDir) if altProjectsDir != "" { @@ -1626,677 +1210,3 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna return nil } - -func parseJSONLTimestamp(raw string) (time.Time, bool) { - t, err := shared.ParseTimestampString(raw) - if err != nil { - return time.Time{}, false - } - return t, true -} - -func isMutatingTool(name string) bool { - n := strings.ToLower(strings.TrimSpace(name)) - if n == "" { - return false - } - return strings.Contains(n, "edit") || - strings.Contains(n, "write") || - strings.Contains(n, "create") || - strings.Contains(n, "delete") || - strings.Contains(n, "rename") || - strings.Contains(n, "move") -} - -func extractToolCommand(input any) string { - var command string - var walk func(value any) - walk = func(value any) { - if command != "" || value == nil { - return - } - switch v := value.(type) { - case map[string]any: - for key, child := range v { - k := 
strings.ToLower(strings.TrimSpace(key)) - if k == "command" || k == "cmd" || k == "script" || k == "shell_command" { - if s, ok := child.(string); ok { - command = strings.TrimSpace(s) - return - } - } - } - for _, child := range v { - walk(child) - if command != "" { - return - } - } - case []any: - for _, child := range v { - walk(child) - if command != "" { - return - } - } - } - } - walk(input) - return command -} - -func estimateToolLineDelta(toolName string, input any) (added int, removed int) { - lineCount := func(text string) int { - text = strings.TrimSpace(text) - if text == "" { - return 0 - } - return strings.Count(text, "\n") + 1 - } - lowerTool := strings.ToLower(strings.TrimSpace(toolName)) - var walk func(value any) - walk = func(value any) { - switch v := value.(type) { - case map[string]any: - oldKeys := []string{"old_string", "old_text", "from", "replace"} - newKeys := []string{"new_string", "new_text", "to", "with"} - var oldText string - var newText string - for _, key := range oldKeys { - if raw, ok := v[key]; ok { - if s, ok := raw.(string); ok { - oldText = s - break - } - } - } - for _, key := range newKeys { - if raw, ok := v[key]; ok { - if s, ok := raw.(string); ok { - newText = s - break - } - } - } - if oldText != "" || newText != "" { - removed += lineCount(oldText) - added += lineCount(newText) - } - if strings.Contains(lowerTool, "write") || strings.Contains(lowerTool, "create") { - if raw, ok := v["content"]; ok { - if s, ok := raw.(string); ok { - added += lineCount(s) - } - } - } - for _, child := range v { - walk(child) - } - case []any: - for _, child := range v { - walk(child) - } - } - } - walk(input) - return added, removed -} - -func extractToolPathCandidates(input any) []string { - pathKeyHints := map[string]bool{ - "path": true, "paths": true, "file": true, "files": true, "filepath": true, "file_path": true, - "cwd": true, "directory": true, "dir": true, "glob": true, "pattern": true, "target": true, - "from": true, "to": 
true, "include": true, "exclude": true, - } - - candidates := make(map[string]bool) - var walk func(value any, hinted bool) - walk = func(value any, hinted bool) { - switch v := value.(type) { - case map[string]any: - for key, child := range v { - k := strings.ToLower(strings.TrimSpace(key)) - childHinted := hinted || pathKeyHints[k] || strings.Contains(k, "path") || strings.Contains(k, "file") - walk(child, childHinted) - } - case []any: - for _, child := range v { - walk(child, hinted) - } - case string: - if !hinted { - return - } - for _, token := range extractPathTokens(v) { - candidates[token] = true - } - } - } - walk(input, false) - - out := make([]string, 0, len(candidates)) - for candidate := range candidates { - out = append(out, candidate) - } - sort.Strings(out) - return out -} - -func extractPathTokens(raw string) []string { - raw = strings.TrimSpace(raw) - if raw == "" { - return nil - } - fields := strings.Fields(raw) - if len(fields) == 0 { - fields = []string{raw} - } - var out []string - for _, field := range fields { - token := strings.Trim(field, "\"'`()[]{}<>,:;") - if token == "" { - continue - } - lower := strings.ToLower(token) - if strings.HasPrefix(lower, "http://") || strings.HasPrefix(lower, "https://") || strings.HasPrefix(lower, "file://") { - continue - } - if strings.HasPrefix(token, "-") { - continue - } - if !strings.Contains(token, "/") && !strings.Contains(token, "\\") && !strings.Contains(token, ".") { - continue - } - token = strings.TrimPrefix(token, "./") - token = strings.TrimSpace(token) - if token == "" { - continue - } - out = append(out, token) - } - return lo.Uniq(out) -} - -func inferLanguageFromPath(path string) string { - p := strings.ToLower(strings.TrimSpace(path)) - if p == "" { - return "" - } - base := strings.ToLower(filepath.Base(p)) - switch base { - case "dockerfile": - return "docker" - case "makefile": - return "make" - } - ext := strings.ToLower(filepath.Ext(p)) - switch ext { - case ".go": - return "go" 
- case ".py": - return "python" - case ".ts", ".tsx": - return "typescript" - case ".js", ".jsx": - return "javascript" - case ".tf", ".tfvars", ".hcl": - return "terraform" - case ".sh", ".bash", ".zsh", ".fish": - return "shell" - case ".md", ".mdx": - return "markdown" - case ".json": - return "json" - case ".yml", ".yaml": - return "yaml" - case ".sql": - return "sql" - case ".rs": - return "rust" - case ".java": - return "java" - case ".c", ".h": - return "c" - case ".cc", ".cpp", ".cxx", ".hpp": - return "cpp" - case ".rb": - return "ruby" - case ".php": - return "php" - case ".swift": - return "swift" - case ".vue": - return "vue" - case ".svelte": - return "svelte" - case ".toml": - return "toml" - case ".xml": - return "xml" - } - return "" -} - -func summarizeCountMap(values map[string]int, limit int) string { - type entry struct { - name string - value int - } - entries := make([]entry, 0, len(values)) - for name, value := range values { - if value <= 0 { - continue - } - entries = append(entries, entry{name: name, value: value}) - } - if len(entries) == 0 { - return "" - } - sort.Slice(entries, func(i, j int) bool { - if entries[i].value == entries[j].value { - return entries[i].name < entries[j].name - } - return entries[i].value > entries[j].value - }) - if limit <= 0 || limit > len(entries) { - limit = len(entries) - } - parts := make([]string, 0, limit+1) - for i := 0; i < limit; i++ { - name := strings.ReplaceAll(entries[i].name, "_", "-") - parts = append(parts, fmt.Sprintf("%s %d", name, entries[i].value)) - } - if len(entries) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) - } - return strings.Join(parts, ", ") -} - -func summarizeFloatMap(values map[string]float64, unit string, limit int) string { - type entry struct { - name string - value float64 - } - entries := make([]entry, 0, len(values)) - for name, value := range values { - if value <= 0 { - continue - } - entries = append(entries, entry{name: name, 
value: value}) - } - if len(entries) == 0 { - return "" - } - sort.Slice(entries, func(i, j int) bool { - if entries[i].value == entries[j].value { - return entries[i].name < entries[j].name - } - return entries[i].value > entries[j].value - }) - if limit <= 0 || limit > len(entries) { - limit = len(entries) - } - parts := make([]string, 0, limit+1) - for i := 0; i < limit; i++ { - name := strings.ReplaceAll(entries[i].name, "_", "-") - value := shortTokenCount(entries[i].value) - if unit != "" { - value += " " + unit - } - parts = append(parts, fmt.Sprintf("%s %s", name, value)) - } - if len(entries) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) - } - return strings.Join(parts, ", ") -} - -func summarizeTotalsMap(values map[string]*modelUsageTotals, preferCost bool, limit int) string { - type entry struct { - name string - tokens float64 - cost float64 - } - entries := make([]entry, 0, len(values)) - totalCost := 0.0 - for name, totals := range values { - if totals == nil { - continue - } - tokens := totals.input + totals.output + totals.cached + totals.cacheCreate + totals.reasoning - cost := totals.cost - if tokens <= 0 && cost <= 0 { - continue - } - totalCost += cost - entries = append(entries, entry{name: name, tokens: tokens, cost: cost}) - } - if len(entries) == 0 { - return "" - } - useCost := preferCost && totalCost > 0 - sort.Slice(entries, func(i, j int) bool { - left := entries[i].tokens - right := entries[j].tokens - if useCost { - left = entries[i].cost - right = entries[j].cost - } - if left == right { - return entries[i].name < entries[j].name - } - return left > right - }) - if limit <= 0 || limit > len(entries) { - limit = len(entries) - } - parts := make([]string, 0, limit+1) - for i := 0; i < limit; i++ { - name := strings.ReplaceAll(entries[i].name, "_", "-") - if useCost { - parts = append(parts, fmt.Sprintf("%s %s %s tok", name, formatUSDSummary(entries[i].cost), shortTokenCount(entries[i].tokens))) - } else 
{ - parts = append(parts, fmt.Sprintf("%s %s tok", name, shortTokenCount(entries[i].tokens))) - } - } - if len(entries) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) - } - return strings.Join(parts, ", ") -} - -func collectJSONLFiles(dir string) []string { - var files []string - if _, err := os.Stat(dir); os.IsNotExist(err) { - return files - } - - _ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return nil // skip errors - } - if !info.IsDir() && strings.HasSuffix(path, ".jsonl") { - files = append(files, path) - } - return nil - }) - - return files -} - -func parseJSONLFile(path string) []jsonlEntry { - f, err := os.Open(path) - if err != nil { - return nil - } - defer f.Close() - - var entries []jsonlEntry - scanner := bufio.NewScanner(f) - buf := make([]byte, 0, 256*1024) - scanner.Buffer(buf, 10*1024*1024) // 10MB max line size - - for scanner.Scan() { - line := scanner.Bytes() - if len(line) == 0 { - continue - } - var entry jsonlEntry - if err := json.Unmarshal(line, &entry); err != nil { - continue // skip malformed lines - } - entries = append(entries, entry) - } - - return entries -} - -func sanitizeModelName(model string) string { - model = strings.ToLower(strings.TrimSpace(model)) - if model == "" { - return "unknown" - } - - result := make([]byte, 0, len(model)) - for _, c := range model { - if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') { - result = append(result, byte(c)) - } else { - result = append(result, '_') - } - } - - out := strings.Trim(string(result), "_") - if out == "" { - return "unknown" - } - return out -} - -func setMetricMax(snap *core.UsageSnapshot, key string, value float64, unit, window string) { - if value <= 0 { - return - } - if existing, ok := snap.Metrics[key]; ok && existing.Used != nil && *existing.Used >= value { - return - } - v := value - snap.Metrics[key] = core.Metric{ - Used: &v, - Unit: unit, - Window: 
window, - } -} - -func normalizeModelUsage(snap *core.UsageSnapshot) { - modelTotals := make(map[string]*modelUsageTotals) - legacyMetricKeys := make([]string, 0, 16) - - ensureModel := func(name string) *modelUsageTotals { - if _, ok := modelTotals[name]; !ok { - modelTotals[name] = &modelUsageTotals{} - } - return modelTotals[name] - } - - for key, metric := range snap.Metrics { - if metric.Used == nil { - continue - } - - switch { - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_input_tokens"): - model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_input_tokens") - ensureModel(model).input += *metric.Used - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_output_tokens"): - model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_output_tokens") - ensureModel(model).output += *metric.Used - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cost_usd"): - model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cost_usd") - ensureModel(model).cost += *metric.Used - case strings.HasPrefix(key, "input_tokens_"): - model := sanitizeModelName(strings.TrimPrefix(key, "input_tokens_")) - ensureModel(model).input += *metric.Used - legacyMetricKeys = append(legacyMetricKeys, key) - case strings.HasPrefix(key, "output_tokens_"): - model := sanitizeModelName(strings.TrimPrefix(key, "output_tokens_")) - ensureModel(model).output += *metric.Used - legacyMetricKeys = append(legacyMetricKeys, key) - } - } - - for key, value := range snap.Raw { - switch { - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cache_read"): - model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cache_read") - if parsed, ok := parseMetricNumber(value); ok { - setMetricMax(snap, "model_"+model+"_cached_tokens", parsed, "tokens", "all-time") - } - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cache_create"): - model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), 
"_cache_create") - if parsed, ok := parseMetricNumber(value); ok { - setMetricMax(snap, "model_"+model+"_cache_creation_tokens", parsed, "tokens", "all-time") - } - } - } - - for _, key := range legacyMetricKeys { - delete(snap.Metrics, key) - } - - for model, totals := range modelTotals { - modelPrefix := "model_" + sanitizeModelName(model) - setMetricMax(snap, modelPrefix+"_input_tokens", totals.input, "tokens", "all-time") - setMetricMax(snap, modelPrefix+"_output_tokens", totals.output, "tokens", "all-time") - setMetricMax(snap, modelPrefix+"_cost_usd", totals.cost, "USD", "all-time") - } - - buildModelUsageSummaryRaw(snap) -} - -func parseMetricNumber(raw string) (float64, bool) { - clean := strings.TrimSpace(strings.ReplaceAll(raw, ",", "")) - if clean == "" { - return 0, false - } - fields := strings.Fields(clean) - if len(fields) == 0 { - return 0, false - } - v, err := strconv.ParseFloat(fields[0], 64) - if err != nil { - return 0, false - } - return v, true -} - -func buildModelUsageSummaryRaw(snap *core.UsageSnapshot) { - type entry struct { - name string - input float64 - output float64 - cost float64 - } - - byModel := make(map[string]*entry) - for key, metric := range snap.Metrics { - if metric.Used == nil || !strings.HasPrefix(key, "model_") { - continue - } - - switch { - case strings.HasSuffix(key, "_input_tokens"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_input_tokens") - if _, ok := byModel[name]; !ok { - byModel[name] = &entry{name: name} - } - byModel[name].input += *metric.Used - case strings.HasSuffix(key, "_output_tokens"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_output_tokens") - if _, ok := byModel[name]; !ok { - byModel[name] = &entry{name: name} - } - byModel[name].output += *metric.Used - case strings.HasSuffix(key, "_cost_usd"): - name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cost_usd") - if _, ok := byModel[name]; !ok { - byModel[name] = &entry{name: name} - } - 
byModel[name].cost += *metric.Used - } - } - - entries := make([]entry, 0, len(byModel)) - totalTokens := float64(0) - totalCost := float64(0) - for _, model := range byModel { - if model.input <= 0 && model.output <= 0 && model.cost <= 0 { - continue - } - entries = append(entries, *model) - totalTokens += model.input + model.output - totalCost += model.cost - } - if len(entries) == 0 { - delete(snap.Raw, "model_usage") - delete(snap.Raw, "model_usage_window") - delete(snap.Raw, "model_count") - return - } - - useCost := totalCost > 0 - total := totalTokens - if useCost { - total = totalCost - } - if total <= 0 { - delete(snap.Raw, "model_usage") - delete(snap.Raw, "model_usage_window") - delete(snap.Raw, "model_count") - return - } - - sort.Slice(entries, func(i, j int) bool { - left := entries[i].input + entries[i].output - right := entries[j].input + entries[j].output - if useCost { - left = entries[i].cost - right = entries[j].cost - } - if left == right { - return entries[i].name < entries[j].name - } - return left > right - }) - - limit := maxModelUsageSummaryItems - if limit > len(entries) { - limit = len(entries) - } - parts := make([]string, 0, limit+1) - for i := 0; i < limit; i++ { - value := entries[i].input + entries[i].output - if useCost { - value = entries[i].cost - } - if value <= 0 { - continue - } - pct := value / total * 100 - tokens := entries[i].input + entries[i].output - modelName := strings.ReplaceAll(entries[i].name, "_", "-") - - if useCost { - parts = append(parts, fmt.Sprintf("%s %s %s tok (%.0f%%)", modelName, formatUSDSummary(entries[i].cost), shortTokenCount(tokens), pct)) - } else { - parts = append(parts, fmt.Sprintf("%s %s tok (%.0f%%)", modelName, shortTokenCount(tokens), pct)) - } - } - if len(entries) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) - } - - snap.Raw["model_usage"] = strings.Join(parts, ", ") - snap.Raw["model_usage_window"] = "all-time" - snap.Raw["model_count"] = 
fmt.Sprintf("%d", len(entries)) -} - -func shortTokenCount(v float64) string { - switch { - case v >= 1_000_000_000: - return fmt.Sprintf("%.1fB", v/1_000_000_000) - case v >= 1_000_000: - return fmt.Sprintf("%.1fM", v/1_000_000) - case v >= 1_000: - return fmt.Sprintf("%.1fK", v/1_000) - default: - return fmt.Sprintf("%.0f", v) - } -} - -func formatUSDSummary(v float64) string { - if v >= 1000 { - return fmt.Sprintf("$%.0f", v) - } - return fmt.Sprintf("$%.2f", v) -} diff --git a/internal/providers/claude_code/local_files.go b/internal/providers/claude_code/local_files.go new file mode 100644 index 0000000..8ce04d5 --- /dev/null +++ b/internal/providers/claude_code/local_files.go @@ -0,0 +1,378 @@ +package claude_code + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func floorToHour(t time.Time) time.Time { + return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location()) +} + +func buildStatsCandidates(explicitPath, claudeDir, home string) []string { + if explicitPath != "" { + return []string{explicitPath} + } + + candidates := []string{ + filepath.Join(claudeDir, "stats-cache.json"), + filepath.Join(claudeDir, ".claude-backup", "stats-cache.json"), + filepath.Join(home, ".claude-backup", "stats-cache.json"), + } + + seen := make(map[string]struct{}, len(candidates)) + out := make([]string, 0, len(candidates)) + for _, candidate := range candidates { + if candidate == "" { + continue + } + if _, ok := seen[candidate]; ok { + continue + } + seen[candidate] = struct{}{} + out = append(out, candidate) + } + return out +} + +func applyUsageResponse(usage *usageResponse, snap *core.UsageSnapshot, now time.Time) { + applyUsageBucket := func(metricKey, window, resetKey string, bucket *usageBucket) { + if bucket == nil { + return + } + + util := bucket.Utilization + limit := float64(100) + if t, ok := parseReset(bucket.ResetsAt); ok { + if !t.After(now) { + 
util = 0 + } + if resetKey != "" { + snap.Resets[resetKey] = t + } + } + + snap.Metrics[metricKey] = core.Metric{ + Used: &util, + Limit: &limit, + Unit: "%", + Window: window, + } + } + + applyUsageBucket("usage_five_hour", "5h", "usage_five_hour", usage.FiveHour) + applyUsageBucket("usage_seven_day", "7d", "usage_seven_day", usage.SevenDay) + applyUsageBucket("usage_seven_day_sonnet", "7d-sonnet", "", usage.SevenDaySonnet) + applyUsageBucket("usage_seven_day_opus", "7d-opus", "", usage.SevenDayOpus) + applyUsageBucket("usage_seven_day_cowork", "7d-cowork", "", usage.SevenDayCowork) +} + +func parseReset(raw string) (time.Time, bool) { + if raw == "" { + return time.Time{}, false + } + t, err := time.Parse(time.RFC3339, raw) + if err != nil { + return time.Time{}, false + } + return t, true +} + +func (p *Provider) readStats(path string, snap *core.UsageSnapshot) error { + data, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("reading stats cache: %w", err) + } + + var stats statsCache + if err := json.Unmarshal(data, &stats); err != nil { + return fmt.Errorf("parsing stats cache: %w", err) + } + + if stats.TotalMessages > 0 { + total := float64(stats.TotalMessages) + snap.Metrics["total_messages"] = core.Metric{ + Used: &total, + Unit: "messages", + Window: "all-time", + } + } + + if stats.TotalSessions > 0 { + total := float64(stats.TotalSessions) + snap.Metrics["total_sessions"] = core.Metric{ + Used: &total, + Unit: "sessions", + Window: "all-time", + } + } + + if stats.TotalSpeculationTimeSavedMs > 0 { + hoursSaved := float64(stats.TotalSpeculationTimeSavedMs) / float64(time.Hour/time.Millisecond) + snap.Metrics["speculation_time_saved_hours"] = core.Metric{ + Used: &hoursSaved, + Unit: "hours", + Window: "all-time", + } + } + + now := time.Now() + today := now.Format("2006-01-02") + weekStart := now.Add(-7 * 24 * time.Hour) + var weeklyMessages int + var weeklyToolCalls int + var weeklySessions int + for _, da := range stats.DailyActivity { + 
snap.DailySeries["messages"] = append(snap.DailySeries["messages"], core.TimePoint{ + Date: da.Date, Value: float64(da.MessageCount), + }) + snap.DailySeries["sessions"] = append(snap.DailySeries["sessions"], core.TimePoint{ + Date: da.Date, Value: float64(da.SessionCount), + }) + snap.DailySeries["tool_calls"] = append(snap.DailySeries["tool_calls"], core.TimePoint{ + Date: da.Date, Value: float64(da.ToolCallCount), + }) + + if da.Date == today { + msgs := float64(da.MessageCount) + snap.Metrics["messages_today"] = core.Metric{Used: &msgs, Unit: "messages", Window: "1d"} + tools := float64(da.ToolCallCount) + snap.Metrics["tool_calls_today"] = core.Metric{Used: &tools, Unit: "calls", Window: "1d"} + sessions := float64(da.SessionCount) + snap.Metrics["sessions_today"] = core.Metric{Used: &sessions, Unit: "sessions", Window: "1d"} + } + + if day, err := time.Parse("2006-01-02", da.Date); err == nil && (day.After(weekStart) || day.Equal(weekStart)) { + weeklyMessages += da.MessageCount + weeklyToolCalls += da.ToolCallCount + weeklySessions += da.SessionCount + } + } + + if weeklyMessages > 0 { + wm := float64(weeklyMessages) + snap.Metrics["7d_messages"] = core.Metric{Used: &wm, Unit: "messages", Window: "rolling 7 days"} + } + if weeklyToolCalls > 0 { + wt := float64(weeklyToolCalls) + snap.Metrics["7d_tool_calls"] = core.Metric{Used: &wt, Unit: "calls", Window: "rolling 7 days"} + } + if weeklySessions > 0 { + ws := float64(weeklySessions) + snap.Metrics["7d_sessions"] = core.Metric{Used: &ws, Unit: "sessions", Window: "rolling 7 days"} + } + + for _, dt := range stats.DailyModelTokens { + totalDayTokens := float64(0) + for model, tokens := range dt.TokensByModel { + name := sanitizeModelName(model) + key := fmt.Sprintf("tokens_%s", name) + snap.DailySeries[key] = append(snap.DailySeries[key], core.TimePoint{Date: dt.Date, Value: float64(tokens)}) + totalDayTokens += float64(tokens) + } + snap.DailySeries["tokens_total"] = append(snap.DailySeries["tokens_total"], 
core.TimePoint{Date: dt.Date, Value: totalDayTokens}) + + if dt.Date == today { + for model, tokens := range dt.TokensByModel { + t := float64(tokens) + key := fmt.Sprintf("tokens_today_%s", sanitizeModelName(model)) + snap.Metrics[key] = core.Metric{Used: &t, Unit: "tokens", Window: "1d"} + } + } + } + + var totalCostUSD float64 + for model, usage := range stats.ModelUsage { + outTokens := float64(usage.OutputTokens) + inTokens := float64(usage.InputTokens) + name := sanitizeModelName(model) + modelPrefix := "model_" + name + + setMetricMax(snap, modelPrefix+"_input_tokens", inTokens, "tokens", "all-time") + setMetricMax(snap, modelPrefix+"_output_tokens", outTokens, "tokens", "all-time") + setMetricMax(snap, modelPrefix+"_cached_tokens", float64(usage.CacheReadInputTokens), "tokens", "all-time") + setMetricMax(snap, modelPrefix+"_cache_creation_tokens", float64(usage.CacheCreationInputTokens), "tokens", "all-time") + setMetricMax(snap, modelPrefix+"_web_search_requests", float64(usage.WebSearchRequests), "requests", "all-time") + setMetricMax(snap, modelPrefix+"_context_window_tokens", float64(usage.ContextWindow), "tokens", "all-time") + setMetricMax(snap, modelPrefix+"_max_output_tokens", float64(usage.MaxOutputTokens), "tokens", "all-time") + + snap.Raw[fmt.Sprintf("model_%s_cache_read", name)] = fmt.Sprintf("%d tokens", usage.CacheReadInputTokens) + snap.Raw[fmt.Sprintf("model_%s_cache_create", name)] = fmt.Sprintf("%d tokens", usage.CacheCreationInputTokens) + if usage.WebSearchRequests > 0 { + snap.Raw[fmt.Sprintf("model_%s_web_search_requests", name)] = fmt.Sprintf("%d", usage.WebSearchRequests) + } + if usage.ContextWindow > 0 { + snap.Raw[fmt.Sprintf("model_%s_context_window", name)] = fmt.Sprintf("%d", usage.ContextWindow) + } + if usage.MaxOutputTokens > 0 { + snap.Raw[fmt.Sprintf("model_%s_max_output_tokens", name)] = fmt.Sprintf("%d", usage.MaxOutputTokens) + } + + if usage.CostUSD > 0 { + totalCostUSD += usage.CostUSD + setMetricMax(snap, 
modelPrefix+"_cost_usd", usage.CostUSD, "USD", "all-time") + } + + rec := core.ModelUsageRecord{ + RawModelID: model, + RawSource: "stats_cache", + Window: "all-time", + InputTokens: core.Float64Ptr(inTokens), + OutputTokens: core.Float64Ptr(outTokens), + TotalTokens: core.Float64Ptr(inTokens + outTokens), + } + if usage.CacheReadInputTokens > 0 || usage.CacheCreationInputTokens > 0 { + rec.CachedTokens = core.Float64Ptr(float64(usage.CacheReadInputTokens + usage.CacheCreationInputTokens)) + } + if usage.CostUSD > 0 { + rec.CostUSD = core.Float64Ptr(usage.CostUSD) + } + snap.AppendModelUsage(rec) + } + + if totalCostUSD > 0 { + cost := totalCostUSD + snap.Metrics["total_cost_usd"] = core.Metric{Used: &cost, Unit: "USD", Window: "all-time"} + } + + snap.Raw["stats_last_computed"] = stats.LastComputedDate + if stats.FirstSessionDate != "" { + snap.Raw["first_session"] = stats.FirstSessionDate + } + if stats.LongestSession != nil { + if stats.LongestSession.Duration > 0 { + minutes := float64(stats.LongestSession.Duration) / float64(time.Minute/time.Millisecond) + snap.Metrics["longest_session_minutes"] = core.Metric{Used: &minutes, Unit: "minutes", Window: "all-time"} + } + if stats.LongestSession.MessageCount > 0 { + msgs := float64(stats.LongestSession.MessageCount) + snap.Metrics["longest_session_messages"] = core.Metric{Used: &msgs, Unit: "messages", Window: "all-time"} + } + if stats.LongestSession.SessionID != "" { + snap.Raw["longest_session_id"] = stats.LongestSession.SessionID + } + if stats.LongestSession.Timestamp != "" { + snap.Raw["longest_session_timestamp"] = stats.LongestSession.Timestamp + } + } + if len(stats.HourCounts) > 0 { + peakHour := "" + peakCount := 0 + for h, c := range stats.HourCounts { + if c > peakCount { + peakHour = h + peakCount = c + } + } + if peakHour != "" { + snap.Raw["peak_hour"] = peakHour + snap.Raw["peak_hour_messages"] = fmt.Sprintf("%d", peakCount) + } + } + + return nil +} + +func (p *Provider) readAccount(path string, 
snap *core.UsageSnapshot) error { + data, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("reading account config: %w", err) + } + + var acct accountConfig + if err := json.Unmarshal(data, &acct); err != nil { + return fmt.Errorf("parsing account config: %w", err) + } + + if acct.OAuthAccount != nil { + if acct.OAuthAccount.EmailAddress != "" { + snap.Raw["account_email"] = acct.OAuthAccount.EmailAddress + } + if acct.OAuthAccount.DisplayName != "" { + snap.Raw["account_name"] = acct.OAuthAccount.DisplayName + } + if acct.OAuthAccount.BillingType != "" { + snap.Raw["billing_type"] = acct.OAuthAccount.BillingType + } + if acct.OAuthAccount.HasExtraUsageEnabled { + snap.Raw["extra_usage_enabled"] = "true" + } + if acct.OAuthAccount.AccountCreatedAt != "" { + snap.Raw["account_created_at"] = acct.OAuthAccount.AccountCreatedAt + } + if acct.OAuthAccount.SubscriptionCreatedAt != "" { + snap.Raw["subscription_created_at"] = acct.OAuthAccount.SubscriptionCreatedAt + } + if acct.OAuthAccount.OrganizationUUID != "" { + snap.Raw["organization_uuid"] = acct.OAuthAccount.OrganizationUUID + } + } + + if acct.HasAvailableSubscription { + snap.Raw["subscription"] = "active" + } else { + snap.Raw["subscription"] = "none" + } + + if acct.ClaudeCodeFirstTokenDate != "" { + snap.Raw["claude_code_first_token_date"] = acct.ClaudeCodeFirstTokenDate + } + + if acct.PenguinModeOrgEnabled { + snap.Raw["penguin_mode_enabled"] = "true" + } + + for orgID, access := range acct.S1MAccessCache { + if access.HasAccess { + shortID := orgID + if len(shortID) > 8 { + shortID = shortID[:8] + } + snap.Raw[fmt.Sprintf("s1m_access_%s", shortID)] = "true" + } + } + + snap.Raw["num_startups"] = fmt.Sprintf("%d", acct.NumStartups) + if acct.InstallMethod != "" { + snap.Raw["install_method"] = acct.InstallMethod + } + if acct.ClientDataCache != nil && acct.ClientDataCache.Timestamp > 0 { + snap.Raw["client_data_cache_ts"] = strconv.FormatInt(acct.ClientDataCache.Timestamp, 10) + } + if 
len(acct.SkillUsage) > 0 { + counts := make(map[string]int, len(acct.SkillUsage)) + for skill, usage := range acct.SkillUsage { + counts[sanitizeModelName(skill)] = usage.UsageCount + } + snap.Raw["skill_usage"] = summarizeCountMap(counts, 6) + } + + return nil +} + +func (p *Provider) readSettings(path string, snap *core.UsageSnapshot) error { + data, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("reading settings: %w", err) + } + + var settings settingsConfig + if err := json.Unmarshal(data, &settings); err != nil { + return fmt.Errorf("parsing settings: %w", err) + } + + if settings.Model != "" { + snap.Raw["active_model"] = settings.Model + } + if settings.AlwaysThinkingEnabled { + snap.Raw["always_thinking"] = "true" + } + + return nil +} diff --git a/internal/providers/claude_code/local_helpers.go b/internal/providers/claude_code/local_helpers.go new file mode 100644 index 0000000..21538d1 --- /dev/null +++ b/internal/providers/claude_code/local_helpers.go @@ -0,0 +1,687 @@ +package claude_code + +import ( + "bufio" + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" + "github.com/samber/lo" +) + +func parseJSONLTimestamp(raw string) (time.Time, bool) { + t, err := shared.ParseTimestampString(raw) + if err != nil { + return time.Time{}, false + } + return t, true +} + +func isMutatingTool(name string) bool { + n := strings.ToLower(strings.TrimSpace(name)) + if n == "" { + return false + } + return strings.Contains(n, "edit") || + strings.Contains(n, "write") || + strings.Contains(n, "create") || + strings.Contains(n, "delete") || + strings.Contains(n, "rename") || + strings.Contains(n, "move") +} + +func extractToolCommand(input any) string { + var command string + var walk func(value any) + walk = func(value any) { + if command != "" || value == nil { + return + } + switch v := 
value.(type) { + case map[string]any: + for key, child := range v { + k := strings.ToLower(strings.TrimSpace(key)) + if k == "command" || k == "cmd" || k == "script" || k == "shell_command" { + if s, ok := child.(string); ok { + command = strings.TrimSpace(s) + return + } + } + } + for _, child := range v { + walk(child) + if command != "" { + return + } + } + case []any: + for _, child := range v { + walk(child) + if command != "" { + return + } + } + } + } + walk(input) + return command +} + +func estimateToolLineDelta(toolName string, input any) (added int, removed int) { + lineCount := func(text string) int { + text = strings.TrimSpace(text) + if text == "" { + return 0 + } + return strings.Count(text, "\n") + 1 + } + lowerTool := strings.ToLower(strings.TrimSpace(toolName)) + var walk func(value any) + walk = func(value any) { + switch v := value.(type) { + case map[string]any: + oldKeys := []string{"old_string", "old_text", "from", "replace"} + newKeys := []string{"new_string", "new_text", "to", "with"} + var oldText string + var newText string + for _, key := range oldKeys { + if raw, ok := v[key]; ok { + if s, ok := raw.(string); ok { + oldText = s + break + } + } + } + for _, key := range newKeys { + if raw, ok := v[key]; ok { + if s, ok := raw.(string); ok { + newText = s + break + } + } + } + if oldText != "" || newText != "" { + removed += lineCount(oldText) + added += lineCount(newText) + } + if strings.Contains(lowerTool, "write") || strings.Contains(lowerTool, "create") { + if raw, ok := v["content"]; ok { + if s, ok := raw.(string); ok { + added += lineCount(s) + } + } + } + for _, child := range v { + walk(child) + } + case []any: + for _, child := range v { + walk(child) + } + } + } + walk(input) + return added, removed +} + +func extractToolPathCandidates(input any) []string { + pathKeyHints := map[string]bool{ + "path": true, "paths": true, "file": true, "files": true, "filepath": true, "file_path": true, + "cwd": true, "directory": true, "dir": 
true, "glob": true, "pattern": true, "target": true, + "from": true, "to": true, "include": true, "exclude": true, + } + + candidates := make(map[string]bool) + var walk func(value any, hinted bool) + walk = func(value any, hinted bool) { + switch v := value.(type) { + case map[string]any: + for key, child := range v { + k := strings.ToLower(strings.TrimSpace(key)) + childHinted := hinted || pathKeyHints[k] || strings.Contains(k, "path") || strings.Contains(k, "file") + walk(child, childHinted) + } + case []any: + for _, child := range v { + walk(child, hinted) + } + case string: + if !hinted { + return + } + for _, token := range extractPathTokens(v) { + candidates[token] = true + } + } + } + walk(input, false) + + out := make([]string, 0, len(candidates)) + for candidate := range candidates { + out = append(out, candidate) + } + sort.Strings(out) + return out +} + +func extractPathTokens(raw string) []string { + raw = strings.TrimSpace(raw) + if raw == "" { + return nil + } + fields := strings.Fields(raw) + if len(fields) == 0 { + fields = []string{raw} + } + var out []string + for _, field := range fields { + token := strings.Trim(field, "\"'`()[]{}<>,:;") + if token == "" { + continue + } + lower := strings.ToLower(token) + if strings.HasPrefix(lower, "http://") || strings.HasPrefix(lower, "https://") || strings.HasPrefix(lower, "file://") { + continue + } + if strings.HasPrefix(token, "-") { + continue + } + if !strings.Contains(token, "/") && !strings.Contains(token, "\\") && !strings.Contains(token, ".") { + continue + } + token = strings.TrimPrefix(token, "./") + token = strings.TrimSpace(token) + if token == "" { + continue + } + out = append(out, token) + } + return lo.Uniq(out) +} + +func inferLanguageFromPath(path string) string { + p := strings.ToLower(strings.TrimSpace(path)) + if p == "" { + return "" + } + base := strings.ToLower(filepath.Base(p)) + switch base { + case "dockerfile": + return "docker" + case "makefile": + return "make" + } + ext := 
strings.ToLower(filepath.Ext(p)) + switch ext { + case ".go": + return "go" + case ".py": + return "python" + case ".ts", ".tsx": + return "typescript" + case ".js", ".jsx": + return "javascript" + case ".tf", ".tfvars", ".hcl": + return "terraform" + case ".sh", ".bash", ".zsh", ".fish": + return "shell" + case ".md", ".mdx": + return "markdown" + case ".json": + return "json" + case ".yml", ".yaml": + return "yaml" + case ".sql": + return "sql" + case ".rs": + return "rust" + case ".java": + return "java" + case ".c", ".h": + return "c" + case ".cc", ".cpp", ".cxx", ".hpp": + return "cpp" + case ".rb": + return "ruby" + case ".php": + return "php" + case ".swift": + return "swift" + case ".vue": + return "vue" + case ".svelte": + return "svelte" + case ".toml": + return "toml" + case ".xml": + return "xml" + } + return "" +} + +func summarizeCountMap(values map[string]int, limit int) string { + type entry struct { + name string + value int + } + entries := make([]entry, 0, len(values)) + for name, value := range values { + if value <= 0 { + continue + } + entries = append(entries, entry{name: name, value: value}) + } + if len(entries) == 0 { + return "" + } + sort.Slice(entries, func(i, j int) bool { + if entries[i].value == entries[j].value { + return entries[i].name < entries[j].name + } + return entries[i].value > entries[j].value + }) + if limit <= 0 || limit > len(entries) { + limit = len(entries) + } + parts := make([]string, 0, limit+1) + for i := 0; i < limit; i++ { + name := strings.ReplaceAll(entries[i].name, "_", "-") + parts = append(parts, fmt.Sprintf("%s %d", name, entries[i].value)) + } + if len(entries) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) + } + return strings.Join(parts, ", ") +} + +func summarizeFloatMap(values map[string]float64, unit string, limit int) string { + type entry struct { + name string + value float64 + } + entries := make([]entry, 0, len(values)) + for name, value := range values { + if 
value <= 0 { + continue + } + entries = append(entries, entry{name: name, value: value}) + } + if len(entries) == 0 { + return "" + } + sort.Slice(entries, func(i, j int) bool { + if entries[i].value == entries[j].value { + return entries[i].name < entries[j].name + } + return entries[i].value > entries[j].value + }) + if limit <= 0 || limit > len(entries) { + limit = len(entries) + } + parts := make([]string, 0, limit+1) + for i := 0; i < limit; i++ { + name := strings.ReplaceAll(entries[i].name, "_", "-") + value := shortTokenCount(entries[i].value) + if unit != "" { + value += " " + unit + } + parts = append(parts, fmt.Sprintf("%s %s", name, value)) + } + if len(entries) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) + } + return strings.Join(parts, ", ") +} + +func summarizeTotalsMap(values map[string]*modelUsageTotals, preferCost bool, limit int) string { + type entry struct { + name string + tokens float64 + cost float64 + } + entries := make([]entry, 0, len(values)) + totalCost := 0.0 + for name, totals := range values { + if totals == nil { + continue + } + tokens := totals.input + totals.output + totals.cached + totals.cacheCreate + totals.reasoning + cost := totals.cost + if tokens <= 0 && cost <= 0 { + continue + } + totalCost += cost + entries = append(entries, entry{name: name, tokens: tokens, cost: cost}) + } + if len(entries) == 0 { + return "" + } + useCost := preferCost && totalCost > 0 + sort.Slice(entries, func(i, j int) bool { + left := entries[i].tokens + right := entries[j].tokens + if useCost { + left = entries[i].cost + right = entries[j].cost + } + if left == right { + return entries[i].name < entries[j].name + } + return left > right + }) + if limit <= 0 || limit > len(entries) { + limit = len(entries) + } + parts := make([]string, 0, limit+1) + for i := 0; i < limit; i++ { + name := strings.ReplaceAll(entries[i].name, "_", "-") + if useCost { + parts = append(parts, fmt.Sprintf("%s %s %s tok", name, 
formatUSDSummary(entries[i].cost), shortTokenCount(entries[i].tokens))) + } else { + parts = append(parts, fmt.Sprintf("%s %s tok", name, shortTokenCount(entries[i].tokens))) + } + } + if len(entries) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) + } + return strings.Join(parts, ", ") +} + +func collectJSONLFiles(dir string) []string { + var files []string + if _, err := os.Stat(dir); os.IsNotExist(err) { + return files + } + + _ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + if !info.IsDir() && strings.HasSuffix(path, ".jsonl") { + files = append(files, path) + } + return nil + }) + + return files +} + +func parseJSONLFile(path string) []jsonlEntry { + f, err := os.Open(path) + if err != nil { + return nil + } + defer f.Close() + + var entries []jsonlEntry + scanner := bufio.NewScanner(f) + buf := make([]byte, 0, 256*1024) + scanner.Buffer(buf, 10*1024*1024) + + for scanner.Scan() { + line := scanner.Bytes() + if len(line) == 0 { + continue + } + var entry jsonlEntry + if err := json.Unmarshal(line, &entry); err != nil { + continue + } + entries = append(entries, entry) + } + + return entries +} + +func sanitizeModelName(model string) string { + model = strings.ToLower(strings.TrimSpace(model)) + if model == "" { + return "unknown" + } + + result := make([]byte, 0, len(model)) + for _, c := range model { + if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') { + result = append(result, byte(c)) + } else { + result = append(result, '_') + } + } + + out := strings.Trim(string(result), "_") + if out == "" { + return "unknown" + } + return out +} + +func setMetricMax(snap *core.UsageSnapshot, key string, value float64, unit, window string) { + if value <= 0 { + return + } + if existing, ok := snap.Metrics[key]; ok && existing.Used != nil && *existing.Used >= value { + return + } + v := value + snap.Metrics[key] = core.Metric{Used: &v, Unit: unit, 
Window: window} +} + +func normalizeModelUsage(snap *core.UsageSnapshot) { + modelTotals := make(map[string]*modelUsageTotals) + legacyMetricKeys := make([]string, 0, 16) + + ensureModel := func(name string) *modelUsageTotals { + if _, ok := modelTotals[name]; !ok { + modelTotals[name] = &modelUsageTotals{} + } + return modelTotals[name] + } + + for key, metric := range snap.Metrics { + if metric.Used == nil { + continue + } + + switch { + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_input_tokens"): + model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_input_tokens") + ensureModel(model).input += *metric.Used + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_output_tokens"): + model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_output_tokens") + ensureModel(model).output += *metric.Used + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cost_usd"): + model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cost_usd") + ensureModel(model).cost += *metric.Used + case strings.HasPrefix(key, "input_tokens_"): + model := sanitizeModelName(strings.TrimPrefix(key, "input_tokens_")) + ensureModel(model).input += *metric.Used + legacyMetricKeys = append(legacyMetricKeys, key) + case strings.HasPrefix(key, "output_tokens_"): + model := sanitizeModelName(strings.TrimPrefix(key, "output_tokens_")) + ensureModel(model).output += *metric.Used + legacyMetricKeys = append(legacyMetricKeys, key) + } + } + + for key, value := range snap.Raw { + switch { + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cache_read"): + model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cache_read") + if parsed, ok := parseMetricNumber(value); ok { + setMetricMax(snap, "model_"+model+"_cached_tokens", parsed, "tokens", "all-time") + } + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_cache_create"): + model := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), 
"_cache_create") + if parsed, ok := parseMetricNumber(value); ok { + setMetricMax(snap, "model_"+model+"_cache_creation_tokens", parsed, "tokens", "all-time") + } + } + } + + for _, key := range legacyMetricKeys { + delete(snap.Metrics, key) + } + + for model, totals := range modelTotals { + modelPrefix := "model_" + sanitizeModelName(model) + setMetricMax(snap, modelPrefix+"_input_tokens", totals.input, "tokens", "all-time") + setMetricMax(snap, modelPrefix+"_output_tokens", totals.output, "tokens", "all-time") + setMetricMax(snap, modelPrefix+"_cost_usd", totals.cost, "USD", "all-time") + } + + buildModelUsageSummaryRaw(snap) +} + +func parseMetricNumber(raw string) (float64, bool) { + clean := strings.TrimSpace(strings.ReplaceAll(raw, ",", "")) + if clean == "" { + return 0, false + } + fields := strings.Fields(clean) + if len(fields) == 0 { + return 0, false + } + v, err := strconv.ParseFloat(fields[0], 64) + if err != nil { + return 0, false + } + return v, true +} + +func buildModelUsageSummaryRaw(snap *core.UsageSnapshot) { + type entry struct { + name string + input float64 + output float64 + cost float64 + } + + byModel := make(map[string]*entry) + for key, metric := range snap.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "model_") { + continue + } + + switch { + case strings.HasSuffix(key, "_input_tokens"): + name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_input_tokens") + if _, ok := byModel[name]; !ok { + byModel[name] = &entry{name: name} + } + byModel[name].input += *metric.Used + case strings.HasSuffix(key, "_output_tokens"): + name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_output_tokens") + if _, ok := byModel[name]; !ok { + byModel[name] = &entry{name: name} + } + byModel[name].output += *metric.Used + case strings.HasSuffix(key, "_cost_usd"): + name := strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_cost_usd") + if _, ok := byModel[name]; !ok { + byModel[name] = &entry{name: name} + } + 
byModel[name].cost += *metric.Used + } + } + + entries := make([]entry, 0, len(byModel)) + totalTokens := float64(0) + totalCost := float64(0) + for _, model := range byModel { + if model.input <= 0 && model.output <= 0 && model.cost <= 0 { + continue + } + entries = append(entries, *model) + totalTokens += model.input + model.output + totalCost += model.cost + } + if len(entries) == 0 { + delete(snap.Raw, "model_usage") + delete(snap.Raw, "model_usage_window") + delete(snap.Raw, "model_count") + return + } + + useCost := totalCost > 0 + total := totalTokens + if useCost { + total = totalCost + } + if total <= 0 { + delete(snap.Raw, "model_usage") + delete(snap.Raw, "model_usage_window") + delete(snap.Raw, "model_count") + return + } + + sort.Slice(entries, func(i, j int) bool { + left := entries[i].input + entries[i].output + right := entries[j].input + entries[j].output + if useCost { + left = entries[i].cost + right = entries[j].cost + } + if left == right { + return entries[i].name < entries[j].name + } + return left > right + }) + + limit := maxModelUsageSummaryItems + if limit > len(entries) { + limit = len(entries) + } + parts := make([]string, 0, limit+1) + for i := 0; i < limit; i++ { + value := entries[i].input + entries[i].output + if useCost { + value = entries[i].cost + } + if value <= 0 { + continue + } + pct := value / total * 100 + tokens := entries[i].input + entries[i].output + modelName := strings.ReplaceAll(entries[i].name, "_", "-") + + if useCost { + parts = append(parts, fmt.Sprintf("%s %s %s tok (%.0f%%)", modelName, formatUSDSummary(entries[i].cost), shortTokenCount(tokens), pct)) + } else { + parts = append(parts, fmt.Sprintf("%s %s tok (%.0f%%)", modelName, shortTokenCount(tokens), pct)) + } + } + if len(entries) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) + } + + snap.Raw["model_usage"] = strings.Join(parts, ", ") + snap.Raw["model_usage_window"] = "all-time" + snap.Raw["model_count"] = 
fmt.Sprintf("%d", len(entries)) +} + +func shortTokenCount(v float64) string { + switch { + case v >= 1_000_000_000: + return fmt.Sprintf("%.1fB", v/1_000_000_000) + case v >= 1_000_000: + return fmt.Sprintf("%.1fM", v/1_000_000) + case v >= 1_000: + return fmt.Sprintf("%.1fK", v/1_000) + default: + return fmt.Sprintf("%.0f", v) + } +} + +func formatUSDSummary(v float64) string { + if v >= 1000 { + return fmt.Sprintf("$%.0f", v) + } + return fmt.Sprintf("$%.2f", v) +} diff --git a/internal/tui/settings_modal.go b/internal/tui/settings_modal.go index 0ef5107..7ae88aa 100644 --- a/internal/tui/settings_modal.go +++ b/internal/tui/settings_modal.go @@ -433,238 +433,6 @@ func (m Model) currentTimeWindowIndex() int { return 0 } -func (m Model) renderSettingsModalOverlay() string { - if m.width < 40 || m.height < 12 { - return m.renderDashboard() - } - - contentW := m.width - 24 - if contentW < 68 { - contentW = 68 - } - if contentW > 92 { - contentW = 92 - } - panelInnerW := contentW - 4 - if panelInnerW < 40 { - panelInnerW = 40 - } - - const modalBodyHeight = 20 - contentH := modalBodyHeight - maxAllowed := m.height - 14 - if maxAllowed < 8 { - maxAllowed = 8 - } - if contentH > maxAllowed { - contentH = maxAllowed - } - - title := lipgloss.NewStyle().Bold(true).Foreground(colorRosewater).Render("Settings") - tabs := m.renderSettingsModalTabs(panelInnerW) - body := m.renderSettingsModalBody(panelInnerW, contentH) - hint := dimStyle.Render(m.settingsModalHint()) - - status := "" - if m.settings.status != "" { - status = lipgloss.NewStyle().Foreground(colorSapphire).Render(m.settings.status) - } - - lines := []string{ - title, - tabs, - lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", panelInnerW)), - body, - lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", panelInnerW)), - hint, - } - if status != "" { - lines = append(lines, status) - } - - panel := lipgloss.NewStyle(). - Border(lipgloss.RoundedBorder()). 
- BorderForeground(colorAccent). - Background(colorBase). - Padding(1, 2). - Width(contentW). - Render(strings.Join(lines, "\n")) - if m.settings.tab != settingsTabWidgetSections { - return lipgloss.Place(m.width, m.height, lipgloss.Center, lipgloss.Center, panel) - } - - previewBodyH := contentH - sideBySide := m.width >= contentW*2+12 - previewBodyH = m.settingsWidgetPreviewBodyHeight(contentW, contentH, sideBySide) - previewPanel := m.renderSettingsWidgetPreviewPanel(contentW, previewBodyH) - - combined := "" - // Render side-by-side when terminal width allows two panels comfortably. - if sideBySide { - panelH := lipgloss.Height(panel) - previewH := lipgloss.Height(previewPanel) - if panelH < previewH { - panel = centerPanelVertically(panel, previewH) - } else if previewH < panelH { - previewPanel = centerPanelVertically(previewPanel, panelH) - } - combined = lipgloss.JoinHorizontal(lipgloss.Top, panel, " ", previewPanel) - } else { - combined = lipgloss.JoinVertical(lipgloss.Left, panel, "", previewPanel) - } - - return lipgloss.Place(m.width, m.height, lipgloss.Center, lipgloss.Center, combined) -} - -func (m Model) renderSettingsModalTabs(w int) string { - if len(settingsTabNames) == 0 { - return "" - } - if w < 40 { - w = 40 - } - - n := len(settingsTabNames) - gap := 1 - cellW := (w - gap*(n-1)) / n - if cellW < 6 { - cellW = 6 - gap = 0 - cellW = w / n - } - - tabTokens := []string{"PROV", "SECT", "THEME", "VIEW", "KEYS", "TELEM", "INTEG"} - if len(tabTokens) < n { - tabTokens = append(tabTokens, settingsTabNames[len(tabTokens):]...) 
- } - - activeStyle := lipgloss.NewStyle().Bold(true).Foreground(colorMantle).Background(colorAccent) - inactiveStyle := lipgloss.NewStyle().Foreground(colorSubtext) - - parts := make([]string, 0, n) - for i := 0; i < n; i++ { - token := settingsTabNames[i] - if i < len(tabTokens) { - token = tabTokens[i] - } - label := fmt.Sprintf("%d %s", i+1, token) - if lipgloss.Width(label) > cellW { - label = truncateToWidth(label, cellW) - } - if pad := cellW - lipgloss.Width(label); pad > 0 { - left := pad / 2 - right := pad - left - label = strings.Repeat(" ", left) + label + strings.Repeat(" ", right) - } - if settingsModalTab(i) == m.settings.tab { - parts = append(parts, activeStyle.Render(label)) - } else { - parts = append(parts, inactiveStyle.Render(label)) - } - } - - line := strings.Join(parts, strings.Repeat(" ", gap)) - return line -} - -func (m Model) settingsModalHint() string { - switch m.settings.tab { - case settingsTabProviders: - return "Up/Down: select · Shift+↑/↓ or Shift+J/K: move item · Space/Enter: enable/disable · Left/Right: switch tab · Esc: close" - case settingsTabWidgetSections: - return "Up/Down: select section · Shift+↑/↓ or Shift+J/K: reorder · Space/Enter: show/hide · h: toggle hide empty sections · PgUp/PgDn or Ctrl+U/D: scroll preview · Esc: close" - case settingsTabAPIKeys: - if m.settings.apiKeyEditing { - return "Type API key · Enter: validate & save · Esc: cancel" - } - return "Up/Down: select · Enter: edit key · d: delete key · Left/Right: switch tab · Esc: close" - case settingsTabView: - return "Up/Down: select view · Space/Enter: apply · v/Shift+V: cycle outside settings · Esc: close" - case settingsTabTelemetry: - return "Up/Down: select · Space/Enter: apply time window · Left/Right: switch tab · Esc: close" - case settingsTabIntegrations: - return "Up/Down: select · Enter/i: install/configure · u: upgrade · r: refresh · Esc: close" - default: - return "Up/Down: select theme · Space/Enter: apply theme · Left/Right: switch tab · 
Esc: close" - } -} - -func (m Model) renderSettingsModalBody(w, h int) string { - switch m.settings.tab { - case settingsTabProviders: - return m.renderSettingsProvidersBody(w, h) - case settingsTabWidgetSections: - return m.renderSettingsWidgetSectionsBody(w, h) - case settingsTabAPIKeys: - return m.renderSettingsAPIKeysBody(w, h) - case settingsTabView: - return m.renderSettingsViewBody(w, h) - case settingsTabTelemetry: - return m.renderSettingsTelemetryBody(w, h) - case settingsTabIntegrations: - return m.renderSettingsIntegrationsBody(w, h) - default: - return m.renderSettingsThemeBody(w, h) - } -} - -func settingsBodyHeaderLines(title, subtitle string) []string { - lines := []string{ - lipgloss.NewStyle().Foreground(colorTeal).Bold(true).Render(title), - } - if strings.TrimSpace(subtitle) != "" { - lines = append(lines, dimStyle.Render(subtitle)) - } - lines = append(lines, "") - return lines -} - -func settingsBodyRule(w int) string { - if w < 8 { - w = 8 - } - return dimStyle.Render(strings.Repeat("─", w-2)) -} - -func settingsSectionLabel(id core.DashboardStandardSection) string { - switch id { - case core.DashboardSectionTopUsageProgress: - return "Top Usage Progress" - case core.DashboardSectionModelBurn: - return "Model Burn" - case core.DashboardSectionClientBurn: - return "Client Burn" - case core.DashboardSectionProjectBreakdown: - return "Project Breakdown" - case core.DashboardSectionToolUsage: - return "Tool Usage" - case core.DashboardSectionMCPUsage: - return "MCP Usage" - case core.DashboardSectionLanguageBurn: - return "Language" - case core.DashboardSectionCodeStats: - return "Code Statistics" - case core.DashboardSectionDailyUsage: - return "Daily Usage" - case core.DashboardSectionProviderBurn: - return "Provider Burn" - case core.DashboardSectionUpstreamProviders: - return "Upstream Providers" - case core.DashboardSectionOtherData: - return "Other Data" - default: - raw := strings.TrimSpace(strings.ReplaceAll(string(id), "_", " ")) - if 
raw == "" { - return "Unknown" - } - parts := strings.Fields(raw) - for i := range parts { - parts[i] = titleCase(parts[i]) - } - return strings.Join(parts, " ") - } -} - func (m Model) renderSettingsProvidersBody(w, h int) string { ids := m.settingsIDs() diff --git a/internal/tui/settings_modal_layout.go b/internal/tui/settings_modal_layout.go new file mode 100644 index 0000000..10560cf --- /dev/null +++ b/internal/tui/settings_modal_layout.go @@ -0,0 +1,239 @@ +package tui + +import ( + "fmt" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (m Model) renderSettingsModalOverlay() string { + if m.width < 40 || m.height < 12 { + return m.renderDashboard() + } + + contentW := m.width - 24 + if contentW < 68 { + contentW = 68 + } + if contentW > 92 { + contentW = 92 + } + panelInnerW := contentW - 4 + if panelInnerW < 40 { + panelInnerW = 40 + } + + const modalBodyHeight = 20 + contentH := modalBodyHeight + maxAllowed := m.height - 14 + if maxAllowed < 8 { + maxAllowed = 8 + } + if contentH > maxAllowed { + contentH = maxAllowed + } + + title := lipgloss.NewStyle().Bold(true).Foreground(colorRosewater).Render("Settings") + tabs := m.renderSettingsModalTabs(panelInnerW) + body := m.renderSettingsModalBody(panelInnerW, contentH) + hint := dimStyle.Render(m.settingsModalHint()) + + status := "" + if m.settings.status != "" { + status = lipgloss.NewStyle().Foreground(colorSapphire).Render(m.settings.status) + } + + lines := []string{ + title, + tabs, + lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", panelInnerW)), + body, + lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", panelInnerW)), + hint, + } + if status != "" { + lines = append(lines, status) + } + + panel := lipgloss.NewStyle(). + Border(lipgloss.RoundedBorder()). + BorderForeground(colorAccent). + Background(colorBase). + Padding(1, 2). + Width(contentW). 
+ Render(strings.Join(lines, "\n")) + if m.settings.tab != settingsTabWidgetSections { + return lipgloss.Place(m.width, m.height, lipgloss.Center, lipgloss.Center, panel) + } + + previewBodyH := contentH + sideBySide := m.width >= contentW*2+12 + previewBodyH = m.settingsWidgetPreviewBodyHeight(contentW, contentH, sideBySide) + previewPanel := m.renderSettingsWidgetPreviewPanel(contentW, previewBodyH) + + combined := "" + if sideBySide { + panelH := lipgloss.Height(panel) + previewH := lipgloss.Height(previewPanel) + if panelH < previewH { + panel = centerPanelVertically(panel, previewH) + } else if previewH < panelH { + previewPanel = centerPanelVertically(previewPanel, panelH) + } + combined = lipgloss.JoinHorizontal(lipgloss.Top, panel, " ", previewPanel) + } else { + combined = lipgloss.JoinVertical(lipgloss.Left, panel, "", previewPanel) + } + + return lipgloss.Place(m.width, m.height, lipgloss.Center, lipgloss.Center, combined) +} + +func (m Model) renderSettingsModalTabs(w int) string { + if len(settingsTabNames) == 0 { + return "" + } + if w < 40 { + w = 40 + } + + n := len(settingsTabNames) + gap := 1 + cellW := (w - gap*(n-1)) / n + if cellW < 6 { + cellW = 6 + gap = 0 + cellW = w / n + } + + tabTokens := []string{"PROV", "SECT", "THEME", "VIEW", "KEYS", "TELEM", "INTEG"} + if len(tabTokens) < n { + tabTokens = append(tabTokens, settingsTabNames[len(tabTokens):]...) 
+ } + + activeStyle := lipgloss.NewStyle().Bold(true).Foreground(colorMantle).Background(colorAccent) + inactiveStyle := lipgloss.NewStyle().Foreground(colorSubtext) + + parts := make([]string, 0, n) + for i := 0; i < n; i++ { + token := settingsTabNames[i] + if i < len(tabTokens) { + token = tabTokens[i] + } + label := fmt.Sprintf("%d %s", i+1, token) + if lipgloss.Width(label) > cellW { + label = truncateToWidth(label, cellW) + } + if pad := cellW - lipgloss.Width(label); pad > 0 { + left := pad / 2 + right := pad - left + label = strings.Repeat(" ", left) + label + strings.Repeat(" ", right) + } + if settingsModalTab(i) == m.settings.tab { + parts = append(parts, activeStyle.Render(label)) + } else { + parts = append(parts, inactiveStyle.Render(label)) + } + } + + return strings.Join(parts, strings.Repeat(" ", gap)) +} + +func (m Model) settingsModalHint() string { + switch m.settings.tab { + case settingsTabProviders: + return "Up/Down: select · Shift+↑/↓ or Shift+J/K: move item · Space/Enter: enable/disable · Left/Right: switch tab · Esc: close" + case settingsTabWidgetSections: + return "Up/Down: select section · Shift+↑/↓ or Shift+J/K: reorder · Space/Enter: show/hide · h: toggle hide empty sections · PgUp/PgDn or Ctrl+U/D: scroll preview · Esc: close" + case settingsTabAPIKeys: + if m.settings.apiKeyEditing { + return "Type API key · Enter: validate & save · Esc: cancel" + } + return "Up/Down: select · Enter: edit key · d: delete key · Left/Right: switch tab · Esc: close" + case settingsTabView: + return "Up/Down: select view · Space/Enter: apply · v/Shift+V: cycle outside settings · Esc: close" + case settingsTabTelemetry: + return "Up/Down: select · Space/Enter: apply time window · Left/Right: switch tab · Esc: close" + case settingsTabIntegrations: + return "Up/Down: select · Enter/i: install/configure · u: upgrade · r: refresh · Esc: close" + default: + return "Up/Down: select theme · Space/Enter: apply theme · Left/Right: switch tab · Esc: close" + } 
+} + +func (m Model) renderSettingsModalBody(w, h int) string { + switch m.settings.tab { + case settingsTabProviders: + return m.renderSettingsProvidersBody(w, h) + case settingsTabWidgetSections: + return m.renderSettingsWidgetSectionsBody(w, h) + case settingsTabAPIKeys: + return m.renderSettingsAPIKeysBody(w, h) + case settingsTabView: + return m.renderSettingsViewBody(w, h) + case settingsTabTelemetry: + return m.renderSettingsTelemetryBody(w, h) + case settingsTabIntegrations: + return m.renderSettingsIntegrationsBody(w, h) + default: + return m.renderSettingsThemeBody(w, h) + } +} + +func settingsBodyHeaderLines(title, subtitle string) []string { + lines := []string{ + lipgloss.NewStyle().Foreground(colorTeal).Bold(true).Render(title), + } + if strings.TrimSpace(subtitle) != "" { + lines = append(lines, dimStyle.Render(subtitle)) + } + lines = append(lines, "") + return lines +} + +func settingsBodyRule(w int) string { + if w < 8 { + w = 8 + } + return dimStyle.Render(strings.Repeat("─", w-2)) +} + +func settingsSectionLabel(id core.DashboardStandardSection) string { + switch id { + case core.DashboardSectionTopUsageProgress: + return "Top Usage Progress" + case core.DashboardSectionModelBurn: + return "Model Burn" + case core.DashboardSectionClientBurn: + return "Client Burn" + case core.DashboardSectionProjectBreakdown: + return "Project Breakdown" + case core.DashboardSectionToolUsage: + return "Tool Usage" + case core.DashboardSectionMCPUsage: + return "MCP Usage" + case core.DashboardSectionLanguageBurn: + return "Language" + case core.DashboardSectionCodeStats: + return "Code Statistics" + case core.DashboardSectionDailyUsage: + return "Daily Usage" + case core.DashboardSectionProviderBurn: + return "Provider Burn" + case core.DashboardSectionUpstreamProviders: + return "Upstream Providers" + case core.DashboardSectionOtherData: + return "Other Data" + default: + raw := strings.TrimSpace(strings.ReplaceAll(string(id), "_", " ")) + if raw == "" { + 
return "Unknown" + } + parts := strings.Fields(raw) + for i := range parts { + parts[i] = titleCase(parts[i]) + } + return strings.Join(parts, " ") + } +} From 6eb33d9e5521c3d12b25da2b89204c6939ea8734 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 18:04:59 +0100 Subject: [PATCH 22/32] refactor: split copilot github api flow --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 3 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 4 +- internal/providers/copilot/api_data.go | 374 ++++++++++++++++++ internal/providers/copilot/copilot.go | 365 ----------------- 4 files changed, 379 insertions(+), 367 deletions(-) create mode 100644 internal/providers/copilot/api_data.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index a42eaf7..da5e906 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -62,6 +62,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | R42 | Fixed | Provider display-info split and shared fallback metric helpers | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/core/dashboard_display_metrics.go` | Provider tile display-summary logic moved out of the main TUI model file, and fallback/rate-limit metric selection now lives in shared core helpers instead of ad hoc TUI parsing. | Continue moving the remaining analytics/detail-specific metric decoding into shared extractors. | | R43 | Fixed | Codex live/session split | `internal/providers/codex/codex.go`, `internal/providers/codex/live_usage.go`, `internal/providers/codex/session_usage.go` | Codex now keeps provider wiring in the main file while live usage fetching and local session projection live in dedicated helpers. | Continue the same concern-based split for the remaining large providers. 
| | R44 | Fixed | Claude Code local file/helper split and settings modal layout split | `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/local_files.go`, `internal/providers/claude_code/local_helpers.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go` | Claude Code local readers and generic helper logic are split out of the main provider file, and the settings modal layout/render wrapper no longer lives inline with all modal state/input handling. | Continue with deeper conversation-aggregation extraction in Claude Code and more TUI render-section splits. | +| R45 | Fixed | Copilot GitHub API split | `internal/providers/copilot/copilot.go`, `internal/providers/copilot/api_data.go` | Copilot's GitHub API fetch, quota projection, and org metrics flow now live in a dedicated file instead of sharing the same unit as local config/log/session parsing. | Continue splitting the remaining local projection/helpers out of the main provider file. | ## Action Table @@ -70,7 +71,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. 
| | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, and settings modal layout is separated, but TUI state-transition and render-heavy flows are still concentrated in a few large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Composition bars, provider tile fallback/rate-limit selection, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | -| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/claude_code/claude_code.go` | Cursor, OpenRouter, and Codex are now materially decomposed, and Claude Code has local-reader/helper splits, but several providers still combine transport, parsing, normalization, and projection in single very large files. 
| Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, telemetry helpers, and Claude Code conversation aggregation. | Smaller diffs, less drift risk, and easier provider-specific testing. | +| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/copilot/api_data.go`, `internal/providers/claude_code/claude_code.go` | Cursor, OpenRouter, Codex, and Copilot's API path are materially decomposed, and Claude Code has local-reader/helper splits, but several providers still combine large local parsing/projection flows in very large files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, telemetry helpers, and Claude Code/Copilot local aggregation paths. | Smaller diffs, less drift risk, and easier provider-specific testing. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. | Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. 
| | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. | | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index 39afb07..dfb63b6 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -21,6 +21,7 @@ These were major concerns in earlier reviews and are now materially addressed: - Codex and Claude Code raw parser duplication. - Codex live/session flow concentrated in one provider file. - Claude Code local file readers and model-summary helpers concentrated in one provider file. 
+- Copilot GitHub API fetch/quota/org-metrics flow concentrated in the same file as local log/session parsing. - OpenRouter provider-resolution, analytics, generation, projection, and account-path monolith sprawl. - TUI side-effect leakage into config persistence / integration install / provider validation. - Settings modal layout/render wrapper living inline with settings state/input handling. @@ -78,13 +79,14 @@ What to address: ### 4. [P2] Several providers are still large mixed-responsibility units -Cursor, OpenRouter, and Codex are now in much better shape, and Claude Code has started the same split, but several providers still remain monoliths that mix transport, parsing, normalization, and projection in one place. +Cursor, OpenRouter, and Codex are now in much better shape, Claude Code has started the same split, and Copilot's GitHub API path is now separated, but several providers still remain monoliths that mix transport, parsing, normalization, and projection in one place. Refs: - [ollama.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/ollama.go) - [zai.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/zai.go) - [gemini_cli.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/gemini_cli.go) - [copilot.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/copilot.go) +- [api_data.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/api_data.go) - [claude_code.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/claude_code.go) - [local_files.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_files.go) - [local_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_helpers.go) diff --git a/internal/providers/copilot/api_data.go b/internal/providers/copilot/api_data.go new file mode 100644 index 
0000000..3277ca5 --- /dev/null +++ b/internal/providers/copilot/api_data.go @@ -0,0 +1,374 @@ +package copilot + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (p *Provider) fetchUserInfo(ctx context.Context, binary string, snap *core.UsageSnapshot) { + userJSON, err := runGHAPI(ctx, binary, "/user") + if err != nil { + return + } + var user ghUser + if json.Unmarshal([]byte(userJSON), &user) != nil { + return + } + if user.Login != "" { + snap.Raw["github_login"] = user.Login + } + if user.Name != "" { + snap.Raw["github_name"] = user.Name + } + if user.Plan.Name != "" { + snap.Raw["github_plan"] = user.Plan.Name + } +} + +func (p *Provider) fetchCopilotInternalUser(ctx context.Context, binary string, snap *core.UsageSnapshot) { + body, err := runGHAPI(ctx, binary, "/copilot_internal/user") + if err != nil { + return + } + var cu copilotInternalUser + if json.Unmarshal([]byte(body), &cu) != nil { + return + } + p.applyCopilotInternalUser(&cu, snap) +} + +func (p *Provider) applyCopilotInternalUser(cu *copilotInternalUser, snap *core.UsageSnapshot) { + if cu == nil { + return + } + + snap.Raw["copilot_plan"] = cu.CopilotPlan + snap.Raw["access_type_sku"] = cu.AccessTypeSKU + if cu.AssignedDate != "" { + snap.Raw["assigned_date"] = cu.AssignedDate + } + if cu.CodexAgentEnabled { + snap.Raw["codex_agent_enabled"] = "true" + } + if cu.UsageResetDate != "" { + snap.Raw["quota_reset_date"] = cu.UsageResetDate + } + if cu.UsageResetDateUTC != "" { + snap.Raw["quota_reset_date_utc"] = cu.UsageResetDateUTC + } + + features := []string{} + if cu.ChatEnabled { + features = append(features, "chat") + } + if cu.MCPEnabled { + features = append(features, "mcp") + } + if cu.CopilotIgnoreEnabled { + features = append(features, "copilotignore") + } + if len(features) > 0 { + snap.Raw["features_enabled"] = strings.Join(features, ", ") + } + + if api, ok := 
cu.Endpoints["api"]; ok { + snap.Raw["api_endpoint"] = api + } + + if len(cu.OrganizationLoginList) > 0 { + snap.Raw["copilot_orgs"] = strings.Join(cu.OrganizationLoginList, ", ") + } + for _, org := range cu.OrganizationList { + key := fmt.Sprintf("org_%s_plan", org.Login) + snap.Raw[key] = org.CopilotPlan + if org.IsEnterprise { + snap.Raw[fmt.Sprintf("org_%s_enterprise", org.Login)] = "true" + } + } + + p.applyUsageSnapshotMetrics(cu.UsageSnapshots, snap) + + for _, candidate := range []string{cu.UsageResetDateUTC, cu.UsageResetDate, cu.LimitedUserResetDate} { + if t := parseCopilotTime(candidate); !t.IsZero() { + snap.Resets["quota_reset"] = t + break + } + } +} + +func (p *Provider) applyUsageSnapshotMetrics(snapshots *copilotUsageSnapshots, snap *core.UsageSnapshot) bool { + if snapshots == nil { + return false + } + + applied := false + if p.applySingleUsageSnapshot("chat_quota", "messages", snapshots.Chat, snap) { + applied = true + } + if p.applySingleUsageSnapshot("completions_quota", "completions", snapshots.Completions, snap) { + applied = true + } + if p.applySingleUsageSnapshot("premium_interactions_quota", "requests", snapshots.PremiumInteractions, snap) { + applied = true + } + return applied +} + +func (p *Provider) applySingleUsageSnapshot(key, unit string, quota *copilotUsageSnapshot, snap *core.UsageSnapshot) bool { + if quota == nil { + return false + } + + if quota.UsageID != "" { + snap.Raw[key+"_id"] = quota.UsageID + } + if quota.OveragePermitted != nil { + snap.Raw[key+"_overage_permitted"] = strconv.FormatBool(*quota.OveragePermitted) + } + if quota.Unlimited != nil && *quota.Unlimited { + snap.Raw[key+"_unlimited"] = "true" + return false + } + if quota.TimestampUTC != "" { + if t := parseCopilotTime(quota.TimestampUTC); !t.IsZero() { + snap.Resets[key+"_snapshot"] = t + } + } + + remaining := firstNonNilFloat(quota.UsageRemaining, quota.Remaining) + limit := quota.Entitlement + pct := clampPercent(firstFloat(quota.PercentRemaining)) + + 
switch { + case limit != nil && remaining != nil: + used := *limit - *remaining + if used < 0 { + used = 0 + } + snap.Metrics[key] = core.Metric{ + Limit: core.Float64Ptr(*limit), + Remaining: core.Float64Ptr(*remaining), + Used: core.Float64Ptr(used), + Unit: unit, + Window: "month", + } + return true + case pct >= 0: + limitPct := 100.0 + used := 100 - pct + snap.Metrics[key] = core.Metric{ + Limit: &limitPct, + Remaining: core.Float64Ptr(pct), + Used: core.Float64Ptr(used), + Unit: "%", + Window: "month", + } + return true + case remaining != nil: + snap.Metrics[key] = core.Metric{ + Used: core.Float64Ptr(*remaining), + Unit: unit, + Window: "month", + } + return true + default: + return false + } +} + +func (p *Provider) fetchRateLimits(ctx context.Context, binary string, snap *core.UsageSnapshot) { + body, err := runGHAPI(ctx, binary, "/rate_limit") + if err != nil { + return + } + var rl ghRateLimit + if json.Unmarshal([]byte(body), &rl) != nil { + return + } + + for _, resource := range []string{"core", "search", "graphql"} { + res, ok := rl.Resources[resource] + if !ok || res.Limit == 0 { + continue + } + limit := float64(res.Limit) + remaining := float64(res.Remaining) + used := float64(res.Used) + if used == 0 && res.Remaining >= 0 && res.Remaining <= res.Limit { + used = limit - remaining + } + key := "gh_" + resource + "_rpm" + snap.Metrics[key] = core.Metric{ + Limit: &limit, + Remaining: &remaining, + Used: &used, + Unit: "requests", + Window: "1h", + } + if res.Reset > 0 { + snap.Resets[key+"_reset"] = time.Unix(res.Reset, 0) + } + } +} + +func (p *Provider) fetchOrgData(ctx context.Context, binary string, snap *core.UsageSnapshot) { + orgs := snap.Raw["copilot_orgs"] + if orgs == "" { + return + } + + for _, org := range strings.Split(orgs, ", ") { + org = strings.TrimSpace(org) + if org == "" { + continue + } + p.fetchOrgBilling(ctx, binary, org, snap) + p.fetchOrgMetrics(ctx, binary, org, snap) + } +} + +func (p *Provider) fetchOrgBilling(ctx 
context.Context, binary, org string, snap *core.UsageSnapshot) { + body, err := runGHAPI(ctx, binary, fmt.Sprintf("/orgs/%s/copilot/billing", org)) + if err != nil { + return + } + var billing orgBilling + if json.Unmarshal([]byte(body), &billing) != nil { + return + } + + prefix := fmt.Sprintf("org_%s_", org) + snap.Raw[prefix+"billing_plan"] = billing.PlanType + snap.Raw[prefix+"seat_mgmt"] = billing.SeatManagementSetting + snap.Raw[prefix+"ide_chat"] = billing.IDEChat + snap.Raw[prefix+"platform_chat"] = billing.PlatformChat + snap.Raw[prefix+"cli"] = billing.CLI + snap.Raw[prefix+"public_code"] = billing.PublicCodeSuggestions + + if billing.SeatBreakdown.Total > 0 { + total := float64(billing.SeatBreakdown.Total) + active := float64(billing.SeatBreakdown.ActiveThisCycle) + snap.Metrics[prefix+"seats"] = core.Metric{ + Limit: &total, + Used: &active, + Unit: "seats", + Window: "cycle", + } + } +} + +func (p *Provider) fetchOrgMetrics(ctx context.Context, binary, org string, snap *core.UsageSnapshot) { + body, err := runGHAPI(ctx, binary, fmt.Sprintf("/orgs/%s/copilot/metrics", org)) + if err != nil { + return + } + var days []orgMetricsDay + if json.Unmarshal([]byte(body), &days) != nil { + return + } + if len(days) == 0 { + return + } + + prefix := "org_" + org + "_" + activeUsers := make([]core.TimePoint, 0, len(days)) + engagedUsers := make([]core.TimePoint, 0, len(days)) + totalSuggestions := make([]core.TimePoint, 0, len(days)) + totalAcceptances := make([]core.TimePoint, 0, len(days)) + totalChats := make([]core.TimePoint, 0, len(days)) + aggSuggestions := 0.0 + aggAcceptances := 0.0 + aggChats := 0.0 + + for _, day := range days { + activeUsers = append(activeUsers, core.TimePoint{Date: day.Date, Value: float64(day.TotalActiveUsers)}) + engagedUsers = append(engagedUsers, core.TimePoint{Date: day.Date, Value: float64(day.TotalEngagedUsers)}) + + var daySugg, dayAccept float64 + if day.Completions != nil { + for _, editor := range day.Completions.Editors { 
+ for _, model := range editor.Models { + daySugg += float64(model.TotalSuggestions) + dayAccept += float64(model.TotalAcceptances) + } + } + } + totalSuggestions = append(totalSuggestions, core.TimePoint{Date: day.Date, Value: daySugg}) + totalAcceptances = append(totalAcceptances, core.TimePoint{Date: day.Date, Value: dayAccept}) + aggSuggestions += daySugg + aggAcceptances += dayAccept + + var dayChats float64 + if day.IDEChat != nil { + for _, editor := range day.IDEChat.Editors { + for _, model := range editor.Models { + dayChats += float64(model.TotalChats) + } + } + } + if day.DotcomChat != nil { + for _, editor := range day.DotcomChat.Editors { + for _, model := range editor.Models { + dayChats += float64(model.TotalChats) + } + } + } + totalChats = append(totalChats, core.TimePoint{Date: day.Date, Value: dayChats}) + aggChats += dayChats + } + + snap.DailySeries[prefix+"active_users"] = activeUsers + snap.DailySeries[prefix+"engaged_users"] = engagedUsers + snap.DailySeries[prefix+"suggestions"] = totalSuggestions + snap.DailySeries[prefix+"acceptances"] = totalAcceptances + snap.DailySeries[prefix+"chats"] = totalChats + + if len(activeUsers) > 0 { + lastActive := activeUsers[len(activeUsers)-1].Value + snap.Metrics[prefix+"active_users"] = core.Metric{Used: core.Float64Ptr(lastActive), Unit: "users", Window: "day"} + } + if len(engagedUsers) > 0 { + lastEngaged := engagedUsers[len(engagedUsers)-1].Value + snap.Metrics[prefix+"engaged_users"] = core.Metric{Used: core.Float64Ptr(lastEngaged), Unit: "users", Window: "day"} + } + if aggSuggestions > 0 { + snap.Metrics[prefix+"suggestions"] = core.Metric{Used: core.Float64Ptr(aggSuggestions), Unit: "suggestions", Window: "series"} + } + if aggAcceptances > 0 { + snap.Metrics[prefix+"acceptances"] = core.Metric{Used: core.Float64Ptr(aggAcceptances), Unit: "acceptances", Window: "series"} + } + if aggChats > 0 { + snap.Metrics[prefix+"chats"] = core.Metric{Used: core.Float64Ptr(aggChats), Unit: "chats", Window: 
"series"} + } +} + +func runGH(ctx context.Context, binary string, args ...string) (string, error) { + var stdout, stderr bytes.Buffer + cmd := exec.CommandContext(ctx, binary, args...) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return stdout.String() + stderr.String(), err + } + return stdout.String(), nil +} + +func runGHAPI(ctx context.Context, binary, endpoint string) (string, error) { + return runGH( + ctx, + binary, + "api", + "-H", "Cache-Control: no-cache", + "-H", "Pragma: no-cache", + endpoint, + ) +} diff --git a/internal/providers/copilot/copilot.go b/internal/providers/copilot/copilot.go index 968150c..551bed1 100644 --- a/internal/providers/copilot/copilot.go +++ b/internal/providers/copilot/copilot.go @@ -1,7 +1,6 @@ package copilot import ( - "bytes" "context" "encoding/json" "fmt" @@ -9,7 +8,6 @@ import ( "os/exec" "path/filepath" "sort" - "strconv" "strings" "time" @@ -370,345 +368,6 @@ func detectCopilotVersion(ctx context.Context, ghBinary, copilotBinary string) ( return "", "", fmt.Errorf("failed to resolve a working copilot version command") } -func (p *Provider) fetchUserInfo(ctx context.Context, binary string, snap *core.UsageSnapshot) { - userJSON, err := runGHAPI(ctx, binary, "/user") - if err != nil { - return - } - var user ghUser - if json.Unmarshal([]byte(userJSON), &user) != nil { - return - } - if user.Login != "" { - snap.Raw["github_login"] = user.Login - } - if user.Name != "" { - snap.Raw["github_name"] = user.Name - } - if user.Plan.Name != "" { - snap.Raw["github_plan"] = user.Plan.Name - } -} - -func (p *Provider) fetchCopilotInternalUser(ctx context.Context, binary string, snap *core.UsageSnapshot) { - body, err := runGHAPI(ctx, binary, "/copilot_internal/user") - if err != nil { - return - } - var cu copilotInternalUser - if json.Unmarshal([]byte(body), &cu) != nil { - return - } - p.applyCopilotInternalUser(&cu, snap) -} - -func (p *Provider) applyCopilotInternalUser(cu 
*copilotInternalUser, snap *core.UsageSnapshot) { - if cu == nil { - return - } - - snap.Raw["copilot_plan"] = cu.CopilotPlan - snap.Raw["access_type_sku"] = cu.AccessTypeSKU - if cu.AssignedDate != "" { - snap.Raw["assigned_date"] = cu.AssignedDate - } - if cu.CodexAgentEnabled { - snap.Raw["codex_agent_enabled"] = "true" - } - if cu.UsageResetDate != "" { - snap.Raw["quota_reset_date"] = cu.UsageResetDate - } - if cu.UsageResetDateUTC != "" { - snap.Raw["quota_reset_date_utc"] = cu.UsageResetDateUTC - } - - features := []string{} - if cu.ChatEnabled { - features = append(features, "chat") - } - if cu.MCPEnabled { - features = append(features, "mcp") - } - if cu.CopilotIgnoreEnabled { - features = append(features, "copilotignore") - } - if len(features) > 0 { - snap.Raw["features_enabled"] = strings.Join(features, ", ") - } - - if api, ok := cu.Endpoints["api"]; ok { - snap.Raw["api_endpoint"] = api - } - - if len(cu.OrganizationLoginList) > 0 { - snap.Raw["copilot_orgs"] = strings.Join(cu.OrganizationLoginList, ", ") - } - for _, org := range cu.OrganizationList { - key := fmt.Sprintf("org_%s_plan", org.Login) - snap.Raw[key] = org.CopilotPlan - if org.IsEnterprise { - snap.Raw[fmt.Sprintf("org_%s_enterprise", org.Login)] = "true" - } - } - - p.applyUsageSnapshotMetrics(cu.UsageSnapshots, snap) - - for _, candidate := range []string{cu.UsageResetDateUTC, cu.UsageResetDate, cu.LimitedUserResetDate} { - if t := parseCopilotTime(candidate); !t.IsZero() { - snap.Resets["quota_reset"] = t - break - } - } -} - -func (p *Provider) applyUsageSnapshotMetrics(snapshots *copilotUsageSnapshots, snap *core.UsageSnapshot) bool { - if snapshots == nil { - return false - } - - applied := false - if p.applySingleUsageSnapshot("chat_quota", "messages", snapshots.Chat, snap) { - applied = true - } - if p.applySingleUsageSnapshot("completions_quota", "completions", snapshots.Completions, snap) { - applied = true - } - if p.applySingleUsageSnapshot("premium_interactions_quota", 
"requests", snapshots.PremiumInteractions, snap) { - applied = true - } - return applied -} - -func (p *Provider) applySingleUsageSnapshot(key, unit string, quota *copilotUsageSnapshot, snap *core.UsageSnapshot) bool { - if quota == nil { - return false - } - - if quota.UsageID != "" { - snap.Raw[key+"_id"] = quota.UsageID - } - if quota.OveragePermitted != nil { - snap.Raw[key+"_overage_permitted"] = strconv.FormatBool(*quota.OveragePermitted) - } - if quota.Unlimited != nil && *quota.Unlimited { - snap.Raw[key+"_unlimited"] = "true" - return false - } - if quota.TimestampUTC != "" { - if t := parseCopilotTime(quota.TimestampUTC); !t.IsZero() { - snap.Resets[key+"_snapshot"] = t - } - } - - remaining := firstNonNilFloat(quota.UsageRemaining, quota.Remaining) - limit := quota.Entitlement - pct := clampPercent(firstFloat(quota.PercentRemaining)) - - switch { - case limit != nil && remaining != nil: - used := *limit - *remaining - if used < 0 { - used = 0 - } - snap.Metrics[key] = core.Metric{ - Limit: core.Float64Ptr(*limit), - Remaining: core.Float64Ptr(*remaining), - Used: core.Float64Ptr(used), - Unit: unit, - Window: "month", - } - return true - case pct >= 0: - limitPct := 100.0 - used := 100 - pct - snap.Metrics[key] = core.Metric{ - Limit: &limitPct, - Remaining: core.Float64Ptr(pct), - Used: core.Float64Ptr(used), - Unit: "%", - Window: "month", - } - return true - case remaining != nil: - snap.Metrics[key] = core.Metric{ - Used: core.Float64Ptr(*remaining), - Unit: unit, - Window: "month", - } - return true - default: - return false - } -} - -func (p *Provider) fetchRateLimits(ctx context.Context, binary string, snap *core.UsageSnapshot) { - body, err := runGHAPI(ctx, binary, "/rate_limit") - if err != nil { - return - } - var rl ghRateLimit - if json.Unmarshal([]byte(body), &rl) != nil { - return - } - - for _, resource := range []string{"core", "search", "graphql"} { - res, ok := rl.Resources[resource] - if !ok || res.Limit == 0 { - continue - } - limit 
:= float64(res.Limit) - remaining := float64(res.Remaining) - used := float64(res.Used) - if used == 0 && res.Remaining >= 0 && res.Remaining <= res.Limit { - used = limit - remaining - } - key := "gh_" + resource + "_rpm" - snap.Metrics[key] = core.Metric{ - Limit: &limit, - Remaining: &remaining, - Used: &used, - Unit: "requests", - Window: "1h", - } - if res.Reset > 0 { - snap.Resets[key+"_reset"] = time.Unix(res.Reset, 0) - } - } -} - -func (p *Provider) fetchOrgData(ctx context.Context, binary string, snap *core.UsageSnapshot) { - orgs := snap.Raw["copilot_orgs"] - if orgs == "" { - return - } - - for _, org := range strings.Split(orgs, ", ") { - org = strings.TrimSpace(org) - if org == "" { - continue - } - p.fetchOrgBilling(ctx, binary, org, snap) - p.fetchOrgMetrics(ctx, binary, org, snap) - } -} - -func (p *Provider) fetchOrgBilling(ctx context.Context, binary, org string, snap *core.UsageSnapshot) { - body, err := runGHAPI(ctx, binary, fmt.Sprintf("/orgs/%s/copilot/billing", org)) - if err != nil { - return - } - var billing orgBilling - if json.Unmarshal([]byte(body), &billing) != nil { - return - } - - prefix := fmt.Sprintf("org_%s_", org) - snap.Raw[prefix+"billing_plan"] = billing.PlanType - snap.Raw[prefix+"seat_mgmt"] = billing.SeatManagementSetting - snap.Raw[prefix+"ide_chat"] = billing.IDEChat - snap.Raw[prefix+"platform_chat"] = billing.PlatformChat - snap.Raw[prefix+"cli"] = billing.CLI - snap.Raw[prefix+"public_code"] = billing.PublicCodeSuggestions - - if billing.SeatBreakdown.Total > 0 { - total := float64(billing.SeatBreakdown.Total) - active := float64(billing.SeatBreakdown.ActiveThisCycle) - inactive := total - active - snap.Metrics[prefix+"seats"] = core.Metric{ - Limit: &total, - Used: &active, - Unit: "seats", - Window: "cycle", - } - _ = inactive - } -} - -func (p *Provider) fetchOrgMetrics(ctx context.Context, binary, org string, snap *core.UsageSnapshot) { - body, err := runGHAPI(ctx, binary, fmt.Sprintf("/orgs/%s/copilot/metrics", 
org)) - if err != nil { - return - } - var days []orgMetricsDay - if json.Unmarshal([]byte(body), &days) != nil { - return - } - if len(days) == 0 { - return - } - - prefix := "org_" + org + "_" - activeUsers := make([]core.TimePoint, 0, len(days)) - engagedUsers := make([]core.TimePoint, 0, len(days)) - totalSuggestions := make([]core.TimePoint, 0, len(days)) - totalAcceptances := make([]core.TimePoint, 0, len(days)) - totalChats := make([]core.TimePoint, 0, len(days)) - aggSuggestions := 0.0 - aggAcceptances := 0.0 - aggChats := 0.0 - - for _, day := range days { - activeUsers = append(activeUsers, core.TimePoint{Date: day.Date, Value: float64(day.TotalActiveUsers)}) - engagedUsers = append(engagedUsers, core.TimePoint{Date: day.Date, Value: float64(day.TotalEngagedUsers)}) - - var daySugg, dayAccept float64 - if day.Completions != nil { - for _, editor := range day.Completions.Editors { - for _, model := range editor.Models { - daySugg += float64(model.TotalSuggestions) - dayAccept += float64(model.TotalAcceptances) - } - } - } - totalSuggestions = append(totalSuggestions, core.TimePoint{Date: day.Date, Value: daySugg}) - totalAcceptances = append(totalAcceptances, core.TimePoint{Date: day.Date, Value: dayAccept}) - aggSuggestions += daySugg - aggAcceptances += dayAccept - - var dayChats float64 - if day.IDEChat != nil { - for _, editor := range day.IDEChat.Editors { - for _, model := range editor.Models { - dayChats += float64(model.TotalChats) - } - } - } - if day.DotcomChat != nil { - for _, editor := range day.DotcomChat.Editors { - for _, model := range editor.Models { - dayChats += float64(model.TotalChats) - } - } - } - totalChats = append(totalChats, core.TimePoint{Date: day.Date, Value: dayChats}) - aggChats += dayChats - } - - snap.DailySeries[prefix+"active_users"] = activeUsers - snap.DailySeries[prefix+"engaged_users"] = engagedUsers - snap.DailySeries[prefix+"suggestions"] = totalSuggestions - snap.DailySeries[prefix+"acceptances"] = 
totalAcceptances - snap.DailySeries[prefix+"chats"] = totalChats - - if len(activeUsers) > 0 { - lastActive := activeUsers[len(activeUsers)-1].Value - snap.Metrics[prefix+"active_users"] = core.Metric{Used: core.Float64Ptr(lastActive), Unit: "users", Window: "day"} - } - if len(engagedUsers) > 0 { - lastEngaged := engagedUsers[len(engagedUsers)-1].Value - snap.Metrics[prefix+"engaged_users"] = core.Metric{Used: core.Float64Ptr(lastEngaged), Unit: "users", Window: "day"} - } - if aggSuggestions > 0 { - snap.Metrics[prefix+"suggestions"] = core.Metric{Used: core.Float64Ptr(aggSuggestions), Unit: "suggestions", Window: "series"} - } - if aggAcceptances > 0 { - snap.Metrics[prefix+"acceptances"] = core.Metric{Used: core.Float64Ptr(aggAcceptances), Unit: "acceptances", Window: "series"} - } - if aggChats > 0 { - snap.Metrics[prefix+"chats"] = core.Metric{Used: core.Float64Ptr(aggChats), Unit: "chats", Window: "series"} - } -} - func (p *Provider) fetchLocalData(acct core.AccountConfig, snap *core.UsageSnapshot) { if acct.ExtraData != nil { if dir := strings.TrimSpace(acct.ExtraData["config_dir"]); dir != "" { @@ -1738,30 +1397,6 @@ func skuLabel(sku string) string { } } -func runGH(ctx context.Context, binary string, args ...string) (string, error) { - var stdout, stderr bytes.Buffer - cmd := exec.CommandContext(ctx, binary, args...) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return stdout.String() + stderr.String(), err - } - return stdout.String(), nil -} - -func runGHAPI(ctx context.Context, binary, endpoint string) (string, error) { - // Ask GitHub to revalidate so we don't pin stale Copilot quota/rate data. 
- return runGH( - ctx, - binary, - "api", - "-H", "Cache-Control: no-cache", - "-H", "Pragma: no-cache", - endpoint, - ) -} - func parseSimpleYAML(content string) map[string]string { result := make(map[string]string) for _, line := range strings.Split(content, "\n") { From c987412e54d743dba74c51fff0716eb86029efd8 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 18:38:17 +0100 Subject: [PATCH 23/32] refactor: split copilot local session flow --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 3 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 5 +- internal/providers/copilot/copilot.go | 1529 ----------------- internal/providers/copilot/local_data.go | 877 ++++++++++ internal/providers/copilot/local_helpers.go | 665 +++++++ 5 files changed, 1548 insertions(+), 1531 deletions(-) create mode 100644 internal/providers/copilot/local_data.go create mode 100644 internal/providers/copilot/local_helpers.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index da5e906..ef6d0d0 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -63,6 +63,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | R43 | Fixed | Codex live/session split | `internal/providers/codex/codex.go`, `internal/providers/codex/live_usage.go`, `internal/providers/codex/session_usage.go` | Codex now keeps provider wiring in the main file while live usage fetching and local session projection live in dedicated helpers. | Continue the same concern-based split for the remaining large providers. 
| | R44 | Fixed | Claude Code local file/helper split and settings modal layout split | `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/local_files.go`, `internal/providers/claude_code/local_helpers.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go` | Claude Code local readers and generic helper logic are split out of the main provider file, and the settings modal layout/render wrapper no longer lives inline with all modal state/input handling. | Continue with deeper conversation-aggregation extraction in Claude Code and more TUI render-section splits. | | R45 | Fixed | Copilot GitHub API split | `internal/providers/copilot/copilot.go`, `internal/providers/copilot/api_data.go` | Copilot's GitHub API fetch, quota projection, and org metrics flow now live in a dedicated file instead of sharing the same unit as local config/log/session parsing. | Continue splitting the remaining local projection/helpers out of the main provider file. | +| R46 | Fixed | Copilot local config/log/session split | `internal/providers/copilot/copilot.go`, `internal/providers/copilot/local_data.go`, `internal/providers/copilot/local_helpers.go` | Copilot local config loading, log/session readers, and local parsing/projection helpers now live outside the main provider file. The coordinator file is reduced to provider setup, fetch orchestration, and status/metric selection helpers. | Keep future Copilot local-data work inside the dedicated helper units instead of re-growing the coordinator. | ## Action Table @@ -71,7 +72,7 @@ This table captures every issue found in this pass. It is broad and high-signal, | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. 
| Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, and settings modal layout is separated, but TUI state-transition and render-heavy flows are still concentrated in a few large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Composition bars, provider tile fallback/rate-limit selection, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. 
| -| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/copilot/copilot.go`, `internal/providers/copilot/api_data.go`, `internal/providers/claude_code/claude_code.go` | Cursor, OpenRouter, Codex, and Copilot's API path are materially decomposed, and Claude Code has local-reader/helper splits, but several providers still combine large local parsing/projection flows in very large files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, telemetry helpers, and Claude Code/Copilot local aggregation paths. | Smaller diffs, less drift risk, and easier provider-specific testing. | +| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/claude_code/claude_code.go` | Cursor, OpenRouter, Codex, and Copilot are now materially decomposed, and Claude Code has local-reader/helper splits, but several providers still combine large parsing/projection flows in very large files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, telemetry helpers, and the remaining Claude Code conversation aggregation path. | Smaller diffs, less drift risk, and easier provider-specific testing. 
| | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. | Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. | | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. | | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. 
| Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index dfb63b6..d2f6f0c 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -22,6 +22,7 @@ These were major concerns in earlier reviews and are now materially addressed: - Codex live/session flow concentrated in one provider file. - Claude Code local file readers and model-summary helpers concentrated in one provider file. - Copilot GitHub API fetch/quota/org-metrics flow concentrated in the same file as local log/session parsing. +- Copilot local config/log/session parsing concentrated in the same file as provider orchestration. - OpenRouter provider-resolution, analytics, generation, projection, and account-path monolith sprawl. - TUI side-effect leakage into config persistence / integration install / provider validation. - Settings modal layout/render wrapper living inline with settings state/input handling. @@ -79,7 +80,7 @@ What to address: ### 4. [P2] Several providers are still large mixed-responsibility units -Cursor, OpenRouter, and Codex are now in much better shape, Claude Code has started the same split, and Copilot's GitHub API path is now separated, but several providers still remain monoliths that mix transport, parsing, normalization, and projection in one place. +Cursor, OpenRouter, Codex, and Copilot are now in much better shape, and Claude Code has started the same split, but several providers still remain monoliths that mix transport, parsing, normalization, and projection in one place. 
Refs: - [ollama.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/ollama.go) @@ -87,6 +88,8 @@ Refs: - [gemini_cli.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/gemini_cli.go) - [copilot.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/copilot.go) - [api_data.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/api_data.go) +- [local_data.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/local_data.go) +- [local_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/local_helpers.go) - [claude_code.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/claude_code.go) - [local_files.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_files.go) - [local_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_helpers.go) diff --git a/internal/providers/copilot/copilot.go b/internal/providers/copilot/copilot.go index 551bed1..ac819f7 100644 --- a/internal/providers/copilot/copilot.go +++ b/internal/providers/copilot/copilot.go @@ -7,14 +7,11 @@ import ( "os" "os/exec" "path/filepath" - "sort" "strings" "time" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" - "github.com/janekbaraniewski/openusage/internal/providers/shared" - "github.com/samber/lo" ) const ( @@ -391,945 +388,6 @@ func (p *Provider) fetchLocalData(acct core.AccountConfig, snap *core.UsageSnaps p.readSessions(copilotDir, snap, logData) } -func (p *Provider) readConfig(copilotDir string, snap *core.UsageSnapshot) { - data, err := os.ReadFile(filepath.Join(copilotDir, "config.json")) - if err != nil { - return - } - var cfg copilotConfig - if json.Unmarshal(data, &cfg) != nil { - return - } - if cfg.Model != "" { - 
snap.Raw["preferred_model"] = cfg.Model - } - if cfg.ReasoningEffort != "" { - snap.Raw["reasoning_effort"] = cfg.ReasoningEffort - } - if cfg.Experimental { - snap.Raw["experimental"] = "enabled" - } -} - -type logSummary struct { - DefaultModel string - SessionTokens map[string]logTokenEntry // sessionID → last CompactionProcessor entry - SessionBurn map[string]float64 // sessionID → cumulative positive token deltas -} - -func (p *Provider) readLogs(copilotDir string, snap *core.UsageSnapshot) logSummary { - ls := logSummary{ - SessionTokens: make(map[string]logTokenEntry), - SessionBurn: make(map[string]float64), - } - sessionEntries := make(map[string][]logTokenEntry) - logDir := filepath.Join(copilotDir, "logs") - entries, err := os.ReadDir(logDir) - if err != nil { - return ls - } - - var allTokenEntries []logTokenEntry - - for _, entry := range entries { - if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".log") { - continue - } - data, err := os.ReadFile(filepath.Join(logDir, entry.Name())) - if err != nil { - continue - } - - var currentSessionID string - for _, line := range strings.Split(string(data), "\n") { - line = strings.TrimSpace(line) - - if strings.Contains(line, "Workspace initialized:") { - if idx := strings.Index(line, "Workspace initialized:"); idx >= 0 { - rest := strings.TrimSpace(line[idx+len("Workspace initialized:"):]) - if spIdx := strings.Index(rest, " "); spIdx > 0 { - currentSessionID = rest[:spIdx] - } else if rest != "" { - currentSessionID = rest - } - } - } - - if strings.Contains(line, "Using default model:") { - if idx := strings.Index(line, "Using default model:"); idx >= 0 { - m := strings.TrimSpace(line[idx+len("Using default model:"):]) - if m != "" { - ls.DefaultModel = m - } - } - } - - if strings.Contains(line, "CompactionProcessor: Utilization") { - te := parseCompactionLine(line) - if te.Total > 0 { - allTokenEntries = append(allTokenEntries, te) - if currentSessionID != "" { - sessionEntries[currentSessionID] = 
append(sessionEntries[currentSessionID], te) - } - } - } - } - } - - if ls.DefaultModel != "" { - snap.Raw["default_model"] = ls.DefaultModel - } - - for sessionID, entries := range sessionEntries { - sortCompactionEntries(entries) - last := entries[len(entries)-1] - ls.SessionTokens[sessionID] = last - - burn := 0.0 - for idx, te := range entries { - if idx == 0 { - if te.Used > 0 { - burn += float64(te.Used) - } - continue - } - delta := te.Used - entries[idx-1].Used - if delta > 0 { - burn += float64(delta) - } - } - if burn > 0 { - ls.SessionBurn[sessionID] = burn - } - } - - if last, ok := newestCompactionEntry(allTokenEntries); ok { - snap.Raw["context_window_tokens"] = fmt.Sprintf("%d/%d", last.Used, last.Total) - pct := float64(last.Used) / float64(last.Total) * 100 - snap.Raw["context_window_pct"] = fmt.Sprintf("%.1f%%", pct) - used := float64(last.Used) - limit := float64(last.Total) - snap.Metrics["context_window"] = core.Metric{ - Limit: &limit, - Used: &used, - Remaining: core.Float64Ptr(limit - used), - Unit: "tokens", - Window: "session", - } - } - - return ls -} - -type assistantMsgData struct { - Content string `json:"content"` - ReasoningTxt string `json:"reasoningText"` - ToolRequests json.RawMessage `json:"toolRequests"` -} - -type quotaSnapshotEntry struct { - EntitlementRequests int `json:"entitlementRequests"` - UsedRequests int `json:"usedRequests"` - RemainingPercentage float64 `json:"remainingPercentage"` - ResetDate string `json:"resetDate"` -} - -type assistantUsageData struct { - Model string `json:"model"` - InputTokens float64 `json:"inputTokens"` - OutputTokens float64 `json:"outputTokens"` - CacheReadTokens float64 `json:"cacheReadTokens"` - CacheWriteTokens float64 `json:"cacheWriteTokens"` - Cost float64 `json:"cost"` - Duration int64 `json:"duration"` - QuotaSnapshots map[string]quotaSnapshotEntry `json:"quotaSnapshots"` -} - -type sessionShutdownData struct { - ShutdownType string `json:"shutdownType"` - TotalPremiumRequests int 
`json:"totalPremiumRequests"` - TotalAPIDurationMs int64 `json:"totalApiDurationMs"` - SessionStartTime string `json:"sessionStartTime"` - CodeChanges shutdownCodeChanges `json:"codeChanges"` - ModelMetrics map[string]shutdownModelMetric `json:"modelMetrics"` -} - -type shutdownCodeChanges struct { - LinesAdded int `json:"linesAdded"` - LinesRemoved int `json:"linesRemoved"` - FilesModified int `json:"filesModified"` -} - -type shutdownModelMetric struct { - Requests struct { - Count int `json:"count"` - Cost float64 `json:"cost"` - } `json:"requests"` - Usage struct { - InputTokens float64 `json:"inputTokens"` - OutputTokens float64 `json:"outputTokens"` - CacheReadTokens float64 `json:"cacheReadTokens"` - CacheWriteTokens float64 `json:"cacheWriteTokens"` - } `json:"usage"` -} - -func (p *Provider) readSessions(copilotDir string, snap *core.UsageSnapshot, logs logSummary) { - sessionDir := filepath.Join(copilotDir, "session-state") - entries, err := os.ReadDir(sessionDir) - if err != nil { - return - } - - snap.Raw["total_sessions"] = fmt.Sprintf("%d", len(entries)) - - type sessionInfo struct { - id string - createdAt time.Time - updatedAt time.Time - cwd string - repo string - branch string - client string - summary string - messages int - turns int - model string - responseChars int - reasoningChars int - toolCalls int - tokenUsed int - tokenTotal int - tokenBurn float64 - usageCost float64 - premiumRequests int - shutdownPremiumRequests int - linesAdded int - linesRemoved int - filesModified int - } - - var sessions []sessionInfo - dailyMessages := make(map[string]float64) - dailySessions := make(map[string]float64) - dailyToolCalls := make(map[string]float64) - dailyTokens := make(map[string]float64) - modelMessages := make(map[string]int) - modelTurns := make(map[string]int) - modelSessions := make(map[string]int) - modelResponseChars := make(map[string]int) - modelReasoningChars := make(map[string]int) - modelToolCalls := make(map[string]int) - 
dailyModelMessages := make(map[string]map[string]float64) - dailyModelTokens := make(map[string]map[string]float64) - modelInputTokens := make(map[string]float64) - usageInputTokens := make(map[string]float64) - usageOutputTokens := make(map[string]float64) - usageCacheReadTokens := make(map[string]float64) - usageCacheWriteTokens := make(map[string]float64) - usageCost := make(map[string]float64) - usageRequests := make(map[string]int) - usageDuration := make(map[string]int64) - dailyCost := make(map[string]float64) - var latestQuotaSnapshots map[string]quotaSnapshotEntry - var shutdownPremiumRequests int - var shutdownLinesAdded, shutdownLinesRemoved, shutdownFilesModified int - shutdownModelCost := make(map[string]float64) - shutdownModelRequests := make(map[string]int) - shutdownModelInputTokens := make(map[string]float64) - shutdownModelOutputTokens := make(map[string]float64) - toolUsageCounts := make(map[string]int) - languageUsageCounts := make(map[string]int) - changedFiles := make(map[string]bool) - commitCommands := make(map[string]bool) - clientLabels := make(map[string]string) - clientTokens := make(map[string]float64) - clientSessions := make(map[string]int) - clientMessages := make(map[string]int) - dailyClientTokens := make(map[string]map[string]float64) - var inferredLinesAdded, inferredLinesRemoved int - var inferredCommitCount int - - for _, entry := range entries { - if !entry.IsDir() { - continue - } - si := sessionInfo{id: entry.Name()} - sessPath := filepath.Join(sessionDir, entry.Name()) - - if wsData, err := os.ReadFile(filepath.Join(sessPath, "workspace.yaml")); err == nil { - ws := parseSimpleYAML(string(wsData)) - si.cwd = ws["cwd"] - si.repo = ws["repository"] - si.branch = ws["branch"] - si.summary = ws["summary"] - si.createdAt = flexParseTime(ws["created_at"]) - si.updatedAt = flexParseTime(ws["updated_at"]) - } - - if te, ok := logs.SessionTokens[si.id]; ok { - si.tokenUsed = te.Used - si.tokenTotal = te.Total - if 
!te.Timestamp.IsZero() { - if si.createdAt.IsZero() { - si.createdAt = te.Timestamp - } - if si.updatedAt.IsZero() || te.Timestamp.After(si.updatedAt) { - si.updatedAt = te.Timestamp - } - } - } - if burn, ok := logs.SessionBurn[si.id]; ok { - si.tokenBurn = burn - } - - if evtData, err := os.ReadFile(filepath.Join(sessPath, "events.jsonl")); err == nil { - currentModel := logs.DefaultModel - var firstEventAt, lastEventAt time.Time - lines := strings.Split(string(evtData), "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" { - continue - } - var evt sessionEvent - if json.Unmarshal([]byte(line), &evt) != nil { - continue - } - evtTime := flexParseTime(evt.Timestamp) - if !evtTime.IsZero() { - if firstEventAt.IsZero() || evtTime.Before(firstEventAt) { - firstEventAt = evtTime - } - if lastEventAt.IsZero() || evtTime.After(lastEventAt) { - lastEventAt = evtTime - } - } - - switch evt.Type { - case "session.start": - var start sessionStartData - if json.Unmarshal(evt.Data, &start) == nil { - if si.cwd == "" { - si.cwd = start.Context.CWD - } - if si.repo == "" { - si.repo = start.Context.Repository - } - if si.branch == "" { - si.branch = start.Context.Branch - } - if si.createdAt.IsZero() { - si.createdAt = flexParseTime(start.StartTime) - } - if currentModel == "" && start.SelectedModel != "" { - currentModel = start.SelectedModel - } - } - - case "session.model_change": - var mc modelChangeData - if json.Unmarshal(evt.Data, &mc) == nil && mc.NewModel != "" { - currentModel = mc.NewModel - } - - case "session.info": - var info sessionInfoData - if json.Unmarshal(evt.Data, &info) == nil && info.InfoType == "model" { - if m := extractModelFromInfoMsg(info.Message); m != "" { - currentModel = m - } - } - - case "user.message": - si.messages++ - day := parseDayFromTimestamp(evt.Timestamp) - if day != "" { - dailyMessages[day]++ - } - if currentModel != "" { - modelMessages[currentModel]++ - if day != "" { - if 
dailyModelMessages[currentModel] == nil { - dailyModelMessages[currentModel] = make(map[string]float64) - } - dailyModelMessages[currentModel][day]++ - } - } - - case "assistant.turn_start": - si.turns++ - if currentModel != "" { - modelTurns[currentModel]++ - } - - case "assistant.message": - var msg assistantMsgData - if json.Unmarshal(evt.Data, &msg) == nil { - si.responseChars += len(msg.Content) - si.reasoningChars += len(msg.ReasoningTxt) - if currentModel != "" { - modelResponseChars[currentModel] += len(msg.Content) - modelReasoningChars[currentModel] += len(msg.ReasoningTxt) - } - var tools []json.RawMessage - if json.Unmarshal(msg.ToolRequests, &tools) == nil && len(tools) > 0 { - si.toolCalls += len(tools) - if currentModel != "" { - modelToolCalls[currentModel] += len(tools) - } - for _, toolReq := range tools { - toolName := extractCopilotToolName(toolReq) - if toolName == "" { - toolName = "unknown" - } - toolUsageCounts[toolName]++ - toolLower := strings.ToLower(strings.TrimSpace(toolName)) - paths := extractCopilotToolPaths(toolReq) - for _, path := range paths { - if lang := inferCopilotLanguageFromPath(path); lang != "" { - languageUsageCounts[lang]++ - } - if isCopilotMutatingTool(toolLower) { - changedFiles[path] = true - } - } - if isCopilotMutatingTool(toolLower) { - added, removed := estimateCopilotToolLineDelta(toolReq) - inferredLinesAdded += added - inferredLinesRemoved += removed - } - cmd := extractCopilotToolCommand(toolReq) - if cmd != "" { - if strings.Contains(strings.ToLower(cmd), "git commit") && !commitCommands[cmd] { - commitCommands[cmd] = true - inferredCommitCount++ - } - } else if strings.Contains(toolLower, "commit") { - inferredCommitCount++ - } - } - day := parseDayFromTimestamp(evt.Timestamp) - if day != "" { - dailyToolCalls[day] += float64(len(tools)) - } - } - } - - case "assistant.usage": - var usage assistantUsageData - if json.Unmarshal(evt.Data, &usage) == nil && usage.Model != "" { - usageInputTokens[usage.Model] 
+= usage.InputTokens - usageOutputTokens[usage.Model] += usage.OutputTokens - usageCacheReadTokens[usage.Model] += usage.CacheReadTokens - usageCacheWriteTokens[usage.Model] += usage.CacheWriteTokens - usageCost[usage.Model] += usage.Cost - usageRequests[usage.Model]++ - usageDuration[usage.Model] += usage.Duration - - si.usageCost += usage.Cost - si.premiumRequests++ - - day := parseDayFromTimestamp(evt.Timestamp) - if day != "" { - dailyCost[day] += usage.Cost - } - - if len(usage.QuotaSnapshots) > 0 { - latestQuotaSnapshots = usage.QuotaSnapshots - } - } - - case "session.shutdown": - var shutdown sessionShutdownData - if json.Unmarshal(evt.Data, &shutdown) == nil { - shutdownPremiumRequests += shutdown.TotalPremiumRequests - si.shutdownPremiumRequests += shutdown.TotalPremiumRequests - - si.linesAdded += shutdown.CodeChanges.LinesAdded - si.linesRemoved += shutdown.CodeChanges.LinesRemoved - si.filesModified += shutdown.CodeChanges.FilesModified - shutdownLinesAdded += shutdown.CodeChanges.LinesAdded - shutdownLinesRemoved += shutdown.CodeChanges.LinesRemoved - shutdownFilesModified += shutdown.CodeChanges.FilesModified - - for model, metrics := range shutdown.ModelMetrics { - shutdownModelCost[model] += metrics.Requests.Cost - shutdownModelRequests[model] += metrics.Requests.Count - shutdownModelInputTokens[model] += metrics.Usage.InputTokens - shutdownModelOutputTokens[model] += metrics.Usage.OutputTokens - } - } - } - } - if !firstEventAt.IsZero() && si.createdAt.IsZero() { - si.createdAt = firstEventAt - } - if !lastEventAt.IsZero() && (si.updatedAt.IsZero() || lastEventAt.After(si.updatedAt)) { - si.updatedAt = lastEventAt - } - si.model = currentModel - } - - day := dayForSession(si.createdAt, si.updatedAt) - if si.model != "" { - modelSessions[si.model]++ - } - if day != "" { - dailySessions[day]++ - } - - clientLabel := normalizeCopilotClient(si.repo, si.cwd) - clientKey := sanitizeMetricName(clientLabel) - if clientKey == "" { - clientKey = "cli" - } - 
si.client = clientLabel - if _, ok := clientLabels[clientKey]; !ok { - clientLabels[clientKey] = clientLabel - } - clientSessions[clientKey]++ - clientMessages[clientKey] += si.messages - - sessionTokens := float64(si.tokenUsed) - if si.tokenBurn > 0 { - sessionTokens = si.tokenBurn - } - if sessionTokens > 0 { - clientTokens[clientKey] += sessionTokens - if day != "" { - dailyTokens[day] += sessionTokens - if dailyClientTokens[clientKey] == nil { - dailyClientTokens[clientKey] = make(map[string]float64) - } - dailyClientTokens[clientKey][day] += sessionTokens - } - if si.model != "" { - modelInputTokens[si.model] += sessionTokens - if day != "" { - if dailyModelTokens[si.model] == nil { - dailyModelTokens[si.model] = make(map[string]float64) - } - dailyModelTokens[si.model][day] += sessionTokens - } - } - } - sessions = append(sessions, si) - } - - storeSeries(snap, "messages", dailyMessages) - storeSeries(snap, "sessions", dailySessions) - storeSeries(snap, "tool_calls", dailyToolCalls) - storeSeries(snap, "tokens_total", dailyTokens) - storeSeries(snap, "cli_messages", dailyMessages) - storeSeries(snap, "cli_sessions", dailySessions) - storeSeries(snap, "cli_tool_calls", dailyToolCalls) - if len(dailyCost) > 0 { - storeSeries(snap, "cost", dailyCost) - } - for model, dayCounts := range dailyModelMessages { - safe := sanitizeMetricName(model) - storeSeries(snap, "cli_messages_"+safe, dayCounts) - } - for model, dayCounts := range dailyModelTokens { - safe := sanitizeMetricName(model) - storeSeries(snap, "tokens_"+safe, dayCounts) - storeSeries(snap, "cli_tokens_"+safe, dayCounts) - } - - setRawStr(snap, "model_usage", formatModelMap(modelMessages, "msgs")) - setRawStr(snap, "model_turns", formatModelMap(modelTurns, "turns")) - setRawStr(snap, "model_sessions", formatModelMapPlain(modelSessions)) - setRawStr(snap, "model_response_chars", formatModelMap(modelResponseChars, "chars")) - setRawStr(snap, "model_reasoning_chars", formatModelMap(modelReasoningChars, 
"chars")) - setRawStr(snap, "model_tool_calls", formatModelMap(modelToolCalls, "calls")) - - sort.Slice(sessions, func(i, j int) bool { - ti := sessions[i].updatedAt - if ti.IsZero() { - ti = sessions[i].createdAt - } - tj := sessions[j].updatedAt - if tj.IsZero() { - tj = sessions[j].createdAt - } - return ti.After(tj) - }) - - var totalMessages, totalTurns, totalResponse, totalReasoning, totalTools int - totalTokens := 0.0 - for _, s := range sessions { - totalMessages += s.messages - totalTurns += s.turns - totalResponse += s.responseChars - totalReasoning += s.reasoningChars - totalTools += s.toolCalls - tokens := float64(s.tokenUsed) - if s.tokenBurn > 0 { - tokens = s.tokenBurn - } - totalTokens += tokens - } - setRawInt(snap, "total_cli_messages", totalMessages) - setRawInt(snap, "total_cli_turns", totalTurns) - setRawInt(snap, "total_response_chars", totalResponse) - setRawInt(snap, "total_reasoning_chars", totalReasoning) - setRawInt(snap, "total_tool_calls", totalTools) - - setUsedMetric(snap, "total_messages", float64(totalMessages), "messages", copilotAllTimeWindow) - setUsedMetric(snap, "total_sessions", float64(len(sessions)), "sessions", copilotAllTimeWindow) - setUsedMetric(snap, "total_turns", float64(totalTurns), "turns", copilotAllTimeWindow) - setUsedMetric(snap, "total_tool_calls", float64(totalTools), "calls", copilotAllTimeWindow) - setUsedMetric(snap, "tool_calls_total", float64(totalTools), "calls", copilotAllTimeWindow) - if totalTools > 0 { - setUsedMetric(snap, "tool_completed", float64(totalTools), "calls", copilotAllTimeWindow) - setUsedMetric(snap, "tool_success_rate", 100.0, "%", copilotAllTimeWindow) - } - setUsedMetric(snap, "total_response_chars", float64(totalResponse), "chars", copilotAllTimeWindow) - setUsedMetric(snap, "total_reasoning_chars", float64(totalReasoning), "chars", copilotAllTimeWindow) - setUsedMetric(snap, "total_conversations", float64(len(sessions)), "sessions", copilotAllTimeWindow) - setUsedMetric(snap, 
"cli_messages", float64(totalMessages), "messages", copilotAllTimeWindow) - setUsedMetric(snap, "cli_turns", float64(totalTurns), "turns", copilotAllTimeWindow) - setUsedMetric(snap, "cli_sessions", float64(len(sessions)), "sessions", copilotAllTimeWindow) - setUsedMetric(snap, "cli_tool_calls", float64(totalTools), "calls", copilotAllTimeWindow) - setUsedMetric(snap, "cli_response_chars", float64(totalResponse), "chars", copilotAllTimeWindow) - setUsedMetric(snap, "cli_reasoning_chars", float64(totalReasoning), "chars", copilotAllTimeWindow) - setUsedMetric(snap, "cli_input_tokens", totalTokens, "tokens", copilotAllTimeWindow) - setUsedMetric(snap, "cli_total_tokens", totalTokens, "tokens", copilotAllTimeWindow) - - // Emit new metrics from assistant.usage and session.shutdown events. - var totalUsageOutputTokens, totalUsageCacheRead, totalUsageCacheWrite, totalUsageCost float64 - var totalUsageRequests int - for _, v := range usageOutputTokens { - totalUsageOutputTokens += v - } - for _, v := range usageCacheReadTokens { - totalUsageCacheRead += v - } - for _, v := range usageCacheWriteTokens { - totalUsageCacheWrite += v - } - for _, v := range usageCost { - totalUsageCost += v - } - for _, v := range usageRequests { - totalUsageRequests += v - } - if totalUsageOutputTokens > 0 { - setUsedMetric(snap, "cli_output_tokens", totalUsageOutputTokens, "tokens", copilotAllTimeWindow) - } - if totalUsageCacheRead > 0 { - setUsedMetric(snap, "cli_cache_read_tokens", totalUsageCacheRead, "tokens", copilotAllTimeWindow) - } - if totalUsageCacheWrite > 0 { - setUsedMetric(snap, "cli_cache_write_tokens", totalUsageCacheWrite, "tokens", copilotAllTimeWindow) - } - if totalUsageCost > 0 { - setUsedMetric(snap, "cli_cost", totalUsageCost, "USD", copilotAllTimeWindow) - } - if totalUsageRequests > 0 { - setUsedMetric(snap, "cli_premium_requests", float64(totalUsageRequests), "requests", copilotAllTimeWindow) - } else if shutdownPremiumRequests > 0 { - setUsedMetric(snap, 
"cli_premium_requests", float64(shutdownPremiumRequests), "requests", copilotAllTimeWindow) - } - if shutdownLinesAdded > 0 || shutdownLinesRemoved > 0 { - setUsedMetric(snap, "cli_lines_added", float64(shutdownLinesAdded), "lines", copilotAllTimeWindow) - setUsedMetric(snap, "cli_lines_removed", float64(shutdownLinesRemoved), "lines", copilotAllTimeWindow) - } - if shutdownFilesModified > 0 { - setUsedMetric(snap, "cli_files_modified", float64(shutdownFilesModified), "files", copilotAllTimeWindow) - } - if totalUsageRequests > 0 { - var totalDuration int64 - for _, d := range usageDuration { - totalDuration += d - } - avgMs := float64(totalDuration) / float64(totalUsageRequests) - setUsedMetric(snap, "cli_avg_latency_ms", avgMs, "ms", copilotAllTimeWindow) - } - - // Apply latestQuotaSnapshots as fallback for premium_interactions_quota. - if qs, ok := latestQuotaSnapshots["premium_interactions"]; ok { - if _, exists := snap.Metrics["premium_interactions_quota"]; !exists { - entitlement := float64(qs.EntitlementRequests) - used := float64(qs.UsedRequests) - remaining := entitlement - used - if remaining < 0 { - remaining = 0 - } - snap.Metrics["premium_interactions_quota"] = core.Metric{ - Limit: &entitlement, - Used: core.Float64Ptr(used), - Remaining: core.Float64Ptr(remaining), - Unit: "requests", - Window: "billing-cycle", - } - } - } - - if _, v := latestSeriesValue(dailyCost); v > 0 { - setUsedMetric(snap, "cost_today", v, "USD", "today") - } - setUsedMetric(snap, "7d_cost", sumLastNDays(dailyCost, 7), "USD", "7d") - - if _, v := latestSeriesValue(dailyMessages); v > 0 { - setUsedMetric(snap, "messages_today", v, "messages", "today") - } - if _, v := latestSeriesValue(dailySessions); v > 0 { - setUsedMetric(snap, "sessions_today", v, "sessions", "today") - } - if _, v := latestSeriesValue(dailyToolCalls); v > 0 { - setUsedMetric(snap, "tool_calls_today", v, "calls", "today") - } - if _, v := latestSeriesValue(dailyTokens); v > 0 { - setUsedMetric(snap, 
"tokens_today", v, "tokens", "today") - } - setUsedMetric(snap, "7d_messages", sumLastNDays(dailyMessages, 7), "messages", "7d") - setUsedMetric(snap, "7d_sessions", sumLastNDays(dailySessions, 7), "sessions", "7d") - setUsedMetric(snap, "7d_tool_calls", sumLastNDays(dailyToolCalls, 7), "calls", "7d") - setUsedMetric(snap, "7d_tokens", sumLastNDays(dailyTokens, 7), "tokens", "7d") - setUsedMetric(snap, "total_prompts", float64(totalMessages), "prompts", copilotAllTimeWindow) - - // Merge usage event models into the topModels set so they appear even if - // they have no log-compaction data. - allModelTokens := make(map[string]float64, len(modelInputTokens)) - for k, v := range modelInputTokens { - allModelTokens[k] = v - } - for k, v := range usageInputTokens { - if allModelTokens[k] < v { - allModelTokens[k] = v - } - } - allModelMessages := make(map[string]int, len(modelMessages)) - for k, v := range modelMessages { - allModelMessages[k] = v - } - for k, v := range usageRequests { - if allModelMessages[k] < v { - allModelMessages[k] = v - } - } - topModels := topModelNames(allModelTokens, allModelMessages, maxCopilotModels) - for _, model := range topModels { - prefix := "model_" + sanitizeMetricName(model) - rec := core.ModelUsageRecord{ - RawModelID: model, - RawSource: "json", - Window: copilotAllTimeWindow, - } - - // Prefer usage event data (accurate) over log-compaction data (approximate). 
- inputTok := modelInputTokens[model] - if v := usageInputTokens[model]; v > 0 { - inputTok = v - } - outputTok := usageOutputTokens[model] - cacheTok := usageCacheReadTokens[model] + usageCacheWriteTokens[model] - - setUsedMetric(snap, prefix+"_input_tokens", inputTok, "tokens", copilotAllTimeWindow) - if inputTok > 0 { - rec.InputTokens = core.Float64Ptr(inputTok) - } - if outputTok > 0 { - setUsedMetric(snap, prefix+"_output_tokens", outputTok, "tokens", copilotAllTimeWindow) - rec.OutputTokens = core.Float64Ptr(outputTok) - } - if cacheTok > 0 { - rec.CachedTokens = core.Float64Ptr(cacheTok) - } - totalTok := inputTok + outputTok - if totalTok > 0 { - rec.TotalTokens = core.Float64Ptr(totalTok) - } - - // Cost from usage events; fall back to shutdown model metrics. - modelCost := usageCost[model] - if modelCost == 0 { - modelCost = shutdownModelCost[model] - } - if modelCost > 0 { - rec.CostUSD = core.Float64Ptr(modelCost) - setUsedMetric(snap, prefix+"_cost", modelCost, "USD", copilotAllTimeWindow) - } - - // Requests from usage events. 
- if reqs := usageRequests[model]; reqs > 0 { - rec.Requests = core.Float64Ptr(float64(reqs)) - } - - setUsedMetric(snap, prefix+"_messages", float64(modelMessages[model]), "messages", copilotAllTimeWindow) - setUsedMetric(snap, prefix+"_turns", float64(modelTurns[model]), "turns", copilotAllTimeWindow) - setUsedMetric(snap, prefix+"_sessions", float64(modelSessions[model]), "sessions", copilotAllTimeWindow) - setUsedMetric(snap, prefix+"_tool_calls", float64(modelToolCalls[model]), "calls", copilotAllTimeWindow) - setUsedMetric(snap, prefix+"_response_chars", float64(modelResponseChars[model]), "chars", copilotAllTimeWindow) - setUsedMetric(snap, prefix+"_reasoning_chars", float64(modelReasoningChars[model]), "chars", copilotAllTimeWindow) - snap.AppendModelUsage(rec) - } - - topClients := topCopilotClientNames(clientTokens, clientSessions, clientMessages, maxCopilotClients) - for _, client := range topClients { - clientPrefix := "client_" + client - setUsedMetric(snap, clientPrefix+"_total_tokens", clientTokens[client], "tokens", copilotAllTimeWindow) - setUsedMetric(snap, clientPrefix+"_input_tokens", clientTokens[client], "tokens", copilotAllTimeWindow) - setUsedMetric(snap, clientPrefix+"_sessions", float64(clientSessions[client]), "sessions", copilotAllTimeWindow) - if byDay := dailyClientTokens[client]; len(byDay) > 0 { - storeSeries(snap, "tokens_client_"+client, byDay) - } - } - setRawStr(snap, "client_usage", formatCopilotClientUsage(topClients, clientLabels, clientTokens, clientSessions)) - setRawStr(snap, "tool_usage", formatModelMap(toolUsageCounts, "calls")) - setRawStr(snap, "language_usage", formatModelMap(languageUsageCounts, "req")) - for toolName, count := range toolUsageCounts { - if count <= 0 { - continue - } - setUsedMetric(snap, "tool_"+sanitizeMetricName(toolName), float64(count), "calls", copilotAllTimeWindow) - } - for lang, count := range languageUsageCounts { - if count <= 0 { - continue - } - setUsedMetric(snap, 
"lang_"+sanitizeMetricName(lang), float64(count), "requests", copilotAllTimeWindow) - } - - linesAdded := shutdownLinesAdded - if inferredLinesAdded > linesAdded { - linesAdded = inferredLinesAdded - } - linesRemoved := shutdownLinesRemoved - if inferredLinesRemoved > linesRemoved { - linesRemoved = inferredLinesRemoved - } - filesChanged := shutdownFilesModified - if len(changedFiles) > filesChanged { - filesChanged = len(changedFiles) - } - if linesAdded > 0 { - setUsedMetric(snap, "composer_lines_added", float64(linesAdded), "lines", copilotAllTimeWindow) - } - if linesRemoved > 0 { - setUsedMetric(snap, "composer_lines_removed", float64(linesRemoved), "lines", copilotAllTimeWindow) - } - if filesChanged > 0 { - setUsedMetric(snap, "composer_files_changed", float64(filesChanged), "files", copilotAllTimeWindow) - } - if inferredCommitCount > 0 { - setUsedMetric(snap, "scored_commits", float64(inferredCommitCount), "commits", copilotAllTimeWindow) - } - if linesAdded > 0 || linesRemoved > 0 { - hundred := 100.0 - zero := 0.0 - snap.Metrics["ai_code_percentage"] = core.Metric{ - Used: &hundred, - Remaining: &zero, - Limit: &hundred, - Unit: "%", - Window: copilotAllTimeWindow, - } - } - - if len(sessions) > 0 { - r := sessions[0] - if r.client != "" { - snap.Raw["last_session_client"] = r.client - } - snap.Raw["last_session_repo"] = r.repo - snap.Raw["last_session_branch"] = r.branch - if r.summary != "" { - snap.Raw["last_session_summary"] = r.summary - } - if !r.updatedAt.IsZero() { - snap.Raw["last_session_time"] = r.updatedAt.Format(time.RFC3339) - } - if r.model != "" { - snap.Raw["last_session_model"] = r.model - } - sessionTokens := float64(r.tokenUsed) - if r.tokenBurn > 0 { - sessionTokens = r.tokenBurn - } - if sessionTokens > 0 { - snap.Raw["last_session_tokens"] = fmt.Sprintf("%.0f/%d", sessionTokens, r.tokenTotal) - setUsedMetric(snap, "session_input_tokens", sessionTokens, "tokens", "session") - setUsedMetric(snap, "session_total_tokens", 
sessionTokens, "tokens", "session") - if r.tokenTotal > 0 { - limit := float64(r.tokenTotal) - snap.Metrics["context_window"] = core.Metric{ - Limit: &limit, - Used: core.Float64Ptr(sessionTokens), - Remaining: core.Float64Ptr(max(limit-sessionTokens, 0)), - Unit: "tokens", - Window: "session", - } - } - } - } -} - -func parseCompactionLine(line string) logTokenEntry { - var entry logTokenEntry - - if len(line) >= 24 { - if t, err := time.Parse("2006-01-02T15:04:05.000Z", line[:24]); err == nil { - entry.Timestamp = t - } - } - - parenStart := strings.Index(line, "(") - parenEnd := strings.Index(line, " tokens)") - if parenStart >= 0 && parenEnd > parenStart { - inner := line[parenStart+1 : parenEnd] - parts := strings.Split(inner, "/") - if len(parts) == 2 { - fmt.Sscanf(parts[0], "%d", &entry.Used) - fmt.Sscanf(parts[1], "%d", &entry.Total) - } - } - - return entry -} - -func sortCompactionEntries(entries []logTokenEntry) { - sort.SliceStable(entries, func(i, j int) bool { - ti := entries[i].Timestamp - tj := entries[j].Timestamp - switch { - case ti.IsZero() && tj.IsZero(): - return entries[i].Used < entries[j].Used - case ti.IsZero(): - return false - case tj.IsZero(): - return true - default: - return ti.Before(tj) - } - }) -} - -func newestCompactionEntry(entries []logTokenEntry) (logTokenEntry, bool) { - if len(entries) == 0 { - return logTokenEntry{}, false - } - best := entries[0] - for _, te := range entries[1:] { - if best.Timestamp.IsZero() && !te.Timestamp.IsZero() { - best = te - continue - } - if !best.Timestamp.IsZero() && te.Timestamp.IsZero() { - continue - } - if !te.Timestamp.IsZero() && te.Timestamp.After(best.Timestamp) { - best = te - continue - } - if best.Timestamp.Equal(te.Timestamp) && te.Used > best.Used { - best = te - } - } - return best, true -} - func (p *Provider) resolveStatus(snap *core.UsageSnapshot, authOutput string) { lower := strings.ToLower(authOutput) if strings.Contains(lower, "rate limit") || strings.Contains(lower, 
"rate_limit") { @@ -1397,563 +455,6 @@ func skuLabel(sku string) string { } } -func parseSimpleYAML(content string) map[string]string { - result := make(map[string]string) - for _, line := range strings.Split(content, "\n") { - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "#") { - continue - } - idx := strings.Index(line, ":") - if idx < 0 { - continue - } - key := strings.TrimSpace(line[:idx]) - val := strings.TrimSpace(line[idx+1:]) - result[key] = val - } - return result -} - -func storeSeries(snap *core.UsageSnapshot, key string, m map[string]float64) { - if len(m) > 0 { - snap.DailySeries[key] = core.SortedTimePoints(m) - } -} - -func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { - if value <= 0 { - return - } - v := value - snap.Metrics[key] = core.Metric{ - Used: &v, - Unit: unit, - Window: window, - } -} - -func dayForSession(createdAt, updatedAt time.Time) string { - if !updatedAt.IsZero() { - return updatedAt.Format("2006-01-02") - } - if !createdAt.IsZero() { - return createdAt.Format("2006-01-02") - } - return "" -} - -func latestSeriesValue(m map[string]float64) (string, float64) { - if len(m) == 0 { - return "", 0 - } - dates := lo.Keys(m) - sort.Strings(dates) - last := dates[len(dates)-1] - return last, m[last] -} - -func sumLastNDays(m map[string]float64, days int) float64 { - if len(m) == 0 || days <= 0 { - return 0 - } - date, _ := latestSeriesValue(m) - if date == "" { - return 0 - } - end, err := time.Parse("2006-01-02", date) - if err != nil { - return 0 - } - start := end.AddDate(0, 0, -(days - 1)) - sum := 0.0 - for d, v := range m { - t, err := time.Parse("2006-01-02", d) - if err != nil { - continue - } - if !t.Before(start) && !t.After(end) { - sum += v - } - } - return sum -} - -func topModelNames(tokenMap map[string]float64, messageMap map[string]int, limit int) []string { - type row struct { - model string - tokens float64 - messages int - } - - seen := 
make(map[string]bool) - var rows []row - for model, tokens := range tokenMap { - seen[model] = true - rows = append(rows, row{model: model, tokens: tokens, messages: messageMap[model]}) - } - for model, messages := range messageMap { - if seen[model] { - continue - } - rows = append(rows, row{model: model, messages: messages}) - } - - sort.Slice(rows, func(i, j int) bool { - if rows[i].tokens == rows[j].tokens { - if rows[i].messages == rows[j].messages { - return rows[i].model < rows[j].model - } - return rows[i].messages > rows[j].messages - } - return rows[i].tokens > rows[j].tokens - }) - - if limit > 0 && len(rows) > limit { - rows = rows[:limit] - } - return lo.Map(rows, func(r row, _ int) string { return r.model }) -} - -func topCopilotClientNames(tokenMap map[string]float64, sessionMap, messageMap map[string]int, limit int) []string { - type row struct { - client string - tokens float64 - sessions int - messages int - } - - seen := make(map[string]bool) - var rows []row - for client, tokens := range tokenMap { - seen[client] = true - rows = append(rows, row{ - client: client, - tokens: tokens, - sessions: sessionMap[client], - messages: messageMap[client], - }) - } - for client, sessions := range sessionMap { - if seen[client] { - continue - } - seen[client] = true - rows = append(rows, row{ - client: client, - sessions: sessions, - messages: messageMap[client], - }) - } - for client, messages := range messageMap { - if seen[client] { - continue - } - rows = append(rows, row{ - client: client, - messages: messages, - }) - } - - sort.Slice(rows, func(i, j int) bool { - if rows[i].tokens == rows[j].tokens { - if rows[i].sessions == rows[j].sessions { - if rows[i].messages == rows[j].messages { - return rows[i].client < rows[j].client - } - return rows[i].messages > rows[j].messages - } - return rows[i].sessions > rows[j].sessions - } - return rows[i].tokens > rows[j].tokens - }) - - if limit > 0 && len(rows) > limit { - rows = rows[:limit] - } - return 
lo.Map(rows, func(r row, _ int) string { return r.client }) -} - -func normalizeCopilotClient(repo, cwd string) string { - repo = strings.TrimSpace(repo) - if repo != "" && repo != "." { - return repo - } - - cwd = strings.TrimSpace(cwd) - if cwd != "" { - base := strings.TrimSpace(filepath.Base(cwd)) - if base != "" && base != "." && base != string(filepath.Separator) { - return base - } - } - - return "cli" -} - -func formatCopilotClientUsage(clients []string, labels map[string]string, tokens map[string]float64, sessions map[string]int) string { - if len(clients) == 0 { - return "" - } - - parts := make([]string, 0, len(clients)) - for _, client := range clients { - label := labels[client] - if label == "" { - label = client - } - - value := tokens[client] - sessionCount := sessions[client] - - item := fmt.Sprintf("%s %s tok", label, formatCopilotTokenCount(value)) - if sessionCount > 0 { - item += fmt.Sprintf(" · %d sess", sessionCount) - } - parts = append(parts, item) - } - return strings.Join(parts, ", ") -} - -func formatCopilotTokenCount(value float64) string { return shared.FormatTokenCountF(value) } - -func parseDayFromTimestamp(ts string) string { - t := flexParseTime(ts) - if t.IsZero() { - return "" - } - return t.Format("2006-01-02") -} - -func flexParseTime(s string) time.Time { - return shared.FlexParseTime(s) -} - -func parseCopilotTime(s string) time.Time { - return shared.FlexParseTime(s) -} - -func extractModelFromInfoMsg(msg string) string { - idx := strings.Index(msg, ": ") - if idx < 0 { - return "" - } - m := strings.TrimSpace(msg[idx+2:]) - if pIdx := strings.Index(m, " ("); pIdx >= 0 { - m = m[:pIdx] - } - return m -} - -func extractCopilotToolName(raw json.RawMessage) string { - if len(strings.TrimSpace(string(raw))) == 0 { - return "" - } - - var tool struct { - Name string `json:"name"` - ToolName string `json:"toolName"` - Tool string `json:"tool"` - } - if err := json.Unmarshal(raw, &tool); err != nil { - return "" - } - - candidates 
:= []string{tool.Name, tool.ToolName, tool.Tool} - for _, candidate := range candidates { - candidate = strings.TrimSpace(candidate) - if candidate != "" { - return candidate - } - } - return "" -} - -func isCopilotMutatingTool(toolName string) bool { - name := strings.ToLower(strings.TrimSpace(toolName)) - if name == "" { - return false - } - return strings.Contains(name, "edit") || - strings.Contains(name, "write") || - strings.Contains(name, "create") || - strings.Contains(name, "delete") || - strings.Contains(name, "rename") || - strings.Contains(name, "move") || - strings.Contains(name, "replace") -} - -func extractCopilotToolCommand(raw json.RawMessage) string { - var payload any - if json.Unmarshal(raw, &payload) != nil { - return "" - } - var command string - var walk func(v any) - walk = func(v any) { - if command != "" || v == nil { - return - } - switch value := v.(type) { - case map[string]any: - for key, child := range value { - k := strings.ToLower(strings.TrimSpace(key)) - if k == "command" || k == "cmd" || k == "script" || k == "shell_command" { - if s, ok := child.(string); ok { - command = strings.TrimSpace(s) - return - } - } - } - for _, child := range value { - walk(child) - if command != "" { - return - } - } - case []any: - for _, child := range value { - walk(child) - if command != "" { - return - } - } - } - } - walk(payload) - return command -} - -func extractCopilotToolPaths(raw json.RawMessage) []string { - var payload any - if json.Unmarshal(raw, &payload) != nil { - return nil - } - - pathHints := map[string]bool{ - "path": true, "paths": true, "file": true, "files": true, "filepath": true, "file_path": true, - "cwd": true, "dir": true, "directory": true, "target": true, "pattern": true, "glob": true, - "from": true, "to": true, "include": true, "exclude": true, - } - - candidates := make(map[string]bool) - var walk func(v any, hinted bool) - walk = func(v any, hinted bool) { - switch value := v.(type) { - case map[string]any: - for 
key, child := range value { - k := strings.ToLower(strings.TrimSpace(key)) - childHinted := hinted || pathHints[k] || strings.Contains(k, "path") || strings.Contains(k, "file") - walk(child, childHinted) - } - case []any: - for _, child := range value { - walk(child, hinted) - } - case string: - if !hinted { - return - } - for _, token := range extractCopilotPathTokens(value) { - candidates[token] = true - } - } - } - walk(payload, false) - - out := make([]string, 0, len(candidates)) - for c := range candidates { - out = append(out, c) - } - sort.Strings(out) - return out -} - -func extractCopilotPathTokens(raw string) []string { - raw = strings.TrimSpace(raw) - if raw == "" { - return nil - } - fields := strings.Fields(raw) - if len(fields) == 0 { - fields = []string{raw} - } - - var out []string - for _, field := range fields { - token := strings.Trim(field, "\"'`()[]{}<>,:;") - if token == "" { - continue - } - lower := strings.ToLower(token) - if strings.HasPrefix(lower, "http://") || strings.HasPrefix(lower, "https://") || strings.HasPrefix(lower, "file://") { - continue - } - if strings.HasPrefix(token, "-") { - continue - } - if !strings.Contains(token, "/") && !strings.Contains(token, "\\") && !strings.Contains(token, ".") { - continue - } - token = strings.TrimPrefix(token, "./") - if token == "" { - continue - } - out = append(out, token) - } - return lo.Uniq(out) -} - -func estimateCopilotToolLineDelta(raw json.RawMessage) (added int, removed int) { - var payload any - if json.Unmarshal(raw, &payload) != nil { - return 0, 0 - } - lineCount := func(text string) int { - text = strings.TrimSpace(text) - if text == "" { - return 0 - } - return strings.Count(text, "\n") + 1 - } - var walk func(v any) - walk = func(v any) { - switch value := v.(type) { - case map[string]any: - var oldText, newText string - for _, key := range []string{"old_string", "old_text", "from", "replace"} { - if rawValue, ok := value[key]; ok { - if s, ok := rawValue.(string); ok { - 
oldText = s - break - } - } - } - for _, key := range []string{"new_string", "new_text", "to", "with"} { - if rawValue, ok := value[key]; ok { - if s, ok := rawValue.(string); ok { - newText = s - break - } - } - } - if oldText != "" || newText != "" { - removed += lineCount(oldText) - added += lineCount(newText) - } - if rawValue, ok := value["content"]; ok { - if s, ok := rawValue.(string); ok { - added += lineCount(s) - } - } - for _, child := range value { - walk(child) - } - case []any: - for _, child := range value { - walk(child) - } - } - } - walk(payload) - return added, removed -} - -func inferCopilotLanguageFromPath(path string) string { - p := strings.ToLower(strings.TrimSpace(path)) - if p == "" { - return "" - } - base := strings.ToLower(filepath.Base(p)) - switch base { - case "dockerfile": - return "docker" - case "makefile": - return "make" - } - switch strings.ToLower(filepath.Ext(p)) { - case ".go": - return "go" - case ".py": - return "python" - case ".ts", ".tsx": - return "typescript" - case ".js", ".jsx": - return "javascript" - case ".tf", ".tfvars", ".hcl": - return "terraform" - case ".sh", ".bash", ".zsh", ".fish": - return "shell" - case ".md", ".mdx": - return "markdown" - case ".json": - return "json" - case ".yml", ".yaml": - return "yaml" - case ".sql": - return "sql" - case ".rs": - return "rust" - case ".java": - return "java" - case ".c", ".h": - return "c" - case ".cc", ".cpp", ".cxx", ".hpp": - return "cpp" - case ".rb": - return "ruby" - case ".php": - return "php" - case ".swift": - return "swift" - case ".vue": - return "vue" - case ".svelte": - return "svelte" - case ".toml": - return "toml" - case ".xml": - return "xml" - } - return "" -} - -func formatModelMap(m map[string]int, unit string) string { - if len(m) == 0 { - return "" - } - parts := make([]string, 0, len(m)) - for model, count := range m { - parts = append(parts, fmt.Sprintf("%s: %d %s", model, count, unit)) - } - sort.Strings(parts) - return 
strings.Join(parts, ", ") -} - -func formatModelMapPlain(m map[string]int) string { - if len(m) == 0 { - return "" - } - parts := make([]string, 0, len(m)) - for model, count := range m { - parts = append(parts, fmt.Sprintf("%s: %d", model, count)) - } - sort.Strings(parts) - return strings.Join(parts, ", ") -} - -func setRawInt(snap *core.UsageSnapshot, key string, v int) { - if v > 0 { - snap.Raw[key] = fmt.Sprintf("%d", v) - } -} - -func setRawStr(snap *core.UsageSnapshot, key, v string) { - if v != "" { - snap.Raw[key] = v - } -} - func firstNonNilFloat(values ...*float64) *float64 { for _, v := range values { if v != nil { @@ -1979,33 +480,3 @@ func clampPercent(v float64) float64 { } return v } - -func sanitizeMetricName(name string) string { - name = strings.ToLower(strings.TrimSpace(name)) - if name == "" { - return "unknown" - } - - var b strings.Builder - lastUnderscore := false - for _, r := range name { - switch { - case r >= 'a' && r <= 'z': - b.WriteRune(r) - lastUnderscore = false - case r >= '0' && r <= '9': - b.WriteRune(r) - lastUnderscore = false - default: - if !lastUnderscore { - b.WriteByte('_') - lastUnderscore = true - } - } - } - out := strings.Trim(b.String(), "_") - if out == "" { - return "unknown" - } - return out -} diff --git a/internal/providers/copilot/local_data.go b/internal/providers/copilot/local_data.go new file mode 100644 index 0000000..10498cb --- /dev/null +++ b/internal/providers/copilot/local_data.go @@ -0,0 +1,877 @@ +package copilot + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (p *Provider) readConfig(copilotDir string, snap *core.UsageSnapshot) { + data, err := os.ReadFile(filepath.Join(copilotDir, "config.json")) + if err != nil { + return + } + var cfg copilotConfig + if json.Unmarshal(data, &cfg) != nil { + return + } + if cfg.Model != "" { + snap.Raw["preferred_model"] = cfg.Model + } + if 
cfg.ReasoningEffort != "" { + snap.Raw["reasoning_effort"] = cfg.ReasoningEffort + } + if cfg.Experimental { + snap.Raw["experimental"] = "enabled" + } +} + +type logSummary struct { + DefaultModel string + SessionTokens map[string]logTokenEntry + SessionBurn map[string]float64 +} + +func (p *Provider) readLogs(copilotDir string, snap *core.UsageSnapshot) logSummary { + ls := logSummary{ + SessionTokens: make(map[string]logTokenEntry), + SessionBurn: make(map[string]float64), + } + sessionEntries := make(map[string][]logTokenEntry) + logDir := filepath.Join(copilotDir, "logs") + entries, err := os.ReadDir(logDir) + if err != nil { + return ls + } + + var allTokenEntries []logTokenEntry + + for _, entry := range entries { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".log") { + continue + } + data, err := os.ReadFile(filepath.Join(logDir, entry.Name())) + if err != nil { + continue + } + + var currentSessionID string + for _, line := range strings.Split(string(data), "\n") { + line = strings.TrimSpace(line) + + if strings.Contains(line, "Workspace initialized:") { + if idx := strings.Index(line, "Workspace initialized:"); idx >= 0 { + rest := strings.TrimSpace(line[idx+len("Workspace initialized:"):]) + if spIdx := strings.Index(rest, " "); spIdx > 0 { + currentSessionID = rest[:spIdx] + } else if rest != "" { + currentSessionID = rest + } + } + } + + if strings.Contains(line, "Using default model:") { + if idx := strings.Index(line, "Using default model:"); idx >= 0 { + m := strings.TrimSpace(line[idx+len("Using default model:"):]) + if m != "" { + ls.DefaultModel = m + } + } + } + + if strings.Contains(line, "CompactionProcessor: Utilization") { + te := parseCompactionLine(line) + if te.Total > 0 { + allTokenEntries = append(allTokenEntries, te) + if currentSessionID != "" { + sessionEntries[currentSessionID] = append(sessionEntries[currentSessionID], te) + } + } + } + } + } + + if ls.DefaultModel != "" { + snap.Raw["default_model"] = ls.DefaultModel + } 
+ + for sessionID, entries := range sessionEntries { + sortCompactionEntries(entries) + last := entries[len(entries)-1] + ls.SessionTokens[sessionID] = last + + burn := 0.0 + for idx, te := range entries { + if idx == 0 { + if te.Used > 0 { + burn += float64(te.Used) + } + continue + } + delta := te.Used - entries[idx-1].Used + if delta > 0 { + burn += float64(delta) + } + } + if burn > 0 { + ls.SessionBurn[sessionID] = burn + } + } + + if last, ok := newestCompactionEntry(allTokenEntries); ok { + snap.Raw["context_window_tokens"] = fmt.Sprintf("%d/%d", last.Used, last.Total) + pct := float64(last.Used) / float64(last.Total) * 100 + snap.Raw["context_window_pct"] = fmt.Sprintf("%.1f%%", pct) + used := float64(last.Used) + limit := float64(last.Total) + snap.Metrics["context_window"] = core.Metric{ + Limit: &limit, + Used: &used, + Remaining: core.Float64Ptr(limit - used), + Unit: "tokens", + Window: "session", + } + } + + return ls +} + +type assistantMsgData struct { + Content string `json:"content"` + ReasoningTxt string `json:"reasoningText"` + ToolRequests json.RawMessage `json:"toolRequests"` +} + +type quotaSnapshotEntry struct { + EntitlementRequests int `json:"entitlementRequests"` + UsedRequests int `json:"usedRequests"` + RemainingPercentage float64 `json:"remainingPercentage"` + ResetDate string `json:"resetDate"` +} + +type assistantUsageData struct { + Model string `json:"model"` + InputTokens float64 `json:"inputTokens"` + OutputTokens float64 `json:"outputTokens"` + CacheReadTokens float64 `json:"cacheReadTokens"` + CacheWriteTokens float64 `json:"cacheWriteTokens"` + Cost float64 `json:"cost"` + Duration int64 `json:"duration"` + QuotaSnapshots map[string]quotaSnapshotEntry `json:"quotaSnapshots"` +} + +type sessionShutdownData struct { + ShutdownType string `json:"shutdownType"` + TotalPremiumRequests int `json:"totalPremiumRequests"` + TotalAPIDurationMs int64 `json:"totalApiDurationMs"` + SessionStartTime string `json:"sessionStartTime"` + 
CodeChanges shutdownCodeChanges `json:"codeChanges"` + ModelMetrics map[string]shutdownModelMetric `json:"modelMetrics"` +} + +type shutdownCodeChanges struct { + LinesAdded int `json:"linesAdded"` + LinesRemoved int `json:"linesRemoved"` + FilesModified int `json:"filesModified"` +} + +type shutdownModelMetric struct { + Requests struct { + Count int `json:"count"` + Cost float64 `json:"cost"` + } `json:"requests"` + Usage struct { + InputTokens float64 `json:"inputTokens"` + OutputTokens float64 `json:"outputTokens"` + CacheReadTokens float64 `json:"cacheReadTokens"` + CacheWriteTokens float64 `json:"cacheWriteTokens"` + } `json:"usage"` +} + +func (p *Provider) readSessions(copilotDir string, snap *core.UsageSnapshot, logs logSummary) { + sessionDir := filepath.Join(copilotDir, "session-state") + entries, err := os.ReadDir(sessionDir) + if err != nil { + return + } + + snap.Raw["total_sessions"] = fmt.Sprintf("%d", len(entries)) + + type sessionInfo struct { + id string + createdAt time.Time + updatedAt time.Time + cwd string + repo string + branch string + client string + summary string + messages int + turns int + model string + responseChars int + reasoningChars int + toolCalls int + tokenUsed int + tokenTotal int + tokenBurn float64 + usageCost float64 + premiumRequests int + shutdownPremiumRequests int + linesAdded int + linesRemoved int + filesModified int + } + + var sessions []sessionInfo + dailyMessages := make(map[string]float64) + dailySessions := make(map[string]float64) + dailyToolCalls := make(map[string]float64) + dailyTokens := make(map[string]float64) + modelMessages := make(map[string]int) + modelTurns := make(map[string]int) + modelSessions := make(map[string]int) + modelResponseChars := make(map[string]int) + modelReasoningChars := make(map[string]int) + modelToolCalls := make(map[string]int) + dailyModelMessages := make(map[string]map[string]float64) + dailyModelTokens := make(map[string]map[string]float64) + modelInputTokens := 
make(map[string]float64) + usageInputTokens := make(map[string]float64) + usageOutputTokens := make(map[string]float64) + usageCacheReadTokens := make(map[string]float64) + usageCacheWriteTokens := make(map[string]float64) + usageCost := make(map[string]float64) + usageRequests := make(map[string]int) + usageDuration := make(map[string]int64) + dailyCost := make(map[string]float64) + var latestQuotaSnapshots map[string]quotaSnapshotEntry + var shutdownPremiumRequests int + var shutdownLinesAdded, shutdownLinesRemoved, shutdownFilesModified int + shutdownModelCost := make(map[string]float64) + shutdownModelRequests := make(map[string]int) + shutdownModelInputTokens := make(map[string]float64) + shutdownModelOutputTokens := make(map[string]float64) + toolUsageCounts := make(map[string]int) + languageUsageCounts := make(map[string]int) + changedFiles := make(map[string]bool) + commitCommands := make(map[string]bool) + clientLabels := make(map[string]string) + clientTokens := make(map[string]float64) + clientSessions := make(map[string]int) + clientMessages := make(map[string]int) + dailyClientTokens := make(map[string]map[string]float64) + var inferredLinesAdded, inferredLinesRemoved int + var inferredCommitCount int + + for _, entry := range entries { + if !entry.IsDir() { + continue + } + si := sessionInfo{id: entry.Name()} + sessPath := filepath.Join(sessionDir, entry.Name()) + + if wsData, err := os.ReadFile(filepath.Join(sessPath, "workspace.yaml")); err == nil { + ws := parseSimpleYAML(string(wsData)) + si.cwd = ws["cwd"] + si.repo = ws["repository"] + si.branch = ws["branch"] + si.summary = ws["summary"] + si.createdAt = flexParseTime(ws["created_at"]) + si.updatedAt = flexParseTime(ws["updated_at"]) + } + + if te, ok := logs.SessionTokens[si.id]; ok { + si.tokenUsed = te.Used + si.tokenTotal = te.Total + if !te.Timestamp.IsZero() { + if si.createdAt.IsZero() { + si.createdAt = te.Timestamp + } + if si.updatedAt.IsZero() || te.Timestamp.After(si.updatedAt) { + 
si.updatedAt = te.Timestamp + } + } + } + if burn, ok := logs.SessionBurn[si.id]; ok { + si.tokenBurn = burn + } + + if evtData, err := os.ReadFile(filepath.Join(sessPath, "events.jsonl")); err == nil { + currentModel := logs.DefaultModel + var firstEventAt, lastEventAt time.Time + lines := strings.Split(string(evtData), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + var evt sessionEvent + if json.Unmarshal([]byte(line), &evt) != nil { + continue + } + evtTime := flexParseTime(evt.Timestamp) + if !evtTime.IsZero() { + if firstEventAt.IsZero() || evtTime.Before(firstEventAt) { + firstEventAt = evtTime + } + if lastEventAt.IsZero() || evtTime.After(lastEventAt) { + lastEventAt = evtTime + } + } + + switch evt.Type { + case "session.start": + var start sessionStartData + if json.Unmarshal(evt.Data, &start) == nil { + if si.cwd == "" { + si.cwd = start.Context.CWD + } + if si.repo == "" { + si.repo = start.Context.Repository + } + if si.branch == "" { + si.branch = start.Context.Branch + } + if si.createdAt.IsZero() { + si.createdAt = flexParseTime(start.StartTime) + } + if currentModel == "" && start.SelectedModel != "" { + currentModel = start.SelectedModel + } + } + + case "session.model_change": + var mc modelChangeData + if json.Unmarshal(evt.Data, &mc) == nil && mc.NewModel != "" { + currentModel = mc.NewModel + } + + case "session.info": + var info sessionInfoData + if json.Unmarshal(evt.Data, &info) == nil && info.InfoType == "model" { + if m := extractModelFromInfoMsg(info.Message); m != "" { + currentModel = m + } + } + + case "user.message": + si.messages++ + day := parseDayFromTimestamp(evt.Timestamp) + if day != "" { + dailyMessages[day]++ + } + if currentModel != "" { + modelMessages[currentModel]++ + if day != "" { + if dailyModelMessages[currentModel] == nil { + dailyModelMessages[currentModel] = make(map[string]float64) + } + dailyModelMessages[currentModel][day]++ + } + } + + case 
"assistant.turn_start": + si.turns++ + if currentModel != "" { + modelTurns[currentModel]++ + } + + case "assistant.message": + var msg assistantMsgData + if json.Unmarshal(evt.Data, &msg) == nil { + si.responseChars += len(msg.Content) + si.reasoningChars += len(msg.ReasoningTxt) + if currentModel != "" { + modelResponseChars[currentModel] += len(msg.Content) + modelReasoningChars[currentModel] += len(msg.ReasoningTxt) + } + var tools []json.RawMessage + if json.Unmarshal(msg.ToolRequests, &tools) == nil && len(tools) > 0 { + si.toolCalls += len(tools) + if currentModel != "" { + modelToolCalls[currentModel] += len(tools) + } + for _, toolReq := range tools { + toolName := extractCopilotToolName(toolReq) + if toolName == "" { + toolName = "unknown" + } + toolUsageCounts[toolName]++ + toolLower := strings.ToLower(strings.TrimSpace(toolName)) + paths := extractCopilotToolPaths(toolReq) + for _, path := range paths { + if lang := inferCopilotLanguageFromPath(path); lang != "" { + languageUsageCounts[lang]++ + } + if isCopilotMutatingTool(toolLower) { + changedFiles[path] = true + } + } + if isCopilotMutatingTool(toolLower) { + added, removed := estimateCopilotToolLineDelta(toolReq) + inferredLinesAdded += added + inferredLinesRemoved += removed + } + cmd := extractCopilotToolCommand(toolReq) + if cmd != "" { + if strings.Contains(strings.ToLower(cmd), "git commit") && !commitCommands[cmd] { + commitCommands[cmd] = true + inferredCommitCount++ + } + } else if strings.Contains(toolLower, "commit") { + inferredCommitCount++ + } + } + day := parseDayFromTimestamp(evt.Timestamp) + if day != "" { + dailyToolCalls[day] += float64(len(tools)) + } + } + } + + case "assistant.usage": + var usage assistantUsageData + if json.Unmarshal(evt.Data, &usage) == nil && usage.Model != "" { + usageInputTokens[usage.Model] += usage.InputTokens + usageOutputTokens[usage.Model] += usage.OutputTokens + usageCacheReadTokens[usage.Model] += usage.CacheReadTokens + 
usageCacheWriteTokens[usage.Model] += usage.CacheWriteTokens + usageCost[usage.Model] += usage.Cost + usageRequests[usage.Model]++ + usageDuration[usage.Model] += usage.Duration + + si.usageCost += usage.Cost + si.premiumRequests++ + + day := parseDayFromTimestamp(evt.Timestamp) + if day != "" { + dailyCost[day] += usage.Cost + } + + if len(usage.QuotaSnapshots) > 0 { + latestQuotaSnapshots = usage.QuotaSnapshots + } + } + + case "session.shutdown": + var shutdown sessionShutdownData + if json.Unmarshal(evt.Data, &shutdown) == nil { + shutdownPremiumRequests += shutdown.TotalPremiumRequests + si.shutdownPremiumRequests += shutdown.TotalPremiumRequests + + si.linesAdded += shutdown.CodeChanges.LinesAdded + si.linesRemoved += shutdown.CodeChanges.LinesRemoved + si.filesModified += shutdown.CodeChanges.FilesModified + shutdownLinesAdded += shutdown.CodeChanges.LinesAdded + shutdownLinesRemoved += shutdown.CodeChanges.LinesRemoved + shutdownFilesModified += shutdown.CodeChanges.FilesModified + + for model, metrics := range shutdown.ModelMetrics { + shutdownModelCost[model] += metrics.Requests.Cost + shutdownModelRequests[model] += metrics.Requests.Count + shutdownModelInputTokens[model] += metrics.Usage.InputTokens + shutdownModelOutputTokens[model] += metrics.Usage.OutputTokens + } + } + } + } + if !firstEventAt.IsZero() && si.createdAt.IsZero() { + si.createdAt = firstEventAt + } + if !lastEventAt.IsZero() && (si.updatedAt.IsZero() || lastEventAt.After(si.updatedAt)) { + si.updatedAt = lastEventAt + } + si.model = currentModel + } + + day := dayForSession(si.createdAt, si.updatedAt) + if si.model != "" { + modelSessions[si.model]++ + } + if day != "" { + dailySessions[day]++ + } + + clientLabel := normalizeCopilotClient(si.repo, si.cwd) + clientKey := sanitizeMetricName(clientLabel) + if clientKey == "" { + clientKey = "cli" + } + si.client = clientLabel + if _, ok := clientLabels[clientKey]; !ok { + clientLabels[clientKey] = clientLabel + } + 
clientSessions[clientKey]++ + clientMessages[clientKey] += si.messages + + sessionTokens := float64(si.tokenUsed) + if si.tokenBurn > 0 { + sessionTokens = si.tokenBurn + } + if sessionTokens > 0 { + clientTokens[clientKey] += sessionTokens + if day != "" { + dailyTokens[day] += sessionTokens + if dailyClientTokens[clientKey] == nil { + dailyClientTokens[clientKey] = make(map[string]float64) + } + dailyClientTokens[clientKey][day] += sessionTokens + } + if si.model != "" { + modelInputTokens[si.model] += sessionTokens + if day != "" { + if dailyModelTokens[si.model] == nil { + dailyModelTokens[si.model] = make(map[string]float64) + } + dailyModelTokens[si.model][day] += sessionTokens + } + } + } + sessions = append(sessions, si) + } + + storeSeries(snap, "messages", dailyMessages) + storeSeries(snap, "sessions", dailySessions) + storeSeries(snap, "tool_calls", dailyToolCalls) + storeSeries(snap, "tokens_total", dailyTokens) + storeSeries(snap, "cli_messages", dailyMessages) + storeSeries(snap, "cli_sessions", dailySessions) + storeSeries(snap, "cli_tool_calls", dailyToolCalls) + if len(dailyCost) > 0 { + storeSeries(snap, "cost", dailyCost) + } + for model, dayCounts := range dailyModelMessages { + safe := sanitizeMetricName(model) + storeSeries(snap, "cli_messages_"+safe, dayCounts) + } + for model, dayCounts := range dailyModelTokens { + safe := sanitizeMetricName(model) + storeSeries(snap, "tokens_"+safe, dayCounts) + storeSeries(snap, "cli_tokens_"+safe, dayCounts) + } + + setRawStr(snap, "model_usage", formatModelMap(modelMessages, "msgs")) + setRawStr(snap, "model_turns", formatModelMap(modelTurns, "turns")) + setRawStr(snap, "model_sessions", formatModelMapPlain(modelSessions)) + setRawStr(snap, "model_response_chars", formatModelMap(modelResponseChars, "chars")) + setRawStr(snap, "model_reasoning_chars", formatModelMap(modelReasoningChars, "chars")) + setRawStr(snap, "model_tool_calls", formatModelMap(modelToolCalls, "calls")) + + sort.Slice(sessions, 
func(i, j int) bool { + ti := sessions[i].updatedAt + if ti.IsZero() { + ti = sessions[i].createdAt + } + tj := sessions[j].updatedAt + if tj.IsZero() { + tj = sessions[j].createdAt + } + return ti.After(tj) + }) + + var totalMessages, totalTurns, totalResponse, totalReasoning, totalTools int + totalTokens := 0.0 + for _, s := range sessions { + totalMessages += s.messages + totalTurns += s.turns + totalResponse += s.responseChars + totalReasoning += s.reasoningChars + totalTools += s.toolCalls + tokens := float64(s.tokenUsed) + if s.tokenBurn > 0 { + tokens = s.tokenBurn + } + totalTokens += tokens + } + setRawInt(snap, "total_cli_messages", totalMessages) + setRawInt(snap, "total_cli_turns", totalTurns) + setRawInt(snap, "total_response_chars", totalResponse) + setRawInt(snap, "total_reasoning_chars", totalReasoning) + setRawInt(snap, "total_tool_calls", totalTools) + + setUsedMetric(snap, "total_messages", float64(totalMessages), "messages", copilotAllTimeWindow) + setUsedMetric(snap, "total_sessions", float64(len(sessions)), "sessions", copilotAllTimeWindow) + setUsedMetric(snap, "total_turns", float64(totalTurns), "turns", copilotAllTimeWindow) + setUsedMetric(snap, "total_tool_calls", float64(totalTools), "calls", copilotAllTimeWindow) + setUsedMetric(snap, "tool_calls_total", float64(totalTools), "calls", copilotAllTimeWindow) + if totalTools > 0 { + setUsedMetric(snap, "tool_completed", float64(totalTools), "calls", copilotAllTimeWindow) + setUsedMetric(snap, "tool_success_rate", 100.0, "%", copilotAllTimeWindow) + } + setUsedMetric(snap, "total_response_chars", float64(totalResponse), "chars", copilotAllTimeWindow) + setUsedMetric(snap, "total_reasoning_chars", float64(totalReasoning), "chars", copilotAllTimeWindow) + setUsedMetric(snap, "total_conversations", float64(len(sessions)), "sessions", copilotAllTimeWindow) + setUsedMetric(snap, "cli_messages", float64(totalMessages), "messages", copilotAllTimeWindow) + setUsedMetric(snap, "cli_turns", 
float64(totalTurns), "turns", copilotAllTimeWindow) + setUsedMetric(snap, "cli_sessions", float64(len(sessions)), "sessions", copilotAllTimeWindow) + setUsedMetric(snap, "cli_tool_calls", float64(totalTools), "calls", copilotAllTimeWindow) + setUsedMetric(snap, "cli_response_chars", float64(totalResponse), "chars", copilotAllTimeWindow) + setUsedMetric(snap, "cli_reasoning_chars", float64(totalReasoning), "chars", copilotAllTimeWindow) + setUsedMetric(snap, "cli_input_tokens", totalTokens, "tokens", copilotAllTimeWindow) + setUsedMetric(snap, "cli_total_tokens", totalTokens, "tokens", copilotAllTimeWindow) + + var totalUsageOutputTokens, totalUsageCacheRead, totalUsageCacheWrite, totalUsageCost float64 + var totalUsageRequests int + for _, v := range usageOutputTokens { + totalUsageOutputTokens += v + } + for _, v := range usageCacheReadTokens { + totalUsageCacheRead += v + } + for _, v := range usageCacheWriteTokens { + totalUsageCacheWrite += v + } + for _, v := range usageCost { + totalUsageCost += v + } + for _, v := range usageRequests { + totalUsageRequests += v + } + if totalUsageOutputTokens > 0 { + setUsedMetric(snap, "cli_output_tokens", totalUsageOutputTokens, "tokens", copilotAllTimeWindow) + } + if totalUsageCacheRead > 0 { + setUsedMetric(snap, "cli_cache_read_tokens", totalUsageCacheRead, "tokens", copilotAllTimeWindow) + } + if totalUsageCacheWrite > 0 { + setUsedMetric(snap, "cli_cache_write_tokens", totalUsageCacheWrite, "tokens", copilotAllTimeWindow) + } + if totalUsageCost > 0 { + setUsedMetric(snap, "cli_cost", totalUsageCost, "USD", copilotAllTimeWindow) + } + if totalUsageRequests > 0 { + setUsedMetric(snap, "cli_premium_requests", float64(totalUsageRequests), "requests", copilotAllTimeWindow) + } else if shutdownPremiumRequests > 0 { + setUsedMetric(snap, "cli_premium_requests", float64(shutdownPremiumRequests), "requests", copilotAllTimeWindow) + } + if shutdownLinesAdded > 0 || shutdownLinesRemoved > 0 { + setUsedMetric(snap, 
"cli_lines_added", float64(shutdownLinesAdded), "lines", copilotAllTimeWindow) + setUsedMetric(snap, "cli_lines_removed", float64(shutdownLinesRemoved), "lines", copilotAllTimeWindow) + } + if shutdownFilesModified > 0 { + setUsedMetric(snap, "cli_files_modified", float64(shutdownFilesModified), "files", copilotAllTimeWindow) + } + if totalUsageRequests > 0 { + var totalDuration int64 + for _, d := range usageDuration { + totalDuration += d + } + avgMs := float64(totalDuration) / float64(totalUsageRequests) + setUsedMetric(snap, "cli_avg_latency_ms", avgMs, "ms", copilotAllTimeWindow) + } + + if qs, ok := latestQuotaSnapshots["premium_interactions"]; ok { + if _, exists := snap.Metrics["premium_interactions_quota"]; !exists { + entitlement := float64(qs.EntitlementRequests) + used := float64(qs.UsedRequests) + remaining := entitlement - used + if remaining < 0 { + remaining = 0 + } + snap.Metrics["premium_interactions_quota"] = core.Metric{ + Limit: &entitlement, + Used: core.Float64Ptr(used), + Remaining: core.Float64Ptr(remaining), + Unit: "requests", + Window: "billing-cycle", + } + } + } + + if _, v := latestSeriesValue(dailyCost); v > 0 { + setUsedMetric(snap, "cost_today", v, "USD", "today") + } + setUsedMetric(snap, "7d_cost", sumLastNDays(dailyCost, 7), "USD", "7d") + + if _, v := latestSeriesValue(dailyMessages); v > 0 { + setUsedMetric(snap, "messages_today", v, "messages", "today") + } + if _, v := latestSeriesValue(dailySessions); v > 0 { + setUsedMetric(snap, "sessions_today", v, "sessions", "today") + } + if _, v := latestSeriesValue(dailyToolCalls); v > 0 { + setUsedMetric(snap, "tool_calls_today", v, "calls", "today") + } + if _, v := latestSeriesValue(dailyTokens); v > 0 { + setUsedMetric(snap, "tokens_today", v, "tokens", "today") + } + setUsedMetric(snap, "7d_messages", sumLastNDays(dailyMessages, 7), "messages", "7d") + setUsedMetric(snap, "7d_sessions", sumLastNDays(dailySessions, 7), "sessions", "7d") + setUsedMetric(snap, "7d_tool_calls", 
sumLastNDays(dailyToolCalls, 7), "calls", "7d") + setUsedMetric(snap, "7d_tokens", sumLastNDays(dailyTokens, 7), "tokens", "7d") + setUsedMetric(snap, "total_prompts", float64(totalMessages), "prompts", copilotAllTimeWindow) + + allModelTokens := make(map[string]float64, len(modelInputTokens)) + for k, v := range modelInputTokens { + allModelTokens[k] = v + } + for k, v := range usageInputTokens { + if allModelTokens[k] < v { + allModelTokens[k] = v + } + } + allModelMessages := make(map[string]int, len(modelMessages)) + for k, v := range modelMessages { + allModelMessages[k] = v + } + for k, v := range usageRequests { + if allModelMessages[k] < v { + allModelMessages[k] = v + } + } + topModels := topModelNames(allModelTokens, allModelMessages, maxCopilotModels) + for _, model := range topModels { + prefix := "model_" + sanitizeMetricName(model) + rec := core.ModelUsageRecord{RawModelID: model, RawSource: "json", Window: copilotAllTimeWindow} + + inputTok := modelInputTokens[model] + if v := usageInputTokens[model]; v > 0 { + inputTok = v + } + outputTok := usageOutputTokens[model] + cacheTok := usageCacheReadTokens[model] + usageCacheWriteTokens[model] + + setUsedMetric(snap, prefix+"_input_tokens", inputTok, "tokens", copilotAllTimeWindow) + if inputTok > 0 { + rec.InputTokens = core.Float64Ptr(inputTok) + } + if outputTok > 0 { + setUsedMetric(snap, prefix+"_output_tokens", outputTok, "tokens", copilotAllTimeWindow) + rec.OutputTokens = core.Float64Ptr(outputTok) + } + if cacheTok > 0 { + rec.CachedTokens = core.Float64Ptr(cacheTok) + } + totalTok := inputTok + outputTok + if totalTok > 0 { + rec.TotalTokens = core.Float64Ptr(totalTok) + } + + modelCost := usageCost[model] + if modelCost == 0 { + modelCost = shutdownModelCost[model] + } + if modelCost > 0 { + rec.CostUSD = core.Float64Ptr(modelCost) + setUsedMetric(snap, prefix+"_cost", modelCost, "USD", copilotAllTimeWindow) + } + + if reqs := usageRequests[model]; reqs > 0 { + rec.Requests = 
core.Float64Ptr(float64(reqs)) + } + + setUsedMetric(snap, prefix+"_messages", float64(modelMessages[model]), "messages", copilotAllTimeWindow) + setUsedMetric(snap, prefix+"_turns", float64(modelTurns[model]), "turns", copilotAllTimeWindow) + setUsedMetric(snap, prefix+"_sessions", float64(modelSessions[model]), "sessions", copilotAllTimeWindow) + setUsedMetric(snap, prefix+"_tool_calls", float64(modelToolCalls[model]), "calls", copilotAllTimeWindow) + setUsedMetric(snap, prefix+"_response_chars", float64(modelResponseChars[model]), "chars", copilotAllTimeWindow) + setUsedMetric(snap, prefix+"_reasoning_chars", float64(modelReasoningChars[model]), "chars", copilotAllTimeWindow) + snap.AppendModelUsage(rec) + } + + topClients := topCopilotClientNames(clientTokens, clientSessions, clientMessages, maxCopilotClients) + for _, client := range topClients { + clientPrefix := "client_" + client + setUsedMetric(snap, clientPrefix+"_total_tokens", clientTokens[client], "tokens", copilotAllTimeWindow) + setUsedMetric(snap, clientPrefix+"_input_tokens", clientTokens[client], "tokens", copilotAllTimeWindow) + setUsedMetric(snap, clientPrefix+"_sessions", float64(clientSessions[client]), "sessions", copilotAllTimeWindow) + if byDay := dailyClientTokens[client]; len(byDay) > 0 { + storeSeries(snap, "tokens_client_"+client, byDay) + } + } + setRawStr(snap, "client_usage", formatCopilotClientUsage(topClients, clientLabels, clientTokens, clientSessions)) + setRawStr(snap, "tool_usage", formatModelMap(toolUsageCounts, "calls")) + setRawStr(snap, "language_usage", formatModelMap(languageUsageCounts, "req")) + for toolName, count := range toolUsageCounts { + if count <= 0 { + continue + } + setUsedMetric(snap, "tool_"+sanitizeMetricName(toolName), float64(count), "calls", copilotAllTimeWindow) + } + for lang, count := range languageUsageCounts { + if count <= 0 { + continue + } + setUsedMetric(snap, "lang_"+sanitizeMetricName(lang), float64(count), "requests", copilotAllTimeWindow) + 
} + + linesAdded := shutdownLinesAdded + if inferredLinesAdded > linesAdded { + linesAdded = inferredLinesAdded + } + linesRemoved := shutdownLinesRemoved + if inferredLinesRemoved > linesRemoved { + linesRemoved = inferredLinesRemoved + } + filesChanged := shutdownFilesModified + if len(changedFiles) > filesChanged { + filesChanged = len(changedFiles) + } + if linesAdded > 0 { + setUsedMetric(snap, "composer_lines_added", float64(linesAdded), "lines", copilotAllTimeWindow) + } + if linesRemoved > 0 { + setUsedMetric(snap, "composer_lines_removed", float64(linesRemoved), "lines", copilotAllTimeWindow) + } + if filesChanged > 0 { + setUsedMetric(snap, "composer_files_changed", float64(filesChanged), "files", copilotAllTimeWindow) + } + if inferredCommitCount > 0 { + setUsedMetric(snap, "scored_commits", float64(inferredCommitCount), "commits", copilotAllTimeWindow) + } + if linesAdded > 0 || linesRemoved > 0 { + hundred := 100.0 + zero := 0.0 + snap.Metrics["ai_code_percentage"] = core.Metric{ + Used: &hundred, + Remaining: &zero, + Limit: &hundred, + Unit: "%", + Window: copilotAllTimeWindow, + } + } + + if len(sessions) > 0 { + r := sessions[0] + if r.client != "" { + snap.Raw["last_session_client"] = r.client + } + snap.Raw["last_session_repo"] = r.repo + snap.Raw["last_session_branch"] = r.branch + if r.summary != "" { + snap.Raw["last_session_summary"] = r.summary + } + if !r.updatedAt.IsZero() { + snap.Raw["last_session_time"] = r.updatedAt.Format(time.RFC3339) + } + if r.model != "" { + snap.Raw["last_session_model"] = r.model + } + sessionTokens := float64(r.tokenUsed) + if r.tokenBurn > 0 { + sessionTokens = r.tokenBurn + } + if sessionTokens > 0 { + snap.Raw["last_session_tokens"] = fmt.Sprintf("%.0f/%d", sessionTokens, r.tokenTotal) + setUsedMetric(snap, "session_input_tokens", sessionTokens, "tokens", "session") + setUsedMetric(snap, "session_total_tokens", sessionTokens, "tokens", "session") + if r.tokenTotal > 0 { + limit := float64(r.tokenTotal) + 
snap.Metrics["context_window"] = core.Metric{ + Limit: &limit, + Used: core.Float64Ptr(sessionTokens), + Remaining: core.Float64Ptr(max(limit-sessionTokens, 0)), + Unit: "tokens", + Window: "session", + } + } + } + } +} diff --git a/internal/providers/copilot/local_helpers.go b/internal/providers/copilot/local_helpers.go new file mode 100644 index 0000000..8d145bd --- /dev/null +++ b/internal/providers/copilot/local_helpers.go @@ -0,0 +1,665 @@ +package copilot + +import ( + "encoding/json" + "fmt" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" + "github.com/samber/lo" +) + +func parseCompactionLine(line string) logTokenEntry { + var entry logTokenEntry + + if len(line) >= 24 { + if t, err := time.Parse("2006-01-02T15:04:05.000Z", line[:24]); err == nil { + entry.Timestamp = t + } + } + + parenStart := strings.Index(line, "(") + parenEnd := strings.Index(line, " tokens)") + if parenStart >= 0 && parenEnd > parenStart { + inner := line[parenStart+1 : parenEnd] + parts := strings.Split(inner, "/") + if len(parts) == 2 { + fmt.Sscanf(parts[0], "%d", &entry.Used) + fmt.Sscanf(parts[1], "%d", &entry.Total) + } + } + + return entry +} + +func sortCompactionEntries(entries []logTokenEntry) { + sort.SliceStable(entries, func(i, j int) bool { + ti := entries[i].Timestamp + tj := entries[j].Timestamp + switch { + case ti.IsZero() && tj.IsZero(): + return entries[i].Used < entries[j].Used + case ti.IsZero(): + return false + case tj.IsZero(): + return true + default: + return ti.Before(tj) + } + }) +} + +func newestCompactionEntry(entries []logTokenEntry) (logTokenEntry, bool) { + if len(entries) == 0 { + return logTokenEntry{}, false + } + best := entries[0] + for _, te := range entries[1:] { + if best.Timestamp.IsZero() && !te.Timestamp.IsZero() { + best = te + continue + } + if !best.Timestamp.IsZero() && te.Timestamp.IsZero() { + continue + } + if 
!te.Timestamp.IsZero() && te.Timestamp.After(best.Timestamp) { + best = te + continue + } + if best.Timestamp.Equal(te.Timestamp) && te.Used > best.Used { + best = te + } + } + return best, true +} + +func parseSimpleYAML(content string) map[string]string { + result := make(map[string]string) + for _, line := range strings.Split(content, "\n") { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + idx := strings.Index(line, ":") + if idx < 0 { + continue + } + key := strings.TrimSpace(line[:idx]) + val := strings.TrimSpace(line[idx+1:]) + result[key] = val + } + return result +} + +func storeSeries(snap *core.UsageSnapshot, key string, m map[string]float64) { + if len(m) > 0 { + snap.DailySeries[key] = core.SortedTimePoints(m) + } +} + +func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { + if value <= 0 { + return + } + v := value + snap.Metrics[key] = core.Metric{ + Used: &v, + Unit: unit, + Window: window, + } +} + +func dayForSession(createdAt, updatedAt time.Time) string { + if !updatedAt.IsZero() { + return updatedAt.Format("2006-01-02") + } + if !createdAt.IsZero() { + return createdAt.Format("2006-01-02") + } + return "" +} + +func latestSeriesValue(m map[string]float64) (string, float64) { + if len(m) == 0 { + return "", 0 + } + dates := lo.Keys(m) + sort.Strings(dates) + last := dates[len(dates)-1] + return last, m[last] +} + +func sumLastNDays(m map[string]float64, days int) float64 { + if len(m) == 0 || days <= 0 { + return 0 + } + date, _ := latestSeriesValue(m) + if date == "" { + return 0 + } + end, err := time.Parse("2006-01-02", date) + if err != nil { + return 0 + } + start := end.AddDate(0, 0, -(days - 1)) + sum := 0.0 + for d, v := range m { + t, err := time.Parse("2006-01-02", d) + if err != nil { + continue + } + if !t.Before(start) && !t.After(end) { + sum += v + } + } + return sum +} + +func topModelNames(tokenMap map[string]float64, messageMap 
map[string]int, limit int) []string { + type row struct { + model string + tokens float64 + messages int + } + + seen := make(map[string]bool) + var rows []row + for model, tokens := range tokenMap { + seen[model] = true + rows = append(rows, row{model: model, tokens: tokens, messages: messageMap[model]}) + } + for model, messages := range messageMap { + if seen[model] { + continue + } + rows = append(rows, row{model: model, messages: messages}) + } + + sort.Slice(rows, func(i, j int) bool { + if rows[i].tokens == rows[j].tokens { + if rows[i].messages == rows[j].messages { + return rows[i].model < rows[j].model + } + return rows[i].messages > rows[j].messages + } + return rows[i].tokens > rows[j].tokens + }) + + if limit > 0 && len(rows) > limit { + rows = rows[:limit] + } + return lo.Map(rows, func(r row, _ int) string { return r.model }) +} + +func topCopilotClientNames(tokenMap map[string]float64, sessionMap, messageMap map[string]int, limit int) []string { + type row struct { + client string + tokens float64 + sessions int + messages int + } + + seen := make(map[string]bool) + var rows []row + for client, tokens := range tokenMap { + seen[client] = true + rows = append(rows, row{ + client: client, + tokens: tokens, + sessions: sessionMap[client], + messages: messageMap[client], + }) + } + for client, sessions := range sessionMap { + if seen[client] { + continue + } + seen[client] = true + rows = append(rows, row{ + client: client, + sessions: sessions, + messages: messageMap[client], + }) + } + for client, messages := range messageMap { + if seen[client] { + continue + } + rows = append(rows, row{ + client: client, + messages: messages, + }) + } + + sort.Slice(rows, func(i, j int) bool { + if rows[i].tokens == rows[j].tokens { + if rows[i].sessions == rows[j].sessions { + if rows[i].messages == rows[j].messages { + return rows[i].client < rows[j].client + } + return rows[i].messages > rows[j].messages + } + return rows[i].sessions > rows[j].sessions + } + 
return rows[i].tokens > rows[j].tokens + }) + + if limit > 0 && len(rows) > limit { + rows = rows[:limit] + } + return lo.Map(rows, func(r row, _ int) string { return r.client }) +} + +func normalizeCopilotClient(repo, cwd string) string { + repo = strings.TrimSpace(repo) + if repo != "" && repo != "." { + return repo + } + + cwd = strings.TrimSpace(cwd) + if cwd != "" { + base := strings.TrimSpace(filepath.Base(cwd)) + if base != "" && base != "." && base != string(filepath.Separator) { + return base + } + } + + return "cli" +} + +func formatCopilotClientUsage(clients []string, labels map[string]string, tokens map[string]float64, sessions map[string]int) string { + if len(clients) == 0 { + return "" + } + + parts := make([]string, 0, len(clients)) + for _, client := range clients { + label := labels[client] + if label == "" { + label = client + } + + value := tokens[client] + sessionCount := sessions[client] + + item := fmt.Sprintf("%s %s tok", label, formatCopilotTokenCount(value)) + if sessionCount > 0 { + item += fmt.Sprintf(" · %d sess", sessionCount) + } + parts = append(parts, item) + } + return strings.Join(parts, ", ") +} + +func formatCopilotTokenCount(value float64) string { return shared.FormatTokenCountF(value) } + +func parseDayFromTimestamp(ts string) string { + t := flexParseTime(ts) + if t.IsZero() { + return "" + } + return t.Format("2006-01-02") +} + +func flexParseTime(s string) time.Time { + return shared.FlexParseTime(s) +} + +func parseCopilotTime(s string) time.Time { + return shared.FlexParseTime(s) +} + +func extractModelFromInfoMsg(msg string) string { + idx := strings.Index(msg, ": ") + if idx < 0 { + return "" + } + m := strings.TrimSpace(msg[idx+2:]) + if pIdx := strings.Index(m, " ("); pIdx >= 0 { + m = m[:pIdx] + } + return m +} + +func extractCopilotToolName(raw json.RawMessage) string { + if len(strings.TrimSpace(string(raw))) == 0 { + return "" + } + + var tool struct { + Name string `json:"name"` + ToolName string 
`json:"toolName"` + Tool string `json:"tool"` + } + if err := json.Unmarshal(raw, &tool); err != nil { + return "" + } + + candidates := []string{tool.Name, tool.ToolName, tool.Tool} + for _, candidate := range candidates { + candidate = strings.TrimSpace(candidate) + if candidate != "" { + return candidate + } + } + return "" +} + +func isCopilotMutatingTool(toolName string) bool { + name := strings.ToLower(strings.TrimSpace(toolName)) + if name == "" { + return false + } + return strings.Contains(name, "edit") || + strings.Contains(name, "write") || + strings.Contains(name, "create") || + strings.Contains(name, "delete") || + strings.Contains(name, "rename") || + strings.Contains(name, "move") || + strings.Contains(name, "replace") +} + +func extractCopilotToolCommand(raw json.RawMessage) string { + var payload any + if json.Unmarshal(raw, &payload) != nil { + return "" + } + var command string + var walk func(v any) + walk = func(v any) { + if command != "" || v == nil { + return + } + switch value := v.(type) { + case map[string]any: + for key, child := range value { + k := strings.ToLower(strings.TrimSpace(key)) + if k == "command" || k == "cmd" || k == "script" || k == "shell_command" { + if s, ok := child.(string); ok { + command = strings.TrimSpace(s) + return + } + } + } + for _, child := range value { + walk(child) + if command != "" { + return + } + } + case []any: + for _, child := range value { + walk(child) + if command != "" { + return + } + } + } + } + walk(payload) + return command +} + +func extractCopilotToolPaths(raw json.RawMessage) []string { + var payload any + if json.Unmarshal(raw, &payload) != nil { + return nil + } + + pathHints := map[string]bool{ + "path": true, "paths": true, "file": true, "files": true, "filepath": true, "file_path": true, + "cwd": true, "dir": true, "directory": true, "target": true, "pattern": true, "glob": true, + "from": true, "to": true, "include": true, "exclude": true, + } + + candidates := 
make(map[string]bool) + var walk func(v any, hinted bool) + walk = func(v any, hinted bool) { + switch value := v.(type) { + case map[string]any: + for key, child := range value { + k := strings.ToLower(strings.TrimSpace(key)) + childHinted := hinted || pathHints[k] || strings.Contains(k, "path") || strings.Contains(k, "file") + walk(child, childHinted) + } + case []any: + for _, child := range value { + walk(child, hinted) + } + case string: + if !hinted { + return + } + for _, token := range extractCopilotPathTokens(value) { + candidates[token] = true + } + } + } + walk(payload, false) + + out := make([]string, 0, len(candidates)) + for c := range candidates { + out = append(out, c) + } + sort.Strings(out) + return out +} + +func extractCopilotPathTokens(raw string) []string { + raw = strings.TrimSpace(raw) + if raw == "" { + return nil + } + fields := strings.Fields(raw) + if len(fields) == 0 { + fields = []string{raw} + } + + var out []string + for _, field := range fields { + token := strings.Trim(field, "\"'`()[]{}<>,:;") + if token == "" { + continue + } + lower := strings.ToLower(token) + if strings.HasPrefix(lower, "http://") || strings.HasPrefix(lower, "https://") || strings.HasPrefix(lower, "file://") { + continue + } + if strings.HasPrefix(token, "-") { + continue + } + if !strings.Contains(token, "/") && !strings.Contains(token, "\\") && !strings.Contains(token, ".") { + continue + } + token = strings.TrimPrefix(token, "./") + if token == "" { + continue + } + out = append(out, token) + } + return lo.Uniq(out) +} + +func estimateCopilotToolLineDelta(raw json.RawMessage) (added int, removed int) { + var payload any + if json.Unmarshal(raw, &payload) != nil { + return 0, 0 + } + lineCount := func(text string) int { + text = strings.TrimSpace(text) + if text == "" { + return 0 + } + return strings.Count(text, "\n") + 1 + } + var walk func(v any) + walk = func(v any) { + switch value := v.(type) { + case map[string]any: + var oldText, newText string + for 
_, key := range []string{"old_string", "old_text", "from", "replace"} { + if rawValue, ok := value[key]; ok { + if s, ok := rawValue.(string); ok { + oldText = s + break + } + } + } + for _, key := range []string{"new_string", "new_text", "to", "with"} { + if rawValue, ok := value[key]; ok { + if s, ok := rawValue.(string); ok { + newText = s + break + } + } + } + if oldText != "" || newText != "" { + removed += lineCount(oldText) + added += lineCount(newText) + } + if rawValue, ok := value["content"]; ok { + if s, ok := rawValue.(string); ok { + added += lineCount(s) + } + } + for _, child := range value { + walk(child) + } + case []any: + for _, child := range value { + walk(child) + } + } + } + walk(payload) + return added, removed +} + +func inferCopilotLanguageFromPath(path string) string { + p := strings.ToLower(strings.TrimSpace(path)) + if p == "" { + return "" + } + base := strings.ToLower(filepath.Base(p)) + switch base { + case "dockerfile": + return "docker" + case "makefile": + return "make" + } + switch strings.ToLower(filepath.Ext(p)) { + case ".go": + return "go" + case ".py": + return "python" + case ".ts", ".tsx": + return "typescript" + case ".js", ".jsx": + return "javascript" + case ".tf", ".tfvars", ".hcl": + return "terraform" + case ".sh", ".bash", ".zsh", ".fish": + return "shell" + case ".md", ".mdx": + return "markdown" + case ".json": + return "json" + case ".yml", ".yaml": + return "yaml" + case ".sql": + return "sql" + case ".rs": + return "rust" + case ".java": + return "java" + case ".c", ".h": + return "c" + case ".cc", ".cpp", ".cxx", ".hpp": + return "cpp" + case ".rb": + return "ruby" + case ".php": + return "php" + case ".swift": + return "swift" + case ".vue": + return "vue" + case ".svelte": + return "svelte" + case ".toml": + return "toml" + case ".xml": + return "xml" + } + return "" +} + +func formatModelMap(m map[string]int, unit string) string { + if len(m) == 0 { + return "" + } + parts := make([]string, 0, len(m)) + for 
model, count := range m { + parts = append(parts, fmt.Sprintf("%s: %d %s", model, count, unit)) + } + sort.Strings(parts) + return strings.Join(parts, ", ") +} + +func formatModelMapPlain(m map[string]int) string { + if len(m) == 0 { + return "" + } + parts := make([]string, 0, len(m)) + for model, count := range m { + parts = append(parts, fmt.Sprintf("%s: %d", model, count)) + } + sort.Strings(parts) + return strings.Join(parts, ", ") +} + +func setRawInt(snap *core.UsageSnapshot, key string, v int) { + if v > 0 { + snap.Raw[key] = fmt.Sprintf("%d", v) + } +} + +func setRawStr(snap *core.UsageSnapshot, key, v string) { + if v != "" { + snap.Raw[key] = v + } +} + +func sanitizeMetricName(name string) string { + name = strings.ToLower(strings.TrimSpace(name)) + if name == "" { + return "unknown" + } + + var b strings.Builder + lastUnderscore := false + for _, r := range name { + switch { + case r >= 'a' && r <= 'z': + b.WriteRune(r) + lastUnderscore = false + case r >= '0' && r <= '9': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore { + b.WriteByte('_') + lastUnderscore = true + } + } + } + out := strings.Trim(b.String(), "_") + if out == "" { + return "unknown" + } + return out +} From ada047076f622d42c5316a9e2a7238b59f49a687 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 18:53:06 +0100 Subject: [PATCH 24/32] refactor: cache tile composition and split claude conversations --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 11 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 7 +- internal/providers/claude_code/claude_code.go | 819 ----------------- .../claude_code/conversation_usage.go | 829 ++++++++++++++++++ internal/tui/model.go | 2 + internal/tui/model_input.go | 2 + internal/tui/settings_modal.go | 145 --- internal/tui/settings_modal_preview.go | 148 ++++ internal/tui/tiles.go | 197 +---- internal/tui/tiles_cache.go | 269 ++++++ 10 files changed, 1267 insertions(+), 1162 deletions(-) create mode 100644 
internal/providers/claude_code/conversation_usage.go create mode 100644 internal/tui/settings_modal_preview.go create mode 100644 internal/tui/tiles_cache.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index ef6d0d0..ae7fdaa 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -64,21 +64,24 @@ This table captures every issue found in this pass. It is broad and high-signal, | R44 | Fixed | Claude Code local file/helper split and settings modal layout split | `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/local_files.go`, `internal/providers/claude_code/local_helpers.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go` | Claude Code local readers and generic helper logic are split out of the main provider file, and the settings modal layout/render wrapper no longer lives inline with all modal state/input handling. | Continue with deeper conversation-aggregation extraction in Claude Code and more TUI render-section splits. | | R45 | Fixed | Copilot GitHub API split | `internal/providers/copilot/copilot.go`, `internal/providers/copilot/api_data.go` | Copilot's GitHub API fetch, quota projection, and org metrics flow now live in a dedicated file instead of sharing the same unit as local config/log/session parsing. | Continue splitting the remaining local projection/helpers out of the main provider file. | | R46 | Fixed | Copilot local config/log/session split | `internal/providers/copilot/copilot.go`, `internal/providers/copilot/local_data.go`, `internal/providers/copilot/local_helpers.go` | Copilot local config loading, log/session readers, and local parsing/projection helpers now live outside the main provider file. The coordinator file is reduced to provider setup, fetch orchestration, and status/metric selection helpers. 
| Keep future Copilot local-data work inside the dedicated helper units instead of re-growing the coordinator. | +| R47 | Fixed | Claude Code conversation aggregation split | `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/conversation_usage.go` | Claude Code's JSONL conversation aggregation, block-window estimation, and local tool/session projection no longer live inline with provider setup and API plumbing. The main provider file is now mostly provider wiring and API-side flow. | Keep future conversation-record projections in the dedicated conversation unit. | +| R48 | Fixed | Tile render-path derivation caching | `internal/tui/model.go`, `internal/tui/model_input.go`, `internal/tui/tiles.go`, `internal/tui/tiles_cache.go` | Tile body derivation is now cached per snapshot/update state instead of rebuilding the full composition section stack on every render frame. Dynamic header and reset animation still render live, while static body composition is reused until snapshots or size change. | Apply the same pattern selectively to detail/analytics only where profiling or repeated drift justifies it. | +| R49 | Fixed | Settings modal preview-data split | `internal/tui/settings_modal.go`, `internal/tui/settings_modal_preview.go` | The large preview snapshot fixture for widget-section configuration moved out of the main settings modal behavior file, reducing render/input coupling inside `settings_modal.go`. | Continue moving purely preview/demo helpers out of modal behavior files. 
| ## Action Table | ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | -| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, and settings modal layout is separated, but TUI state-transition and render-heavy flows are still concentrated in a few large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | +| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/tui/settings_modal_preview.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, tile-body composition is cached, and settings preview/layout pieces are separated, but TUI state-transition and render-heavy flows are still concentrated in a few large files. 
| Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Composition bars, provider tile fallback/rate-limit selection, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | -| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/claude_code/claude_code.go` | Cursor, OpenRouter, Codex, and Copilot are now materially decomposed, and Claude Code has local-reader/helper splits, but several providers still combine large parsing/projection flows in very large files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, telemetry helpers, and the remaining Claude Code conversation aggregation path. | Smaller diffs, less drift risk, and easier provider-specific testing. | +| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go` | Cursor, OpenRouter, Codex, Copilot, and Claude Code are now materially decomposed, but several providers still combine large parsing/projection flows in very large files. 
| Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. | Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. | | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. 
| | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | -| A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/tui/tiles_composition.go` | TUI logic is split across more focused files now, but several files are still individually very large and still mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. 
| -| A15 | P3 | Performance optimization opportunity in render path | `internal/tui/model.go:441-450`, `internal/tui/tiles_composition.go:302-322`, `internal/tui/detail.go:752-1046`, `internal/tui/analytics.go:663-729` | The UI recomputes display/composition structures from raw metric maps repeatedly during rendering. It is correct, but the work is duplicated across views and frames. | Cache derived display/composition sections per snapshot update instead of rebuilding them in each view path. | Lower render cost and less duplicated parsing logic. | +| A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/tui/settings_modal_preview.go`, `internal/tui/tiles_composition.go` | TUI logic is split across more focused files now, but several files are still individually large and still mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | +| A15 | P3 | Performance optimization follow-through in render path | `internal/tui/model.go`, `internal/tui/tiles.go`, `internal/tui/tiles_cache.go`, `internal/tui/detail.go`, `internal/tui/analytics.go` | Tile body composition is now cached per snapshot/update state, but detail and analytics still rebuild some derived structures on each render path. | Extend caching only to the remaining high-cost detail/analytics derivations if profiling or repeated churn justifies it. | Lower render cost without over-caching the whole UI. 
| ## Suggested Execution Order diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index d2f6f0c..38c5a59 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -20,7 +20,7 @@ These were major concerns in earlier reviews and are now materially addressed: - Cursor parser/SQLite duplication across dashboard and telemetry paths. - Codex and Claude Code raw parser duplication. - Codex live/session flow concentrated in one provider file. -- Claude Code local file readers and model-summary helpers concentrated in one provider file. +- Claude Code local file readers, model-summary helpers, and conversation aggregation concentrated in one provider file. - Copilot GitHub API fetch/quota/org-metrics flow concentrated in the same file as local log/session parsing. - Copilot local config/log/session parsing concentrated in the same file as provider orchestration. - OpenRouter provider-resolution, analytics, generation, projection, and account-path monolith sprawl. @@ -34,7 +34,7 @@ These were major concerns in earlier reviews and are now materially addressed: ### 1. [P2] TUI rendering and state handling are still concentrated in a few very large files -The TUI is much better than before, and provider tile display-summary logic no longer lives inline in `model.go`, while the settings modal layout wrapper now lives in its own file. But [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go), and the remaining settings modal render sections are still large enough that unrelated concerns move together. 
+The TUI is much better than before, and provider tile display-summary logic no longer lives inline in `model.go`, while the settings modal layout wrapper now lives in its own file. Tile-body derivation is cached now as well. But [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go), and the remaining settings modal render sections are still large enough that unrelated concerns move together. Refs: - [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go) @@ -80,7 +80,7 @@ What to address: ### 4. [P2] Several providers are still large mixed-responsibility units -Cursor, OpenRouter, Codex, and Copilot are now in much better shape, Claude Code has started the same split, but several providers still remain monoliths that mix transport, parsing, normalization, and projection in one place. +Cursor, OpenRouter, Codex, Copilot, and Claude Code are now in much better shape, but several providers still remain monoliths that mix transport, parsing, normalization, and projection in one place. 
Refs: - [ollama.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/ollama.go) @@ -93,6 +93,7 @@ Refs: - [claude_code.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/claude_code.go) - [local_files.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_files.go) - [local_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_helpers.go) +- [conversation_records.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/conversation_records.go) What to address: - Split by concern, not by arbitrary line count: diff --git a/internal/providers/claude_code/claude_code.go b/internal/providers/claude_code/claude_code.go index fe3359b..5cac350 100644 --- a/internal/providers/claude_code/claude_code.go +++ b/internal/providers/claude_code/claude_code.go @@ -3,17 +3,14 @@ package claude_code import ( "context" "fmt" - "math" "os" "path/filepath" - "sort" "strings" "sync" "time" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" - "github.com/samber/lo" ) type Provider struct { @@ -394,819 +391,3 @@ func (p *Provider) setCachedUsage(u *usageResponse) { defer p.mu.Unlock() p.usageAPICache = u } - -func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, snap *core.UsageSnapshot) error { - jsonlFiles := collectJSONLFiles(projectsDir) - if altProjectsDir != "" { - jsonlFiles = append(jsonlFiles, collectJSONLFiles(altProjectsDir)...)
- } - jsonlFiles = lo.Uniq(lo.Compact(jsonlFiles)) - sort.Strings(jsonlFiles) - - if len(jsonlFiles) == 0 { - return fmt.Errorf("no JSONL conversation files found") - } - - snap.Raw["jsonl_files_found"] = fmt.Sprintf("%d", len(jsonlFiles)) - - now := time.Now() - today := now.Format("2006-01-02") - todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) - weekStart := now.Add(-7 * 24 * time.Hour) - - var ( - todayCostUSD float64 - todayInputTokens int - todayOutputTokens int - todayCacheRead int - todayCacheCreate int - todayMessages int - todayModels = make(map[string]bool) - - weeklyCostUSD float64 - weeklyInputTokens int - weeklyOutputTokens int - weeklyMessages int - - currentBlockStart time.Time - currentBlockEnd time.Time - blockCostUSD float64 - blockInputTokens int - blockOutputTokens int - blockCacheRead int - blockCacheCreate int - blockMessages int - blockModels = make(map[string]bool) - inCurrentBlock bool - - allTimeCostUSD float64 - allTimeEntries int - ) - - blockStartCandidates := []time.Time{} - - var allUsages []conversationRecord - modelTotals := make(map[string]*modelUsageTotals) - clientTotals := make(map[string]*modelUsageTotals) - projectTotals := make(map[string]*modelUsageTotals) - agentTotals := make(map[string]*modelUsageTotals) - serviceTierTotals := make(map[string]float64) - inferenceGeoTotals := make(map[string]float64) - toolUsageCounts := make(map[string]int) - languageUsageCounts := make(map[string]int) - changedFiles := make(map[string]bool) - seenCommitCommands := make(map[string]bool) - clientSessions := make(map[string]map[string]bool) - projectSessions := make(map[string]map[string]bool) - agentSessions := make(map[string]map[string]bool) - seenUsageKeys := make(map[string]bool) - seenToolKeys := make(map[string]bool) - dailyClientTokens := make(map[string]map[string]float64) - dailyTokenTotals := make(map[string]int) - dailyMessages := make(map[string]int) - dailyCost := 
make(map[string]float64) - dailyModelTokens := make(map[string]map[string]int) - todaySessions := make(map[string]bool) - weeklySessions := make(map[string]bool) - var ( - todayCacheCreate5m int - todayCacheCreate1h int - todayReasoning int - todayToolCalls int - todayWebSearch int - todayWebFetch int - weeklyCacheRead int - weeklyCacheCreate int - weeklyCacheCreate5m int - weeklyCacheCreate1h int - weeklyReasoning int - weeklyToolCalls int - weeklyWebSearch int - weeklyWebFetch int - allTimeInputTokens int - allTimeOutputTokens int - allTimeCacheRead int - allTimeCacheCreate int - allTimeCacheCreate5m int - allTimeCacheCreate1h int - allTimeReasoning int - allTimeToolCalls int - allTimeWebSearch int - allTimeWebFetch int - allTimeLinesAdded int - allTimeLinesRemoved int - allTimeCommitCount int - ) - - ensureTotals := func(m map[string]*modelUsageTotals, key string) *modelUsageTotals { - if _, ok := m[key]; !ok { - m[key] = &modelUsageTotals{} - } - return m[key] - } - ensureSessionSet := func(m map[string]map[string]bool, key string) map[string]bool { - if _, ok := m[key]; !ok { - m[key] = make(map[string]bool) - } - return m[key] - } - normalizeAgent := func(path string) string { - if strings.Contains(path, string(filepath.Separator)+"subagents"+string(filepath.Separator)) { - return "subagents" - } - return "main" - } - normalizeProject := func(cwd, sourcePath string) string { - if cwd != "" { - base := filepath.Base(cwd) - if base != "" && base != "." && base != string(filepath.Separator) { - return sanitizeModelName(base) - } - return sanitizeModelName(cwd) - } - dir := filepath.Base(filepath.Dir(sourcePath)) - if dir == "" || dir == "." { - return "unknown" - } - return sanitizeModelName(dir) - } - for _, fpath := range jsonlFiles { - allUsages = append(allUsages, parseConversationRecords(fpath)...) 
- } - - sort.Slice(allUsages, func(i, j int) bool { - return allUsages[i].timestamp.Before(allUsages[j].timestamp) - }) - - seenForBlock := make(map[string]bool) - for _, u := range allUsages { - if u.usage == nil { - continue - } - key := conversationUsageDedupKey(u) - if key != "" { - if seenForBlock[key] { - continue - } - seenForBlock[key] = true - } - if currentBlockEnd.IsZero() || u.timestamp.After(currentBlockEnd) { - currentBlockStart = floorToHour(u.timestamp) - currentBlockEnd = currentBlockStart.Add(billingBlockDuration) - blockStartCandidates = append(blockStartCandidates, currentBlockStart) - } - } - - inCurrentBlock = false - if !currentBlockEnd.IsZero() && now.Before(currentBlockEnd) && (now.Equal(currentBlockStart) || now.After(currentBlockStart)) { - inCurrentBlock = true - } - - for _, u := range allUsages { - for idx, item := range u.content { - if item.Type != "tool_use" { - continue - } - toolKey := conversationToolDedupKey(u, idx, item) - if seenToolKeys[toolKey] { - continue - } - seenToolKeys[toolKey] = true - toolName := strings.ToLower(strings.TrimSpace(item.Name)) - if toolName == "" { - toolName = "unknown" - } - toolUsageCounts[toolName]++ - allTimeToolCalls++ - - pathCandidates := extractToolPathCandidates(item.Input) - for _, candidate := range pathCandidates { - if lang := inferLanguageFromPath(candidate); lang != "" { - languageUsageCounts[lang]++ - } - if isMutatingTool(toolName) { - changedFiles[candidate] = true - } - } - if isMutatingTool(toolName) { - added, removed := estimateToolLineDelta(toolName, item.Input) - allTimeLinesAdded += added - allTimeLinesRemoved += removed - } - if cmd := extractToolCommand(item.Input); cmd != "" && strings.Contains(strings.ToLower(cmd), "git commit") { - if !seenCommitCommands[cmd] { - seenCommitCommands[cmd] = true - allTimeCommitCount++ - } - } - - if u.timestamp.After(todayStart) || u.timestamp.Equal(todayStart) { - todayToolCalls++ - } - if u.timestamp.After(weekStart) || 
u.timestamp.Equal(weekStart) { - weeklyToolCalls++ - } - } - - if u.usage == nil { - continue - } - usageKey := conversationUsageDedupKey(u) - if usageKey != "" && seenUsageKeys[usageKey] { - continue - } - if usageKey != "" { - seenUsageKeys[usageKey] = true - } - - modelID := sanitizeModelName(u.model) - modelTotalsEntry := ensureTotals(modelTotals, modelID) - projectID := normalizeProject(u.cwd, u.sourcePath) - clientID := projectID - clientTotalsEntry := ensureTotals(clientTotals, clientID) - projectTotalsEntry := ensureTotals(projectTotals, projectID) - agentID := normalizeAgent(u.sourcePath) - agentTotalsEntry := ensureTotals(agentTotals, agentID) - - if u.sessionID != "" { - ensureSessionSet(clientSessions, clientID)[u.sessionID] = true - ensureSessionSet(projectSessions, projectID)[u.sessionID] = true - ensureSessionSet(agentSessions, agentID)[u.sessionID] = true - if u.timestamp.After(todayStart) || u.timestamp.Equal(todayStart) { - todaySessions[u.sessionID] = true - } - if u.timestamp.After(weekStart) || u.timestamp.Equal(weekStart) { - weeklySessions[u.sessionID] = true - } - } - - cost := estimateCost(u.model, u.usage) - allTimeCostUSD += cost - allTimeEntries++ - modelTotalsEntry.input += float64(u.usage.InputTokens) - modelTotalsEntry.output += float64(u.usage.OutputTokens) - modelTotalsEntry.cached += float64(u.usage.CacheReadInputTokens) - modelTotalsEntry.cacheCreate += float64(u.usage.CacheCreationInputTokens) - modelTotalsEntry.reasoning += float64(u.usage.ReasoningTokens) - modelTotalsEntry.cost += cost - if u.usage.CacheCreation != nil { - modelTotalsEntry.cache5m += float64(u.usage.CacheCreation.Ephemeral5mInputTokens) - modelTotalsEntry.cache1h += float64(u.usage.CacheCreation.Ephemeral1hInputTokens) - allTimeCacheCreate5m += u.usage.CacheCreation.Ephemeral5mInputTokens - allTimeCacheCreate1h += u.usage.CacheCreation.Ephemeral1hInputTokens - } - if u.usage.ServerToolUse != nil { - modelTotalsEntry.webSearch += 
float64(u.usage.ServerToolUse.WebSearchRequests) - modelTotalsEntry.webFetch += float64(u.usage.ServerToolUse.WebFetchRequests) - } - - tokenVolume := float64(u.usage.InputTokens + u.usage.OutputTokens + u.usage.CacheReadInputTokens + u.usage.CacheCreationInputTokens + u.usage.ReasoningTokens) - clientTotalsEntry.input += float64(u.usage.InputTokens) - clientTotalsEntry.output += float64(u.usage.OutputTokens) - clientTotalsEntry.cached += float64(u.usage.CacheReadInputTokens) - clientTotalsEntry.cacheCreate += float64(u.usage.CacheCreationInputTokens) - clientTotalsEntry.reasoning += float64(u.usage.ReasoningTokens) - clientTotalsEntry.cost += cost - clientTotalsEntry.sessions = float64(len(clientSessions[clientID])) - - projectTotalsEntry.input += float64(u.usage.InputTokens) - projectTotalsEntry.output += float64(u.usage.OutputTokens) - projectTotalsEntry.cached += float64(u.usage.CacheReadInputTokens) - projectTotalsEntry.cacheCreate += float64(u.usage.CacheCreationInputTokens) - projectTotalsEntry.reasoning += float64(u.usage.ReasoningTokens) - projectTotalsEntry.cost += cost - projectTotalsEntry.sessions = float64(len(projectSessions[projectID])) - - agentTotalsEntry.input += float64(u.usage.InputTokens) - agentTotalsEntry.output += float64(u.usage.OutputTokens) - agentTotalsEntry.cached += float64(u.usage.CacheReadInputTokens) - agentTotalsEntry.cacheCreate += float64(u.usage.CacheCreationInputTokens) - agentTotalsEntry.reasoning += float64(u.usage.ReasoningTokens) - agentTotalsEntry.cost += cost - agentTotalsEntry.sessions = float64(len(agentSessions[agentID])) - - allTimeInputTokens += u.usage.InputTokens - allTimeOutputTokens += u.usage.OutputTokens - allTimeCacheRead += u.usage.CacheReadInputTokens - allTimeCacheCreate += u.usage.CacheCreationInputTokens - allTimeReasoning += u.usage.ReasoningTokens - if u.usage.ServerToolUse != nil { - allTimeWebSearch += u.usage.ServerToolUse.WebSearchRequests - allTimeWebFetch += u.usage.ServerToolUse.WebFetchRequests 
- } - - day := u.timestamp.Format("2006-01-02") - dailyTokenTotals[day] += u.usage.InputTokens + u.usage.OutputTokens - dailyMessages[day]++ - dailyCost[day] += cost - if dailyModelTokens[day] == nil { - dailyModelTokens[day] = make(map[string]int) - } - dailyModelTokens[day][u.model] += u.usage.InputTokens + u.usage.OutputTokens - if dailyClientTokens[day] == nil { - dailyClientTokens[day] = make(map[string]float64) - } - dailyClientTokens[day][clientID] += tokenVolume - - if tier := strings.ToLower(strings.TrimSpace(u.usage.ServiceTier)); tier != "" { - serviceTierTotals[tier] += tokenVolume - } - if geo := strings.ToLower(strings.TrimSpace(u.usage.InferenceGeo)); geo != "" { - inferenceGeoTotals[geo] += tokenVolume - } - - if u.timestamp.After(todayStart) || u.timestamp.Equal(todayStart) { - todayCostUSD += cost - todayInputTokens += u.usage.InputTokens - todayOutputTokens += u.usage.OutputTokens - todayCacheRead += u.usage.CacheReadInputTokens - todayCacheCreate += u.usage.CacheCreationInputTokens - todayReasoning += u.usage.ReasoningTokens - if u.usage.CacheCreation != nil { - todayCacheCreate5m += u.usage.CacheCreation.Ephemeral5mInputTokens - todayCacheCreate1h += u.usage.CacheCreation.Ephemeral1hInputTokens - } - if u.usage.ServerToolUse != nil { - todayWebSearch += u.usage.ServerToolUse.WebSearchRequests - todayWebFetch += u.usage.ServerToolUse.WebFetchRequests - } - todayMessages++ - todayModels[modelID] = true - } - - if u.timestamp.After(weekStart) || u.timestamp.Equal(weekStart) { - weeklyCostUSD += cost - weeklyInputTokens += u.usage.InputTokens - weeklyOutputTokens += u.usage.OutputTokens - weeklyCacheRead += u.usage.CacheReadInputTokens - weeklyCacheCreate += u.usage.CacheCreationInputTokens - weeklyReasoning += u.usage.ReasoningTokens - if u.usage.CacheCreation != nil { - weeklyCacheCreate5m += u.usage.CacheCreation.Ephemeral5mInputTokens - weeklyCacheCreate1h += u.usage.CacheCreation.Ephemeral1hInputTokens - } - if u.usage.ServerToolUse != nil { - 
weeklyWebSearch += u.usage.ServerToolUse.WebSearchRequests - weeklyWebFetch += u.usage.ServerToolUse.WebFetchRequests - } - weeklyMessages++ - } - - if inCurrentBlock && (u.timestamp.After(currentBlockStart) || u.timestamp.Equal(currentBlockStart)) && u.timestamp.Before(currentBlockEnd) { - blockCostUSD += cost - blockInputTokens += u.usage.InputTokens - blockOutputTokens += u.usage.OutputTokens - blockCacheRead += u.usage.CacheReadInputTokens - blockCacheCreate += u.usage.CacheCreationInputTokens - blockMessages++ - blockModels[modelID] = true - } - } - - for model, totals := range modelTotals { - modelPrefix := "model_" + model - setMetricMax(snap, modelPrefix+"_input_tokens", totals.input, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_output_tokens", totals.output, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cached_tokens", totals.cached, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cache_creation_tokens", totals.cacheCreate, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cache_creation_5m_tokens", totals.cache5m, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cache_creation_1h_tokens", totals.cache1h, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_reasoning_tokens", totals.reasoning, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_web_search_requests", totals.webSearch, "requests", "all-time estimate") - setMetricMax(snap, modelPrefix+"_web_fetch_requests", totals.webFetch, "requests", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cost_usd", totals.cost, "USD", "all-time estimate") - } - - for client, totals := range clientTotals { - key := "client_" + client - setMetricMax(snap, key+"_input_tokens", totals.input, "tokens", "all-time") - setMetricMax(snap, key+"_output_tokens", totals.output, "tokens", "all-time") - setMetricMax(snap, key+"_cached_tokens", totals.cached, "tokens", "all-time") - setMetricMax(snap, 
key+"_reasoning_tokens", totals.reasoning, "tokens", "all-time") - setMetricMax(snap, key+"_total_tokens", totals.input+totals.output+totals.cached+totals.cacheCreate+totals.reasoning, "tokens", "all-time") - setMetricMax(snap, key+"_sessions", totals.sessions, "sessions", "all-time") - } - - if snap.DailySeries == nil { - snap.DailySeries = make(map[string][]core.TimePoint) - } - dates := lo.Keys(dailyTokenTotals) - sort.Strings(dates) - - if len(snap.DailySeries["messages"]) == 0 && len(dates) > 0 { - for _, d := range dates { - snap.DailySeries["messages"] = append(snap.DailySeries["messages"], core.TimePoint{Date: d, Value: float64(dailyMessages[d])}) - snap.DailySeries["tokens_total"] = append(snap.DailySeries["tokens_total"], core.TimePoint{Date: d, Value: float64(dailyTokenTotals[d])}) - snap.DailySeries["cost"] = append(snap.DailySeries["cost"], core.TimePoint{Date: d, Value: dailyCost[d]}) - } - - allModels := make(map[string]int64) - for _, dm := range dailyModelTokens { - for model, tokens := range dm { - allModels[model] += int64(tokens) - } - } - type mVol struct { - name string - total int64 - } - var mv []mVol - for m, t := range allModels { - mv = append(mv, mVol{m, t}) - } - sort.Slice(mv, func(i, j int) bool { return mv[i].total > mv[j].total }) - limit := 5 - if len(mv) < limit { - limit = len(mv) - } - for i := 0; i < limit; i++ { - model := mv[i].name - key := fmt.Sprintf("tokens_%s", sanitizeModelName(model)) - for _, d := range dates { - tokens := dailyModelTokens[d][model] - snap.DailySeries[key] = append(snap.DailySeries[key], - core.TimePoint{Date: d, Value: float64(tokens)}) - } - } - } - - if len(dates) > 0 { - clientNames := make(map[string]bool) - for _, byClient := range dailyClientTokens { - for client := range byClient { - clientNames[client] = true - } - } - for client := range clientNames { - key := "tokens_client_" + client - for _, d := range dates { - snap.DailySeries[key] = append(snap.DailySeries[key], core.TimePoint{ - Date: 
d, - Value: dailyClientTokens[d][client], - }) - } - } - } - - if todayCostUSD > 0 { - snap.Metrics["today_api_cost"] = core.Metric{ - Used: core.Float64Ptr(todayCostUSD), - Unit: "USD", - Window: "since midnight", - } - } - if todayInputTokens > 0 { - in := float64(todayInputTokens) - snap.Metrics["today_input_tokens"] = core.Metric{ - Used: &in, - Unit: "tokens", - Window: "since midnight", - } - } - if todayOutputTokens > 0 { - out := float64(todayOutputTokens) - snap.Metrics["today_output_tokens"] = core.Metric{ - Used: &out, - Unit: "tokens", - Window: "since midnight", - } - } - if todayCacheRead > 0 { - cacheRead := float64(todayCacheRead) - snap.Metrics["today_cache_read_tokens"] = core.Metric{ - Used: &cacheRead, - Unit: "tokens", - Window: "since midnight", - } - } - if todayCacheCreate > 0 { - cacheCreate := float64(todayCacheCreate) - snap.Metrics["today_cache_create_tokens"] = core.Metric{ - Used: &cacheCreate, - Unit: "tokens", - Window: "since midnight", - } - } - if todayMessages > 0 { - msgs := float64(todayMessages) - setMetricMax(snap, "messages_today", msgs, "messages", "since midnight") - } - if len(todaySessions) > 0 { - setMetricMax(snap, "sessions_today", float64(len(todaySessions)), "sessions", "since midnight") - } - if todayToolCalls > 0 { - setMetricMax(snap, "tool_calls_today", float64(todayToolCalls), "calls", "since midnight") - } - if todayReasoning > 0 { - v := float64(todayReasoning) - snap.Metrics["today_reasoning_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "since midnight", - } - } - if todayCacheCreate5m > 0 { - v := float64(todayCacheCreate5m) - snap.Metrics["today_cache_create_5m_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "since midnight", - } - } - if todayCacheCreate1h > 0 { - v := float64(todayCacheCreate1h) - snap.Metrics["today_cache_create_1h_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "since midnight", - } - } - if todayWebSearch > 0 { - v := 
float64(todayWebSearch) - snap.Metrics["today_web_search_requests"] = core.Metric{ - Used: &v, - Unit: "requests", - Window: "since midnight", - } - } - if todayWebFetch > 0 { - v := float64(todayWebFetch) - snap.Metrics["today_web_fetch_requests"] = core.Metric{ - Used: &v, - Unit: "requests", - Window: "since midnight", - } - } - - if weeklyCostUSD > 0 { - snap.Metrics["7d_api_cost"] = core.Metric{ - Used: core.Float64Ptr(weeklyCostUSD), - Unit: "USD", - Window: "rolling 7 days", - } - } - if weeklyMessages > 0 { - wm := float64(weeklyMessages) - snap.Metrics["7d_messages"] = core.Metric{ - Used: &wm, - Unit: "messages", - Window: "rolling 7 days", - } - wIn := float64(weeklyInputTokens) - snap.Metrics["7d_input_tokens"] = core.Metric{ - Used: &wIn, - Unit: "tokens", - Window: "rolling 7 days", - } - wOut := float64(weeklyOutputTokens) - snap.Metrics["7d_output_tokens"] = core.Metric{ - Used: &wOut, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyCacheRead > 0 { - v := float64(weeklyCacheRead) - snap.Metrics["7d_cache_read_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyCacheCreate > 0 { - v := float64(weeklyCacheCreate) - snap.Metrics["7d_cache_create_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyCacheCreate5m > 0 { - v := float64(weeklyCacheCreate5m) - snap.Metrics["7d_cache_create_5m_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyCacheCreate1h > 0 { - v := float64(weeklyCacheCreate1h) - snap.Metrics["7d_cache_create_1h_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyReasoning > 0 { - v := float64(weeklyReasoning) - snap.Metrics["7d_reasoning_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyToolCalls > 0 { - setMetricMax(snap, "7d_tool_calls", float64(weeklyToolCalls), "calls", 
"rolling 7 days") - } - if weeklyWebSearch > 0 { - v := float64(weeklyWebSearch) - snap.Metrics["7d_web_search_requests"] = core.Metric{ - Used: &v, - Unit: "requests", - Window: "rolling 7 days", - } - } - if weeklyWebFetch > 0 { - v := float64(weeklyWebFetch) - snap.Metrics["7d_web_fetch_requests"] = core.Metric{ - Used: &v, - Unit: "requests", - Window: "rolling 7 days", - } - } - if len(weeklySessions) > 0 { - setMetricMax(snap, "7d_sessions", float64(len(weeklySessions)), "sessions", "rolling 7 days") - } - - if todayMessages > 0 { - snap.Raw["jsonl_today_date"] = today - snap.Raw["jsonl_today_messages"] = fmt.Sprintf("%d", todayMessages) - snap.Raw["jsonl_today_input_tokens"] = fmt.Sprintf("%d", todayInputTokens) - snap.Raw["jsonl_today_output_tokens"] = fmt.Sprintf("%d", todayOutputTokens) - snap.Raw["jsonl_today_cache_read_tokens"] = fmt.Sprintf("%d", todayCacheRead) - snap.Raw["jsonl_today_cache_create_tokens"] = fmt.Sprintf("%d", todayCacheCreate) - snap.Raw["jsonl_today_reasoning_tokens"] = fmt.Sprintf("%d", todayReasoning) - snap.Raw["jsonl_today_web_search_requests"] = fmt.Sprintf("%d", todayWebSearch) - snap.Raw["jsonl_today_web_fetch_requests"] = fmt.Sprintf("%d", todayWebFetch) - - models := lo.Keys(todayModels) - sort.Strings(models) - snap.Raw["jsonl_today_models"] = strings.Join(models, ", ") - } - - if inCurrentBlock { - snap.Metrics["5h_block_cost"] = core.Metric{ - Used: core.Float64Ptr(blockCostUSD), - Unit: "USD", - Window: fmt.Sprintf("%s – %s", currentBlockStart.Format("15:04"), currentBlockEnd.Format("15:04")), - } - - blockIn := float64(blockInputTokens) - snap.Metrics["5h_block_input"] = core.Metric{ - Used: &blockIn, - Unit: "tokens", - Window: "current 5h block", - } - - blockOut := float64(blockOutputTokens) - snap.Metrics["5h_block_output"] = core.Metric{ - Used: &blockOut, - Unit: "tokens", - Window: "current 5h block", - } - - blockMsgs := float64(blockMessages) - snap.Metrics["5h_block_msgs"] = core.Metric{ - Used: &blockMsgs, - 
Unit: "messages", - Window: "current 5h block", - } - if blockCacheRead > 0 { - setMetricMax(snap, "5h_block_cache_read_tokens", float64(blockCacheRead), "tokens", "current 5h block") - } - if blockCacheCreate > 0 { - setMetricMax(snap, "5h_block_cache_create_tokens", float64(blockCacheCreate), "tokens", "current 5h block") - } - - remaining := currentBlockEnd.Sub(now) - if remaining > 0 { - snap.Resets["billing_block"] = currentBlockEnd - snap.Raw["block_time_remaining"] = fmt.Sprintf("%s", remaining.Round(time.Minute)) - - elapsed := now.Sub(currentBlockStart) - progress := math.Min(elapsed.Seconds()/billingBlockDuration.Seconds()*100, 100) - snap.Raw["block_progress_pct"] = fmt.Sprintf("%.0f", progress) - } - - snap.Raw["block_start"] = currentBlockStart.Format(time.RFC3339) - snap.Raw["block_end"] = currentBlockEnd.Format(time.RFC3339) - - blockModelList := lo.Keys(blockModels) - sort.Strings(blockModelList) - snap.Raw["block_models"] = strings.Join(blockModelList, ", ") - - elapsed := now.Sub(currentBlockStart) - if elapsed > time.Minute && blockCostUSD > 0 { - burnRate := blockCostUSD / elapsed.Hours() - snap.Metrics["burn_rate"] = core.Metric{ - Used: core.Float64Ptr(burnRate), - Unit: "USD/h", - Window: "current 5h block", - } - snap.Raw["burn_rate"] = fmt.Sprintf("$%.2f/hour", burnRate) - } - } - - if allTimeCostUSD > 0 { - snap.Metrics["all_time_api_cost"] = core.Metric{ - Used: core.Float64Ptr(allTimeCostUSD), - Unit: "USD", - Window: "all-time estimate", - } - } - if allTimeInputTokens > 0 { - setMetricMax(snap, "all_time_input_tokens", float64(allTimeInputTokens), "tokens", "all-time estimate") - } - if allTimeOutputTokens > 0 { - setMetricMax(snap, "all_time_output_tokens", float64(allTimeOutputTokens), "tokens", "all-time estimate") - } - if allTimeCacheRead > 0 { - setMetricMax(snap, "all_time_cache_read_tokens", float64(allTimeCacheRead), "tokens", "all-time estimate") - } - if allTimeCacheCreate > 0 { - setMetricMax(snap, 
"all_time_cache_create_tokens", float64(allTimeCacheCreate), "tokens", "all-time estimate") - } - if allTimeCacheCreate5m > 0 { - setMetricMax(snap, "all_time_cache_create_5m_tokens", float64(allTimeCacheCreate5m), "tokens", "all-time estimate") - } - if allTimeCacheCreate1h > 0 { - setMetricMax(snap, "all_time_cache_create_1h_tokens", float64(allTimeCacheCreate1h), "tokens", "all-time estimate") - } - if allTimeReasoning > 0 { - setMetricMax(snap, "all_time_reasoning_tokens", float64(allTimeReasoning), "tokens", "all-time estimate") - } - if allTimeToolCalls > 0 { - setMetricMax(snap, "all_time_tool_calls", float64(allTimeToolCalls), "calls", "all-time estimate") - setMetricMax(snap, "tool_calls_total", float64(allTimeToolCalls), "calls", "all-time estimate") - setMetricMax(snap, "tool_completed", float64(allTimeToolCalls), "calls", "all-time estimate") - setMetricMax(snap, "tool_success_rate", 100.0, "%", "all-time estimate") - } - if len(seenUsageKeys) > 0 { - setMetricMax(snap, "total_prompts", float64(len(seenUsageKeys)), "prompts", "all-time estimate") - } - if len(changedFiles) > 0 { - setMetricMax(snap, "composer_files_changed", float64(len(changedFiles)), "files", "all-time estimate") - } - if allTimeLinesAdded > 0 { - setMetricMax(snap, "composer_lines_added", float64(allTimeLinesAdded), "lines", "all-time estimate") - } - if allTimeLinesRemoved > 0 { - setMetricMax(snap, "composer_lines_removed", float64(allTimeLinesRemoved), "lines", "all-time estimate") - } - if allTimeCommitCount > 0 { - setMetricMax(snap, "scored_commits", float64(allTimeCommitCount), "commits", "all-time estimate") - } - if allTimeLinesAdded > 0 || allTimeLinesRemoved > 0 { - hundred := 100.0 - zero := 0.0 - snap.Metrics["ai_code_percentage"] = core.Metric{ - Used: &hundred, - Remaining: &zero, - Limit: &hundred, - Unit: "%", - Window: "all-time estimate", - } - } - for lang, count := range languageUsageCounts { - if count <= 0 { - continue - } - setMetricMax(snap, 
"lang_"+sanitizeModelName(lang), float64(count), "requests", "all-time estimate") - } - for toolName, count := range toolUsageCounts { - if count <= 0 { - continue - } - setMetricMax(snap, "tool_"+sanitizeModelName(toolName), float64(count), "calls", "all-time estimate") - } - if allTimeWebSearch > 0 { - setMetricMax(snap, "all_time_web_search_requests", float64(allTimeWebSearch), "requests", "all-time estimate") - } - if allTimeWebFetch > 0 { - setMetricMax(snap, "all_time_web_fetch_requests", float64(allTimeWebFetch), "requests", "all-time estimate") - } - - snap.Raw["tool_usage"] = summarizeCountMap(toolUsageCounts, 6) - snap.Raw["language_usage"] = summarizeCountMap(languageUsageCounts, 8) - snap.Raw["project_usage"] = summarizeTotalsMap(projectTotals, true, 6) - snap.Raw["agent_usage"] = summarizeTotalsMap(agentTotals, false, 4) - snap.Raw["service_tier_usage"] = summarizeFloatMap(serviceTierTotals, "tok", 4) - snap.Raw["inference_geo_usage"] = summarizeFloatMap(inferenceGeoTotals, "tok", 4) - if allTimeCacheRead > 0 || allTimeCacheCreate > 0 { - snap.Raw["cache_usage"] = fmt.Sprintf("read %s · create %s (1h %s, 5m %s)", - shortTokenCount(float64(allTimeCacheRead)), - shortTokenCount(float64(allTimeCacheCreate)), - shortTokenCount(float64(allTimeCacheCreate1h)), - shortTokenCount(float64(allTimeCacheCreate5m)), - ) - } - snap.Raw["project_count"] = fmt.Sprintf("%d", len(projectTotals)) - snap.Raw["tool_count"] = fmt.Sprintf("%d", len(toolUsageCounts)) - - snap.Raw["jsonl_total_entries"] = fmt.Sprintf("%d", allTimeEntries) - snap.Raw["jsonl_total_blocks"] = fmt.Sprintf("%d", len(blockStartCandidates)) - snap.Raw["jsonl_unique_requests"] = fmt.Sprintf("%d", len(seenUsageKeys)) - buildModelUsageSummaryRaw(snap) - - return nil -} diff --git a/internal/providers/claude_code/conversation_usage.go b/internal/providers/claude_code/conversation_usage.go new file mode 100644 index 0000000..1d77e7f --- /dev/null +++ b/internal/providers/claude_code/conversation_usage.go 
@@ -0,0 +1,829 @@ +package claude_code + +import ( + "fmt" + "math" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/samber/lo" +) + +func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, snap *core.UsageSnapshot) error { + jsonlFiles := collectJSONLFiles(projectsDir) + if altProjectsDir != "" { + jsonlFiles = append(jsonlFiles, collectJSONLFiles(altProjectsDir)...) + } + jsonlFiles = lo.Uniq(lo.Compact(jsonlFiles)) + sort.Strings(jsonlFiles) + + if len(jsonlFiles) == 0 { + return fmt.Errorf("no JSONL conversation files found") + } + + snap.Raw["jsonl_files_found"] = fmt.Sprintf("%d", len(jsonlFiles)) + + now := time.Now() + today := now.Format("2006-01-02") + todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + weekStart := now.Add(-7 * 24 * time.Hour) + + var ( + todayCostUSD float64 + todayInputTokens int + todayOutputTokens int + todayCacheRead int + todayCacheCreate int + todayMessages int + todayModels = make(map[string]bool) + + weeklyCostUSD float64 + weeklyInputTokens int + weeklyOutputTokens int + weeklyMessages int + + currentBlockStart time.Time + currentBlockEnd time.Time + blockCostUSD float64 + blockInputTokens int + blockOutputTokens int + blockCacheRead int + blockCacheCreate int + blockMessages int + blockModels = make(map[string]bool) + inCurrentBlock bool + + allTimeCostUSD float64 + allTimeEntries int + ) + + blockStartCandidates := []time.Time{} + + var allUsages []conversationRecord + modelTotals := make(map[string]*modelUsageTotals) + clientTotals := make(map[string]*modelUsageTotals) + projectTotals := make(map[string]*modelUsageTotals) + agentTotals := make(map[string]*modelUsageTotals) + serviceTierTotals := make(map[string]float64) + inferenceGeoTotals := make(map[string]float64) + toolUsageCounts := make(map[string]int) + languageUsageCounts := make(map[string]int) + changedFiles := make(map[string]bool) + 
seenCommitCommands := make(map[string]bool) + clientSessions := make(map[string]map[string]bool) + projectSessions := make(map[string]map[string]bool) + agentSessions := make(map[string]map[string]bool) + seenUsageKeys := make(map[string]bool) + seenToolKeys := make(map[string]bool) + dailyClientTokens := make(map[string]map[string]float64) + dailyTokenTotals := make(map[string]int) + dailyMessages := make(map[string]int) + dailyCost := make(map[string]float64) + dailyModelTokens := make(map[string]map[string]int) + todaySessions := make(map[string]bool) + weeklySessions := make(map[string]bool) + var ( + todayCacheCreate5m int + todayCacheCreate1h int + todayReasoning int + todayToolCalls int + todayWebSearch int + todayWebFetch int + weeklyCacheRead int + weeklyCacheCreate int + weeklyCacheCreate5m int + weeklyCacheCreate1h int + weeklyReasoning int + weeklyToolCalls int + weeklyWebSearch int + weeklyWebFetch int + allTimeInputTokens int + allTimeOutputTokens int + allTimeCacheRead int + allTimeCacheCreate int + allTimeCacheCreate5m int + allTimeCacheCreate1h int + allTimeReasoning int + allTimeToolCalls int + allTimeWebSearch int + allTimeWebFetch int + allTimeLinesAdded int + allTimeLinesRemoved int + allTimeCommitCount int + ) + + ensureTotals := func(m map[string]*modelUsageTotals, key string) *modelUsageTotals { + if _, ok := m[key]; !ok { + m[key] = &modelUsageTotals{} + } + return m[key] + } + ensureSessionSet := func(m map[string]map[string]bool, key string) map[string]bool { + if _, ok := m[key]; !ok { + m[key] = make(map[string]bool) + } + return m[key] + } + normalizeAgent := func(path string) string { + if strings.Contains(path, string(filepath.Separator)+"subagents"+string(filepath.Separator)) { + return "subagents" + } + return "main" + } + normalizeProject := func(cwd, sourcePath string) string { + if cwd != "" { + base := filepath.Base(cwd) + if base != "" && base != "." 
&& base != string(filepath.Separator) { + return sanitizeModelName(base) + } + return sanitizeModelName(cwd) + } + dir := filepath.Base(filepath.Dir(sourcePath)) + if dir == "" || dir == "." { + return "unknown" + } + return sanitizeModelName(dir) + } + for _, fpath := range jsonlFiles { + allUsages = append(allUsages, parseConversationRecords(fpath)...) + } + + sort.Slice(allUsages, func(i, j int) bool { + return allUsages[i].timestamp.Before(allUsages[j].timestamp) + }) + + seenForBlock := make(map[string]bool) + for _, u := range allUsages { + if u.usage == nil { + continue + } + key := conversationUsageDedupKey(u) + if key != "" { + if seenForBlock[key] { + continue + } + seenForBlock[key] = true + } + if currentBlockEnd.IsZero() || u.timestamp.After(currentBlockEnd) { + currentBlockStart = floorToHour(u.timestamp) + currentBlockEnd = currentBlockStart.Add(billingBlockDuration) + blockStartCandidates = append(blockStartCandidates, currentBlockStart) + } + } + + inCurrentBlock = false + if !currentBlockEnd.IsZero() && now.Before(currentBlockEnd) && (now.Equal(currentBlockStart) || now.After(currentBlockStart)) { + inCurrentBlock = true + } + + for _, u := range allUsages { + for idx, item := range u.content { + if item.Type != "tool_use" { + continue + } + toolKey := conversationToolDedupKey(u, idx, item) + if seenToolKeys[toolKey] { + continue + } + seenToolKeys[toolKey] = true + toolName := strings.ToLower(strings.TrimSpace(item.Name)) + if toolName == "" { + toolName = "unknown" + } + toolUsageCounts[toolName]++ + allTimeToolCalls++ + + pathCandidates := extractToolPathCandidates(item.Input) + for _, candidate := range pathCandidates { + if lang := inferLanguageFromPath(candidate); lang != "" { + languageUsageCounts[lang]++ + } + if isMutatingTool(toolName) { + changedFiles[candidate] = true + } + } + if isMutatingTool(toolName) { + added, removed := estimateToolLineDelta(toolName, item.Input) + allTimeLinesAdded += added + allTimeLinesRemoved += removed + } 
+ if cmd := extractToolCommand(item.Input); cmd != "" && strings.Contains(strings.ToLower(cmd), "git commit") { + if !seenCommitCommands[cmd] { + seenCommitCommands[cmd] = true + allTimeCommitCount++ + } + } + + if u.timestamp.After(todayStart) || u.timestamp.Equal(todayStart) { + todayToolCalls++ + } + if u.timestamp.After(weekStart) || u.timestamp.Equal(weekStart) { + weeklyToolCalls++ + } + } + + if u.usage == nil { + continue + } + usageKey := conversationUsageDedupKey(u) + if usageKey != "" && seenUsageKeys[usageKey] { + continue + } + if usageKey != "" { + seenUsageKeys[usageKey] = true + } + + modelID := sanitizeModelName(u.model) + modelTotalsEntry := ensureTotals(modelTotals, modelID) + projectID := normalizeProject(u.cwd, u.sourcePath) + clientID := projectID + clientTotalsEntry := ensureTotals(clientTotals, clientID) + projectTotalsEntry := ensureTotals(projectTotals, projectID) + agentID := normalizeAgent(u.sourcePath) + agentTotalsEntry := ensureTotals(agentTotals, agentID) + + if u.sessionID != "" { + ensureSessionSet(clientSessions, clientID)[u.sessionID] = true + ensureSessionSet(projectSessions, projectID)[u.sessionID] = true + ensureSessionSet(agentSessions, agentID)[u.sessionID] = true + if u.timestamp.After(todayStart) || u.timestamp.Equal(todayStart) { + todaySessions[u.sessionID] = true + } + if u.timestamp.After(weekStart) || u.timestamp.Equal(weekStart) { + weeklySessions[u.sessionID] = true + } + } + + cost := estimateCost(u.model, u.usage) + allTimeCostUSD += cost + allTimeEntries++ + modelTotalsEntry.input += float64(u.usage.InputTokens) + modelTotalsEntry.output += float64(u.usage.OutputTokens) + modelTotalsEntry.cached += float64(u.usage.CacheReadInputTokens) + modelTotalsEntry.cacheCreate += float64(u.usage.CacheCreationInputTokens) + modelTotalsEntry.reasoning += float64(u.usage.ReasoningTokens) + modelTotalsEntry.cost += cost + if u.usage.CacheCreation != nil { + modelTotalsEntry.cache5m += 
float64(u.usage.CacheCreation.Ephemeral5mInputTokens) + modelTotalsEntry.cache1h += float64(u.usage.CacheCreation.Ephemeral1hInputTokens) + allTimeCacheCreate5m += u.usage.CacheCreation.Ephemeral5mInputTokens + allTimeCacheCreate1h += u.usage.CacheCreation.Ephemeral1hInputTokens + } + if u.usage.ServerToolUse != nil { + modelTotalsEntry.webSearch += float64(u.usage.ServerToolUse.WebSearchRequests) + modelTotalsEntry.webFetch += float64(u.usage.ServerToolUse.WebFetchRequests) + } + + tokenVolume := float64(u.usage.InputTokens + u.usage.OutputTokens + u.usage.CacheReadInputTokens + u.usage.CacheCreationInputTokens + u.usage.ReasoningTokens) + clientTotalsEntry.input += float64(u.usage.InputTokens) + clientTotalsEntry.output += float64(u.usage.OutputTokens) + clientTotalsEntry.cached += float64(u.usage.CacheReadInputTokens) + clientTotalsEntry.cacheCreate += float64(u.usage.CacheCreationInputTokens) + clientTotalsEntry.reasoning += float64(u.usage.ReasoningTokens) + clientTotalsEntry.cost += cost + clientTotalsEntry.sessions = float64(len(clientSessions[clientID])) + + projectTotalsEntry.input += float64(u.usage.InputTokens) + projectTotalsEntry.output += float64(u.usage.OutputTokens) + projectTotalsEntry.cached += float64(u.usage.CacheReadInputTokens) + projectTotalsEntry.cacheCreate += float64(u.usage.CacheCreationInputTokens) + projectTotalsEntry.reasoning += float64(u.usage.ReasoningTokens) + projectTotalsEntry.cost += cost + projectTotalsEntry.sessions = float64(len(projectSessions[projectID])) + + agentTotalsEntry.input += float64(u.usage.InputTokens) + agentTotalsEntry.output += float64(u.usage.OutputTokens) + agentTotalsEntry.cached += float64(u.usage.CacheReadInputTokens) + agentTotalsEntry.cacheCreate += float64(u.usage.CacheCreationInputTokens) + agentTotalsEntry.reasoning += float64(u.usage.ReasoningTokens) + agentTotalsEntry.cost += cost + agentTotalsEntry.sessions = float64(len(agentSessions[agentID])) + + allTimeInputTokens += u.usage.InputTokens + 
allTimeOutputTokens += u.usage.OutputTokens + allTimeCacheRead += u.usage.CacheReadInputTokens + allTimeCacheCreate += u.usage.CacheCreationInputTokens + allTimeReasoning += u.usage.ReasoningTokens + if u.usage.ServerToolUse != nil { + allTimeWebSearch += u.usage.ServerToolUse.WebSearchRequests + allTimeWebFetch += u.usage.ServerToolUse.WebFetchRequests + } + + day := u.timestamp.Format("2006-01-02") + dailyTokenTotals[day] += u.usage.InputTokens + u.usage.OutputTokens + dailyMessages[day]++ + dailyCost[day] += cost + if dailyModelTokens[day] == nil { + dailyModelTokens[day] = make(map[string]int) + } + dailyModelTokens[day][u.model] += u.usage.InputTokens + u.usage.OutputTokens + if dailyClientTokens[day] == nil { + dailyClientTokens[day] = make(map[string]float64) + } + dailyClientTokens[day][clientID] += tokenVolume + + if tier := strings.ToLower(strings.TrimSpace(u.usage.ServiceTier)); tier != "" { + serviceTierTotals[tier] += tokenVolume + } + if geo := strings.ToLower(strings.TrimSpace(u.usage.InferenceGeo)); geo != "" { + inferenceGeoTotals[geo] += tokenVolume + } + + if u.timestamp.After(todayStart) || u.timestamp.Equal(todayStart) { + todayCostUSD += cost + todayInputTokens += u.usage.InputTokens + todayOutputTokens += u.usage.OutputTokens + todayCacheRead += u.usage.CacheReadInputTokens + todayCacheCreate += u.usage.CacheCreationInputTokens + todayReasoning += u.usage.ReasoningTokens + if u.usage.CacheCreation != nil { + todayCacheCreate5m += u.usage.CacheCreation.Ephemeral5mInputTokens + todayCacheCreate1h += u.usage.CacheCreation.Ephemeral1hInputTokens + } + if u.usage.ServerToolUse != nil { + todayWebSearch += u.usage.ServerToolUse.WebSearchRequests + todayWebFetch += u.usage.ServerToolUse.WebFetchRequests + } + todayMessages++ + todayModels[modelID] = true + } + + if u.timestamp.After(weekStart) || u.timestamp.Equal(weekStart) { + weeklyCostUSD += cost + weeklyInputTokens += u.usage.InputTokens + weeklyOutputTokens += u.usage.OutputTokens + 
weeklyCacheRead += u.usage.CacheReadInputTokens + weeklyCacheCreate += u.usage.CacheCreationInputTokens + weeklyReasoning += u.usage.ReasoningTokens + if u.usage.CacheCreation != nil { + weeklyCacheCreate5m += u.usage.CacheCreation.Ephemeral5mInputTokens + weeklyCacheCreate1h += u.usage.CacheCreation.Ephemeral1hInputTokens + } + if u.usage.ServerToolUse != nil { + weeklyWebSearch += u.usage.ServerToolUse.WebSearchRequests + weeklyWebFetch += u.usage.ServerToolUse.WebFetchRequests + } + weeklyMessages++ + } + + if inCurrentBlock && (u.timestamp.After(currentBlockStart) || u.timestamp.Equal(currentBlockStart)) && u.timestamp.Before(currentBlockEnd) { + blockCostUSD += cost + blockInputTokens += u.usage.InputTokens + blockOutputTokens += u.usage.OutputTokens + blockCacheRead += u.usage.CacheReadInputTokens + blockCacheCreate += u.usage.CacheCreationInputTokens + blockMessages++ + blockModels[modelID] = true + } + } + + for model, totals := range modelTotals { + modelPrefix := "model_" + model + setMetricMax(snap, modelPrefix+"_input_tokens", totals.input, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_output_tokens", totals.output, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cached_tokens", totals.cached, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cache_creation_tokens", totals.cacheCreate, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cache_creation_5m_tokens", totals.cache5m, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cache_creation_1h_tokens", totals.cache1h, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_reasoning_tokens", totals.reasoning, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_web_search_requests", totals.webSearch, "requests", "all-time estimate") + setMetricMax(snap, modelPrefix+"_web_fetch_requests", totals.webFetch, "requests", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cost_usd", totals.cost, 
"USD", "all-time estimate") + } + + for client, totals := range clientTotals { + key := "client_" + client + setMetricMax(snap, key+"_input_tokens", totals.input, "tokens", "all-time") + setMetricMax(snap, key+"_output_tokens", totals.output, "tokens", "all-time") + setMetricMax(snap, key+"_cached_tokens", totals.cached, "tokens", "all-time") + setMetricMax(snap, key+"_reasoning_tokens", totals.reasoning, "tokens", "all-time") + setMetricMax(snap, key+"_total_tokens", totals.input+totals.output+totals.cached+totals.cacheCreate+totals.reasoning, "tokens", "all-time") + setMetricMax(snap, key+"_sessions", totals.sessions, "sessions", "all-time") + } + + if snap.DailySeries == nil { + snap.DailySeries = make(map[string][]core.TimePoint) + } + dates := lo.Keys(dailyTokenTotals) + sort.Strings(dates) + + if len(snap.DailySeries["messages"]) == 0 && len(dates) > 0 { + for _, d := range dates { + snap.DailySeries["messages"] = append(snap.DailySeries["messages"], core.TimePoint{Date: d, Value: float64(dailyMessages[d])}) + snap.DailySeries["tokens_total"] = append(snap.DailySeries["tokens_total"], core.TimePoint{Date: d, Value: float64(dailyTokenTotals[d])}) + snap.DailySeries["cost"] = append(snap.DailySeries["cost"], core.TimePoint{Date: d, Value: dailyCost[d]}) + } + + allModels := make(map[string]int64) + for _, dm := range dailyModelTokens { + for model, tokens := range dm { + allModels[model] += int64(tokens) + } + } + type mVol struct { + name string + total int64 + } + var mv []mVol + for m, t := range allModels { + mv = append(mv, mVol{m, t}) + } + sort.Slice(mv, func(i, j int) bool { return mv[i].total > mv[j].total }) + limit := 5 + if len(mv) < limit { + limit = len(mv) + } + for i := 0; i < limit; i++ { + model := mv[i].name + key := fmt.Sprintf("tokens_%s", sanitizeModelName(model)) + for _, d := range dates { + tokens := dailyModelTokens[d][model] + snap.DailySeries[key] = append(snap.DailySeries[key], + core.TimePoint{Date: d, Value: float64(tokens)}) + } 
+ } + } + + if len(dates) > 0 { + clientNames := make(map[string]bool) + for _, byClient := range dailyClientTokens { + for client := range byClient { + clientNames[client] = true + } + } + for client := range clientNames { + key := "tokens_client_" + client + for _, d := range dates { + snap.DailySeries[key] = append(snap.DailySeries[key], core.TimePoint{ + Date: d, + Value: dailyClientTokens[d][client], + }) + } + } + } + + if todayCostUSD > 0 { + snap.Metrics["today_api_cost"] = core.Metric{ + Used: core.Float64Ptr(todayCostUSD), + Unit: "USD", + Window: "since midnight", + } + } + if todayInputTokens > 0 { + in := float64(todayInputTokens) + snap.Metrics["today_input_tokens"] = core.Metric{ + Used: &in, + Unit: "tokens", + Window: "since midnight", + } + } + if todayOutputTokens > 0 { + out := float64(todayOutputTokens) + snap.Metrics["today_output_tokens"] = core.Metric{ + Used: &out, + Unit: "tokens", + Window: "since midnight", + } + } + if todayCacheRead > 0 { + cacheRead := float64(todayCacheRead) + snap.Metrics["today_cache_read_tokens"] = core.Metric{ + Used: &cacheRead, + Unit: "tokens", + Window: "since midnight", + } + } + if todayCacheCreate > 0 { + cacheCreate := float64(todayCacheCreate) + snap.Metrics["today_cache_create_tokens"] = core.Metric{ + Used: &cacheCreate, + Unit: "tokens", + Window: "since midnight", + } + } + if todayMessages > 0 { + msgs := float64(todayMessages) + setMetricMax(snap, "messages_today", msgs, "messages", "since midnight") + } + if len(todaySessions) > 0 { + setMetricMax(snap, "sessions_today", float64(len(todaySessions)), "sessions", "since midnight") + } + if todayToolCalls > 0 { + setMetricMax(snap, "tool_calls_today", float64(todayToolCalls), "calls", "since midnight") + } + if todayReasoning > 0 { + v := float64(todayReasoning) + snap.Metrics["today_reasoning_tokens"] = core.Metric{ + Used: &v, + Unit: "tokens", + Window: "since midnight", + } + } + if todayCacheCreate5m > 0 { + v := float64(todayCacheCreate5m) + 
snap.Metrics["today_cache_create_5m_tokens"] = core.Metric{ + Used: &v, + Unit: "tokens", + Window: "since midnight", + } + } + if todayCacheCreate1h > 0 { + v := float64(todayCacheCreate1h) + snap.Metrics["today_cache_create_1h_tokens"] = core.Metric{ + Used: &v, + Unit: "tokens", + Window: "since midnight", + } + } + if todayWebSearch > 0 { + v := float64(todayWebSearch) + snap.Metrics["today_web_search_requests"] = core.Metric{ + Used: &v, + Unit: "requests", + Window: "since midnight", + } + } + if todayWebFetch > 0 { + v := float64(todayWebFetch) + snap.Metrics["today_web_fetch_requests"] = core.Metric{ + Used: &v, + Unit: "requests", + Window: "since midnight", + } + } + + if weeklyCostUSD > 0 { + snap.Metrics["7d_api_cost"] = core.Metric{ + Used: core.Float64Ptr(weeklyCostUSD), + Unit: "USD", + Window: "rolling 7 days", + } + } + if weeklyMessages > 0 { + wm := float64(weeklyMessages) + snap.Metrics["7d_messages"] = core.Metric{ + Used: &wm, + Unit: "messages", + Window: "rolling 7 days", + } + wIn := float64(weeklyInputTokens) + snap.Metrics["7d_input_tokens"] = core.Metric{ + Used: &wIn, + Unit: "tokens", + Window: "rolling 7 days", + } + wOut := float64(weeklyOutputTokens) + snap.Metrics["7d_output_tokens"] = core.Metric{ + Used: &wOut, + Unit: "tokens", + Window: "rolling 7 days", + } + } + if weeklyCacheRead > 0 { + v := float64(weeklyCacheRead) + snap.Metrics["7d_cache_read_tokens"] = core.Metric{ + Used: &v, + Unit: "tokens", + Window: "rolling 7 days", + } + } + if weeklyCacheCreate > 0 { + v := float64(weeklyCacheCreate) + snap.Metrics["7d_cache_create_tokens"] = core.Metric{ + Used: &v, + Unit: "tokens", + Window: "rolling 7 days", + } + } + if weeklyCacheCreate5m > 0 { + v := float64(weeklyCacheCreate5m) + snap.Metrics["7d_cache_create_5m_tokens"] = core.Metric{ + Used: &v, + Unit: "tokens", + Window: "rolling 7 days", + } + } + if weeklyCacheCreate1h > 0 { + v := float64(weeklyCacheCreate1h) + snap.Metrics["7d_cache_create_1h_tokens"] = 
core.Metric{ + Used: &v, + Unit: "tokens", + Window: "rolling 7 days", + } + } + if weeklyReasoning > 0 { + v := float64(weeklyReasoning) + snap.Metrics["7d_reasoning_tokens"] = core.Metric{ + Used: &v, + Unit: "tokens", + Window: "rolling 7 days", + } + } + if weeklyToolCalls > 0 { + setMetricMax(snap, "7d_tool_calls", float64(weeklyToolCalls), "calls", "rolling 7 days") + } + if weeklyWebSearch > 0 { + v := float64(weeklyWebSearch) + snap.Metrics["7d_web_search_requests"] = core.Metric{ + Used: &v, + Unit: "requests", + Window: "rolling 7 days", + } + } + if weeklyWebFetch > 0 { + v := float64(weeklyWebFetch) + snap.Metrics["7d_web_fetch_requests"] = core.Metric{ + Used: &v, + Unit: "requests", + Window: "rolling 7 days", + } + } + if len(weeklySessions) > 0 { + setMetricMax(snap, "7d_sessions", float64(len(weeklySessions)), "sessions", "rolling 7 days") + } + + if todayMessages > 0 { + snap.Raw["jsonl_today_date"] = today + snap.Raw["jsonl_today_messages"] = fmt.Sprintf("%d", todayMessages) + snap.Raw["jsonl_today_input_tokens"] = fmt.Sprintf("%d", todayInputTokens) + snap.Raw["jsonl_today_output_tokens"] = fmt.Sprintf("%d", todayOutputTokens) + snap.Raw["jsonl_today_cache_read_tokens"] = fmt.Sprintf("%d", todayCacheRead) + snap.Raw["jsonl_today_cache_create_tokens"] = fmt.Sprintf("%d", todayCacheCreate) + snap.Raw["jsonl_today_reasoning_tokens"] = fmt.Sprintf("%d", todayReasoning) + snap.Raw["jsonl_today_web_search_requests"] = fmt.Sprintf("%d", todayWebSearch) + snap.Raw["jsonl_today_web_fetch_requests"] = fmt.Sprintf("%d", todayWebFetch) + + models := lo.Keys(todayModels) + sort.Strings(models) + snap.Raw["jsonl_today_models"] = strings.Join(models, ", ") + } + + if inCurrentBlock { + snap.Metrics["5h_block_cost"] = core.Metric{ + Used: core.Float64Ptr(blockCostUSD), + Unit: "USD", + Window: fmt.Sprintf("%s – %s", currentBlockStart.Format("15:04"), currentBlockEnd.Format("15:04")), + } + + blockIn := float64(blockInputTokens) + snap.Metrics["5h_block_input"] 
= core.Metric{ + Used: &blockIn, + Unit: "tokens", + Window: "current 5h block", + } + + blockOut := float64(blockOutputTokens) + snap.Metrics["5h_block_output"] = core.Metric{ + Used: &blockOut, + Unit: "tokens", + Window: "current 5h block", + } + + blockMsgs := float64(blockMessages) + snap.Metrics["5h_block_msgs"] = core.Metric{ + Used: &blockMsgs, + Unit: "messages", + Window: "current 5h block", + } + if blockCacheRead > 0 { + setMetricMax(snap, "5h_block_cache_read_tokens", float64(blockCacheRead), "tokens", "current 5h block") + } + if blockCacheCreate > 0 { + setMetricMax(snap, "5h_block_cache_create_tokens", float64(blockCacheCreate), "tokens", "current 5h block") + } + + remaining := currentBlockEnd.Sub(now) + if remaining > 0 { + snap.Resets["billing_block"] = currentBlockEnd + snap.Raw["block_time_remaining"] = fmt.Sprintf("%s", remaining.Round(time.Minute)) + + elapsed := now.Sub(currentBlockStart) + progress := math.Min(elapsed.Seconds()/billingBlockDuration.Seconds()*100, 100) + snap.Raw["block_progress_pct"] = fmt.Sprintf("%.0f", progress) + } + + snap.Raw["block_start"] = currentBlockStart.Format(time.RFC3339) + snap.Raw["block_end"] = currentBlockEnd.Format(time.RFC3339) + + blockModelList := lo.Keys(blockModels) + sort.Strings(blockModelList) + snap.Raw["block_models"] = strings.Join(blockModelList, ", ") + + elapsed := now.Sub(currentBlockStart) + if elapsed > time.Minute && blockCostUSD > 0 { + burnRate := blockCostUSD / elapsed.Hours() + snap.Metrics["burn_rate"] = core.Metric{ + Used: core.Float64Ptr(burnRate), + Unit: "USD/h", + Window: "current 5h block", + } + snap.Raw["burn_rate"] = fmt.Sprintf("$%.2f/hour", burnRate) + } + } + + if allTimeCostUSD > 0 { + snap.Metrics["all_time_api_cost"] = core.Metric{ + Used: core.Float64Ptr(allTimeCostUSD), + Unit: "USD", + Window: "all-time estimate", + } + } + if allTimeInputTokens > 0 { + setMetricMax(snap, "all_time_input_tokens", float64(allTimeInputTokens), "tokens", "all-time estimate") + } + 
if allTimeOutputTokens > 0 { + setMetricMax(snap, "all_time_output_tokens", float64(allTimeOutputTokens), "tokens", "all-time estimate") + } + if allTimeCacheRead > 0 { + setMetricMax(snap, "all_time_cache_read_tokens", float64(allTimeCacheRead), "tokens", "all-time estimate") + } + if allTimeCacheCreate > 0 { + setMetricMax(snap, "all_time_cache_create_tokens", float64(allTimeCacheCreate), "tokens", "all-time estimate") + } + if allTimeCacheCreate5m > 0 { + setMetricMax(snap, "all_time_cache_create_5m_tokens", float64(allTimeCacheCreate5m), "tokens", "all-time estimate") + } + if allTimeCacheCreate1h > 0 { + setMetricMax(snap, "all_time_cache_create_1h_tokens", float64(allTimeCacheCreate1h), "tokens", "all-time estimate") + } + if allTimeReasoning > 0 { + setMetricMax(snap, "all_time_reasoning_tokens", float64(allTimeReasoning), "tokens", "all-time estimate") + } + if allTimeToolCalls > 0 { + setMetricMax(snap, "all_time_tool_calls", float64(allTimeToolCalls), "calls", "all-time estimate") + setMetricMax(snap, "tool_calls_total", float64(allTimeToolCalls), "calls", "all-time estimate") + setMetricMax(snap, "tool_completed", float64(allTimeToolCalls), "calls", "all-time estimate") + setMetricMax(snap, "tool_success_rate", 100.0, "%", "all-time estimate") + } + if len(seenUsageKeys) > 0 { + setMetricMax(snap, "total_prompts", float64(len(seenUsageKeys)), "prompts", "all-time estimate") + } + if len(changedFiles) > 0 { + setMetricMax(snap, "composer_files_changed", float64(len(changedFiles)), "files", "all-time estimate") + } + if allTimeLinesAdded > 0 { + setMetricMax(snap, "composer_lines_added", float64(allTimeLinesAdded), "lines", "all-time estimate") + } + if allTimeLinesRemoved > 0 { + setMetricMax(snap, "composer_lines_removed", float64(allTimeLinesRemoved), "lines", "all-time estimate") + } + if allTimeCommitCount > 0 { + setMetricMax(snap, "scored_commits", float64(allTimeCommitCount), "commits", "all-time estimate") + } + if allTimeLinesAdded > 0 || 
allTimeLinesRemoved > 0 { + hundred := 100.0 + zero := 0.0 + snap.Metrics["ai_code_percentage"] = core.Metric{ + Used: &hundred, + Remaining: &zero, + Limit: &hundred, + Unit: "%", + Window: "all-time estimate", + } + } + for lang, count := range languageUsageCounts { + if count <= 0 { + continue + } + setMetricMax(snap, "lang_"+sanitizeModelName(lang), float64(count), "requests", "all-time estimate") + } + for toolName, count := range toolUsageCounts { + if count <= 0 { + continue + } + setMetricMax(snap, "tool_"+sanitizeModelName(toolName), float64(count), "calls", "all-time estimate") + } + if allTimeWebSearch > 0 { + setMetricMax(snap, "all_time_web_search_requests", float64(allTimeWebSearch), "requests", "all-time estimate") + } + if allTimeWebFetch > 0 { + setMetricMax(snap, "all_time_web_fetch_requests", float64(allTimeWebFetch), "requests", "all-time estimate") + } + + snap.Raw["tool_usage"] = summarizeCountMap(toolUsageCounts, 6) + snap.Raw["language_usage"] = summarizeCountMap(languageUsageCounts, 8) + snap.Raw["project_usage"] = summarizeTotalsMap(projectTotals, true, 6) + snap.Raw["agent_usage"] = summarizeTotalsMap(agentTotals, false, 4) + snap.Raw["service_tier_usage"] = summarizeFloatMap(serviceTierTotals, "tok", 4) + snap.Raw["inference_geo_usage"] = summarizeFloatMap(inferenceGeoTotals, "tok", 4) + if allTimeCacheRead > 0 || allTimeCacheCreate > 0 { + snap.Raw["cache_usage"] = fmt.Sprintf("read %s · create %s (1h %s, 5m %s)", + shortTokenCount(float64(allTimeCacheRead)), + shortTokenCount(float64(allTimeCacheCreate)), + shortTokenCount(float64(allTimeCacheCreate1h)), + shortTokenCount(float64(allTimeCacheCreate5m)), + ) + } + snap.Raw["project_count"] = fmt.Sprintf("%d", len(projectTotals)) + snap.Raw["tool_count"] = fmt.Sprintf("%d", len(toolUsageCounts)) + + snap.Raw["jsonl_total_entries"] = fmt.Sprintf("%d", allTimeEntries) + snap.Raw["jsonl_total_blocks"] = fmt.Sprintf("%d", len(blockStartCandidates)) + snap.Raw["jsonl_unique_requests"] = 
fmt.Sprintf("%d", len(seenUsageKeys)) + buildModelUsageSummaryRaw(snap) + + return nil +} diff --git a/internal/tui/model.go b/internal/tui/model.go index 4d00d87..bb9b4d4 100644 --- a/internal/tui/model.go +++ b/internal/tui/model.go @@ -143,6 +143,7 @@ type Model struct { detailTab int // active tab index in the detail panel (0=All) tileOffset int // vertical scroll offset for selected dashboard tile row expandedModelMixTiles map[string]bool + tileBodyCache map[string][]string warnThreshold float64 critThreshold float64 @@ -195,6 +196,7 @@ func NewModel( providerEnabled: make(map[string]bool), accountProviders: make(map[string]string), expandedModelMixTiles: make(map[string]bool), + tileBodyCache: make(map[string][]string), daemon: daemonState{status: DaemonConnecting}, timeWindow: timeWindow, } diff --git a/internal/tui/model_input.go b/internal/tui/model_input.go index 4f44aee..7811a99 100644 --- a/internal/tui/model_input.go +++ b/internal/tui/model_input.go @@ -16,6 +16,7 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { case tea.WindowSizeMsg: m.width = msg.Width m.height = msg.Height + m.tileBodyCache = make(map[string][]string) return m, nil case DaemonStatusMsg: @@ -59,6 +60,7 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } m.snapshots = msg.Snapshots m.refreshing = false + m.tileBodyCache = make(map[string][]string) if msg.RequestID > m.lastSnapshotRequestID { m.lastSnapshotRequestID = msg.RequestID } diff --git a/internal/tui/settings_modal.go b/internal/tui/settings_modal.go index 7ae88aa..fc3a253 100644 --- a/internal/tui/settings_modal.go +++ b/internal/tui/settings_modal.go @@ -690,151 +690,6 @@ func centerPanelVertically(panel string, targetHeight int) string { return strings.Repeat("\n", top) + panel + strings.Repeat("\n", bottom) } -func settingsWidgetSectionsPreviewSnapshot() core.UsageSnapshot { - usedMetric := func(used float64, unit, window string) core.Metric { - return core.Metric{ - Used: &used, - Unit: unit, - 
Window: window, - } - } - limitMetric := func(limit, used float64, unit, window string) core.Metric { - remaining := limit - used - return core.Metric{ - Limit: &limit, - Used: &used, - Remaining: &remaining, - Unit: unit, - Window: window, - } - } - - snap := core.NewUsageSnapshot(settingsWidgetPreviewProviderID, "claude-preview") - snap.Status = core.StatusOK - snap.Message = "Settings preview" - snap.Attributes = map[string]string{ - "telemetry_view": "canonical", - } - snap.Metrics = map[string]core.Metric{ - "usage_five_hour": limitMetric(200, 62, "requests", "5h"), - "usage_seven_day": limitMetric(5000, 1730, "requests", "7d"), - "today_api_cost": usedMetric(5.20, "USD", "1d"), - "7d_api_cost": usedMetric(28.40, "USD", "7d"), - "all_time_api_cost": usedMetric(412.30, "USD", "all"), - "messages_today": usedMetric(37, "requests", "1d"), - "sessions_today": usedMetric(6, "sessions", "1d"), - "tool_calls_today": usedMetric(52, "requests", "1d"), - "7d_tool_calls": usedMetric(281, "requests", "7d"), - "today_input_tokens": usedMetric(182000, "tokens", "1d"), - "today_output_tokens": usedMetric(64000, "tokens", "1d"), - "7d_input_tokens": usedMetric(1230000, "tokens", "7d"), - "7d_output_tokens": usedMetric(421000, "tokens", "7d"), - "model_claude_sonnet_4_5_input_tokens": usedMetric(820000, "tokens", "7d"), - "model_claude_sonnet_4_5_output_tokens": usedMetric(286000, "tokens", "7d"), - "model_claude_sonnet_4_5_requests": usedMetric(932, "requests", "7d"), - "model_claude_sonnet_4_5_cost_usd": usedMetric(22.30, "USD", "7d"), - "model_claude_haiku_3_5_input_tokens": usedMetric(210000, "tokens", "7d"), - "model_claude_haiku_3_5_output_tokens": usedMetric(83000, "tokens", "7d"), - "model_claude_haiku_3_5_requests": usedMetric(511, "requests", "7d"), - "model_claude_haiku_3_5_cost_usd": usedMetric(4.10, "USD", "7d"), - "client_claude_code_total_tokens": usedMetric(900000, "tokens", "7d"), - "client_claude_code_requests": usedMetric(1020, "requests", "7d"), - 
"client_claude_code_sessions": usedMetric(19, "sessions", "7d"), - "client_ide_total_tokens": usedMetric(330000, "tokens", "7d"), - "client_ide_requests": usedMetric(423, "requests", "7d"), - "client_ide_sessions": usedMetric(11, "sessions", "7d"), - "tool_edit": usedMetric(32, "requests", "7d"), - "tool_bash": usedMetric(18, "requests", "7d"), - "tool_read": usedMetric(24, "requests", "7d"), - "tool_success_rate": usedMetric(94, "percent", "7d"), - "mcp_github_total": usedMetric(16, "requests", "7d"), - "mcp_github_search_repositories": usedMetric(9, "requests", "7d"), - "mcp_github_get_pull_request": usedMetric(7, "requests", "7d"), - "lang_go": usedMetric(58, "requests", "7d"), - "lang_typescript": usedMetric(35, "requests", "7d"), - "lang_markdown": usedMetric(14, "requests", "7d"), - "composer_lines_added": usedMetric(980, "lines", "7d"), - "composer_lines_removed": usedMetric(420, "lines", "7d"), - "composer_files_changed": usedMetric(37, "files", "7d"), - "scored_commits": usedMetric(9, "commits", "7d"), - "ai_code_percentage": usedMetric(63, "percent", "7d"), - "total_prompts": usedMetric(241, "requests", "7d"), - "interface_bash": usedMetric(31, "requests", "7d"), - "interface_edit": usedMetric(44, "requests", "7d"), - "provider_anthropic_input_tokens": usedMetric(1100000, "tokens", "7d"), - "provider_anthropic_output_tokens": usedMetric(369000, "tokens", "7d"), - "provider_anthropic_requests": usedMetric(1450, "requests", "7d"), - "provider_anthropic_cost_usd": usedMetric(26.40, "USD", "7d"), - "upstream_aws_bedrock_input_tokens": usedMetric(510000, "tokens", "7d"), - "upstream_aws_bedrock_output_tokens": usedMetric(177000, "tokens", "7d"), - "upstream_aws_bedrock_requests": usedMetric(742, "requests", "7d"), - "upstream_aws_bedrock_cost_usd": usedMetric(12.40, "USD", "7d"), - "upstream_anthropic_input_tokens": usedMetric(590000, "tokens", "7d"), - "upstream_anthropic_output_tokens": usedMetric(192000, "tokens", "7d"), - "upstream_anthropic_requests": 
usedMetric(708, "requests", "7d"), - "upstream_anthropic_cost_usd": usedMetric(14.00, "USD", "7d"), - } - snap.DailySeries = map[string][]core.TimePoint{ - "analytics_cost": { - {Date: "2026-03-01", Value: 2.8}, - {Date: "2026-03-02", Value: 3.2}, - {Date: "2026-03-03", Value: 4.1}, - {Date: "2026-03-04", Value: 3.7}, - {Date: "2026-03-05", Value: 5.2}, - }, - "analytics_requests": { - {Date: "2026-03-01", Value: 210}, - {Date: "2026-03-02", Value: 238}, - {Date: "2026-03-03", Value: 290}, - {Date: "2026-03-04", Value: 256}, - {Date: "2026-03-05", Value: 311}, - }, - "usage_model_claude_sonnet_4_5": { - {Date: "2026-03-01", Value: 154}, - {Date: "2026-03-02", Value: 183}, - {Date: "2026-03-03", Value: 201}, - {Date: "2026-03-04", Value: 176}, - {Date: "2026-03-05", Value: 218}, - }, - "usage_model_claude_haiku_3_5": { - {Date: "2026-03-01", Value: 91}, - {Date: "2026-03-02", Value: 88}, - {Date: "2026-03-03", Value: 103}, - {Date: "2026-03-04", Value: 97}, - {Date: "2026-03-05", Value: 111}, - }, - "usage_client_claude_code": { - {Date: "2026-03-01", Value: 160}, - {Date: "2026-03-02", Value: 182}, - {Date: "2026-03-03", Value: 211}, - {Date: "2026-03-04", Value: 189}, - {Date: "2026-03-05", Value: 229}, - }, - "usage_client_ide": { - {Date: "2026-03-01", Value: 63}, - {Date: "2026-03-02", Value: 71}, - {Date: "2026-03-03", Value: 79}, - {Date: "2026-03-04", Value: 67}, - {Date: "2026-03-05", Value: 82}, - }, - "usage_source_bedrock": { - {Date: "2026-03-01", Value: 108}, - {Date: "2026-03-02", Value: 114}, - {Date: "2026-03-03", Value: 128}, - {Date: "2026-03-04", Value: 121}, - {Date: "2026-03-05", Value: 133}, - }, - "usage_source_claude": { - {Date: "2026-03-01", Value: 102}, - {Date: "2026-03-02", Value: 124}, - {Date: "2026-03-03", Value: 146}, - {Date: "2026-03-04", Value: 135}, - {Date: "2026-03-05", Value: 152}, - }, - } - return snap -} - func (m Model) renderSettingsThemeBody(w, h int) string { themes := AvailableThemes() activeThemeIdx := 
ActiveThemeIndex() diff --git a/internal/tui/settings_modal_preview.go b/internal/tui/settings_modal_preview.go new file mode 100644 index 0000000..37e44ec --- /dev/null +++ b/internal/tui/settings_modal_preview.go @@ -0,0 +1,148 @@ +package tui + +import "github.com/janekbaraniewski/openusage/internal/core" + +func settingsWidgetSectionsPreviewSnapshot() core.UsageSnapshot { + usedMetric := func(used float64, unit, window string) core.Metric { + return core.Metric{ + Used: &used, + Unit: unit, + Window: window, + } + } + limitMetric := func(limit, used float64, unit, window string) core.Metric { + remaining := limit - used + return core.Metric{ + Limit: &limit, + Used: &used, + Remaining: &remaining, + Unit: unit, + Window: window, + } + } + + snap := core.NewUsageSnapshot(settingsWidgetPreviewProviderID, "claude-preview") + snap.Status = core.StatusOK + snap.Message = "Settings preview" + snap.Attributes = map[string]string{ + "telemetry_view": "canonical", + } + snap.Metrics = map[string]core.Metric{ + "usage_five_hour": limitMetric(200, 62, "requests", "5h"), + "usage_seven_day": limitMetric(5000, 1730, "requests", "7d"), + "today_api_cost": usedMetric(5.20, "USD", "1d"), + "7d_api_cost": usedMetric(28.40, "USD", "7d"), + "all_time_api_cost": usedMetric(412.30, "USD", "all"), + "messages_today": usedMetric(37, "requests", "1d"), + "sessions_today": usedMetric(6, "sessions", "1d"), + "tool_calls_today": usedMetric(52, "requests", "1d"), + "7d_tool_calls": usedMetric(281, "requests", "7d"), + "today_input_tokens": usedMetric(182000, "tokens", "1d"), + "today_output_tokens": usedMetric(64000, "tokens", "1d"), + "7d_input_tokens": usedMetric(1230000, "tokens", "7d"), + "7d_output_tokens": usedMetric(421000, "tokens", "7d"), + "model_claude_sonnet_4_5_input_tokens": usedMetric(820000, "tokens", "7d"), + "model_claude_sonnet_4_5_output_tokens": usedMetric(286000, "tokens", "7d"), + "model_claude_sonnet_4_5_requests": usedMetric(932, "requests", "7d"), + 
"model_claude_sonnet_4_5_cost_usd": usedMetric(22.30, "USD", "7d"), + "model_claude_haiku_3_5_input_tokens": usedMetric(210000, "tokens", "7d"), + "model_claude_haiku_3_5_output_tokens": usedMetric(83000, "tokens", "7d"), + "model_claude_haiku_3_5_requests": usedMetric(511, "requests", "7d"), + "model_claude_haiku_3_5_cost_usd": usedMetric(4.10, "USD", "7d"), + "client_claude_code_total_tokens": usedMetric(900000, "tokens", "7d"), + "client_claude_code_requests": usedMetric(1020, "requests", "7d"), + "client_claude_code_sessions": usedMetric(19, "sessions", "7d"), + "client_ide_total_tokens": usedMetric(330000, "tokens", "7d"), + "client_ide_requests": usedMetric(423, "requests", "7d"), + "client_ide_sessions": usedMetric(11, "sessions", "7d"), + "tool_edit": usedMetric(32, "requests", "7d"), + "tool_bash": usedMetric(18, "requests", "7d"), + "tool_read": usedMetric(24, "requests", "7d"), + "tool_success_rate": usedMetric(94, "percent", "7d"), + "mcp_github_total": usedMetric(16, "requests", "7d"), + "mcp_github_search_repositories": usedMetric(9, "requests", "7d"), + "mcp_github_get_pull_request": usedMetric(7, "requests", "7d"), + "lang_go": usedMetric(58, "requests", "7d"), + "lang_typescript": usedMetric(35, "requests", "7d"), + "lang_markdown": usedMetric(14, "requests", "7d"), + "composer_lines_added": usedMetric(980, "lines", "7d"), + "composer_lines_removed": usedMetric(420, "lines", "7d"), + "composer_files_changed": usedMetric(37, "files", "7d"), + "scored_commits": usedMetric(9, "commits", "7d"), + "ai_code_percentage": usedMetric(63, "percent", "7d"), + "total_prompts": usedMetric(241, "requests", "7d"), + "interface_bash": usedMetric(31, "requests", "7d"), + "interface_edit": usedMetric(44, "requests", "7d"), + "provider_anthropic_input_tokens": usedMetric(1100000, "tokens", "7d"), + "provider_anthropic_output_tokens": usedMetric(369000, "tokens", "7d"), + "provider_anthropic_requests": usedMetric(1450, "requests", "7d"), + 
"provider_anthropic_cost_usd": usedMetric(26.40, "USD", "7d"), + "upstream_aws_bedrock_input_tokens": usedMetric(510000, "tokens", "7d"), + "upstream_aws_bedrock_output_tokens": usedMetric(177000, "tokens", "7d"), + "upstream_aws_bedrock_requests": usedMetric(742, "requests", "7d"), + "upstream_aws_bedrock_cost_usd": usedMetric(12.40, "USD", "7d"), + "upstream_anthropic_input_tokens": usedMetric(590000, "tokens", "7d"), + "upstream_anthropic_output_tokens": usedMetric(192000, "tokens", "7d"), + "upstream_anthropic_requests": usedMetric(708, "requests", "7d"), + "upstream_anthropic_cost_usd": usedMetric(14.00, "USD", "7d"), + } + snap.DailySeries = map[string][]core.TimePoint{ + "analytics_cost": { + {Date: "2026-03-01", Value: 2.8}, + {Date: "2026-03-02", Value: 3.2}, + {Date: "2026-03-03", Value: 4.1}, + {Date: "2026-03-04", Value: 3.7}, + {Date: "2026-03-05", Value: 5.2}, + }, + "analytics_requests": { + {Date: "2026-03-01", Value: 210}, + {Date: "2026-03-02", Value: 238}, + {Date: "2026-03-03", Value: 290}, + {Date: "2026-03-04", Value: 256}, + {Date: "2026-03-05", Value: 311}, + }, + "usage_model_claude_sonnet_4_5": { + {Date: "2026-03-01", Value: 154}, + {Date: "2026-03-02", Value: 183}, + {Date: "2026-03-03", Value: 201}, + {Date: "2026-03-04", Value: 176}, + {Date: "2026-03-05", Value: 218}, + }, + "usage_model_claude_haiku_3_5": { + {Date: "2026-03-01", Value: 91}, + {Date: "2026-03-02", Value: 88}, + {Date: "2026-03-03", Value: 103}, + {Date: "2026-03-04", Value: 97}, + {Date: "2026-03-05", Value: 111}, + }, + "usage_client_claude_code": { + {Date: "2026-03-01", Value: 160}, + {Date: "2026-03-02", Value: 182}, + {Date: "2026-03-03", Value: 211}, + {Date: "2026-03-04", Value: 189}, + {Date: "2026-03-05", Value: 229}, + }, + "usage_client_ide": { + {Date: "2026-03-01", Value: 63}, + {Date: "2026-03-02", Value: 71}, + {Date: "2026-03-03", Value: 79}, + {Date: "2026-03-04", Value: 67}, + {Date: "2026-03-05", Value: 82}, + }, + "usage_source_bedrock": { + 
{Date: "2026-03-01", Value: 108}, + {Date: "2026-03-02", Value: 114}, + {Date: "2026-03-03", Value: 128}, + {Date: "2026-03-04", Value: 121}, + {Date: "2026-03-05", Value: 133}, + }, + "usage_source_claude": { + {Date: "2026-03-01", Value: 102}, + {Date: "2026-03-02", Value: 124}, + {Date: "2026-03-03", Value: 146}, + {Date: "2026-03-04", Value: 135}, + {Date: "2026-03-05", Value: 152}, + }, + } + return snap +} diff --git a/internal/tui/tiles.go b/internal/tui/tiles.go index dce92e1..cddd69e 100644 --- a/internal/tui/tiles.go +++ b/internal/tui/tiles.go @@ -479,200 +479,15 @@ func (m Model) renderTile(snap core.UsageSnapshot, selected, modelMixExpanded bo if m.tileShouldRenderLoading(snap) { return renderWithBody(m.buildTileLoadingBody(innerW, bodyBudget, snap)) } - - type section struct { - lines []string - } - sectionsByID := make(map[core.DashboardStandardSection]section) - withSectionPadding := func(lines []string) []string { - if len(lines) == 0 { - return nil - } - s := []string{""} - s = append(s, lines...) - return s - } - addUsedKeys := func(dst map[string]bool, src map[string]bool) map[string]bool { - if len(src) == 0 { - return dst - } - if dst == nil { - dst = make(map[string]bool, len(src)) - } - for k := range src { - dst[k] = true - } - return dst - } - appendOtherGroup := func(dst []string, lines []string) []string { - if len(lines) == 0 { - return dst - } - if len(dst) > 0 { - dst = append(dst, "") - } - dst = append(dst, lines...) 
- return dst - } - - topUsageLines := m.buildTileGaugeLines(snap, widget, innerW) - if di.summary != "" { - topUsageLines = append(topUsageLines, tileHeroStyle.Render(truncate(di.summary))) - } - if di.detail != "" { - topUsageLines = append(topUsageLines, tileSummaryStyle.Render(truncate(di.detail))) - } - if wl := windowActivityLine(snap, m.timeWindow); wl != "" { - topUsageLines = append(topUsageLines, dimStyle.Render(truncate(wl))) - } - if len(topUsageLines) > 0 { - sectionsByID[core.DashboardSectionTopUsageProgress] = section{withSectionPadding(topUsageLines)} - } - - compactMetricLines, compactMetricKeys := buildTileCompactMetricSummaryLines(snap, widget, innerW) - - modelBurnLines, modelBurnKeys := buildProviderModelCompositionLines(snap, innerW, modelMixExpanded) - if len(modelBurnLines) > 0 { - sectionsByID[core.DashboardSectionModelBurn] = section{withSectionPadding(modelBurnLines)} - } - compactMetricKeys = addUsedKeys(compactMetricKeys, modelBurnKeys) - - var clientBurnLines []string - var clientBurnKeys map[string]bool - if widget.ShowClientComposition { - clientBurnLines, clientBurnKeys = buildProviderClientCompositionLinesWithWidget(snap, innerW, modelMixExpanded, widget) - if len(clientBurnLines) > 0 { - sectionsByID[core.DashboardSectionClientBurn] = section{withSectionPadding(clientBurnLines)} - } - } - compactMetricKeys = addUsedKeys(compactMetricKeys, clientBurnKeys) - - projectBreakdownLines, projectBreakdownKeys := buildProviderProjectBreakdownLines(snap, innerW, modelMixExpanded) - if len(projectBreakdownLines) > 0 { - sectionsByID[core.DashboardSectionProjectBreakdown] = section{withSectionPadding(projectBreakdownLines)} - } - compactMetricKeys = addUsedKeys(compactMetricKeys, projectBreakdownKeys) - - var toolBurnLines []string - var toolBurnKeys map[string]bool - if widget.ShowToolComposition { - toolBurnLines, toolBurnKeys = buildProviderToolCompositionLines(snap, innerW, modelMixExpanded, widget) - } - compactMetricKeys = 
addUsedKeys(compactMetricKeys, toolBurnKeys) - - var actualToolLines []string - var actualToolKeys map[string]bool - if widget.ShowActualToolUsage { - actualToolLines, actualToolKeys = buildActualToolUsageLines(snap, innerW, modelMixExpanded) - } - compactMetricKeys = addUsedKeys(compactMetricKeys, actualToolKeys) - if len(actualToolLines) > 0 { - sectionsByID[core.DashboardSectionToolUsage] = section{withSectionPadding(actualToolLines)} - } else if len(toolBurnLines) > 0 { - sectionsByID[core.DashboardSectionToolUsage] = section{withSectionPadding(toolBurnLines)} - } - - var mcpUsageLines []string - var mcpUsageKeys map[string]bool - if widget.ShowMCPUsage { - mcpUsageLines, mcpUsageKeys = buildMCPUsageLines(snap, innerW, modelMixExpanded) - if len(mcpUsageLines) > 0 { - sectionsByID[core.DashboardSectionMCPUsage] = section{withSectionPadding(mcpUsageLines)} - } - } - compactMetricKeys = addUsedKeys(compactMetricKeys, mcpUsageKeys) - - var langBurnLines []string - var langBurnKeys map[string]bool - if widget.ShowLanguageComposition { - langBurnLines, langBurnKeys = buildProviderLanguageCompositionLines(snap, innerW, modelMixExpanded) - if len(langBurnLines) > 0 { - sectionsByID[core.DashboardSectionLanguageBurn] = section{withSectionPadding(langBurnLines)} - } - } - compactMetricKeys = addUsedKeys(compactMetricKeys, langBurnKeys) - - var codeStatsLines []string - var codeStatsKeys map[string]bool - if widget.ShowCodeStatsComposition { - codeStatsLines, codeStatsKeys = buildProviderCodeStatsLines(snap, widget, innerW) - if len(codeStatsLines) > 0 { - sectionsByID[core.DashboardSectionCodeStats] = section{withSectionPadding(codeStatsLines)} - } - } - compactMetricKeys = addUsedKeys(compactMetricKeys, codeStatsKeys) - - dailyUsageLines := buildProviderDailyTrendLines(snap, innerW) - if len(dailyUsageLines) > 0 { - sectionsByID[core.DashboardSectionDailyUsage] = section{withSectionPadding(dailyUsageLines)} - } - - upstreamProviderLines, upstreamProviderKeys := 
buildUpstreamProviderCompositionLines(snap, innerW, modelMixExpanded) - if len(upstreamProviderLines) > 0 { - sectionsByID[core.DashboardSectionUpstreamProviders] = section{withSectionPadding(upstreamProviderLines)} - } - compactMetricKeys = addUsedKeys(compactMetricKeys, upstreamProviderKeys) - - providerBurnLines, providerBurnKeys := buildProviderVendorCompositionLines(snap, innerW, modelMixExpanded) - if len(providerBurnLines) > 0 { - sectionsByID[core.DashboardSectionProviderBurn] = section{withSectionPadding(providerBurnLines)} - } - compactMetricKeys = addUsedKeys(compactMetricKeys, providerBurnKeys) - - var otherLines []string - otherLines = appendOtherGroup(otherLines, compactMetricLines) - - geminiQuotaLines, geminiQuotaKeys := buildGeminiOtherQuotaLines(snap, innerW) - otherLines = appendOtherGroup(otherLines, geminiQuotaLines) - compactMetricKeys = addUsedKeys(compactMetricKeys, geminiQuotaKeys) - - metricLines := m.buildTileMetricLines(snap, widget, innerW, compactMetricKeys) - otherLines = appendOtherGroup(otherLines, metricLines) - - if snap.Message != "" && snap.Status != core.StatusError { - msg := snap.Message - if len(msg) > innerW-3 { - msg = msg[:innerW-6] + "..." 
- } - otherLines = appendOtherGroup(otherLines, []string{ - lipgloss.NewStyle().Foreground(colorSubtext).Italic(true).Render(msg), - }) - } - - metaLines := buildTileMetaLines(snap, innerW) - otherLines = appendOtherGroup(otherLines, metaLines) - + fullBody := m.cachedTileBodyLines(snap, widget, di, innerW, modelMixExpanded) if len(headerMeta) == 0 { resetLines := buildTileResetLines(snap, widget, innerW, m.animFrame) - otherLines = appendOtherGroup(otherLines, resetLines) - } - if len(otherLines) > 0 { - sectionsByID[core.DashboardSectionOtherData] = section{withSectionPadding(otherLines)} - } - - var sections []section - for _, sectionID := range widget.EffectiveStandardSectionOrder() { - if sectionID == core.DashboardSectionHeader { - continue - } - sec, ok := sectionsByID[sectionID] - if ok && len(sec.lines) > 0 { - sections = append(sections, sec) - continue - } - if m.hideSectionsWithNoData { - continue - } - emptyLines := buildEmptyTileSectionLines(sectionID, widget) - if len(emptyLines) == 0 { - continue + if len(resetLines) > 0 { + if len(fullBody) > 0 { + fullBody = append(fullBody, "") + } + fullBody = append(fullBody, resetLines...) } - sections = append(sections, section{withSectionPadding(emptyLines)}) - } - - var fullBody []string - for _, sec := range sections { - fullBody = append(fullBody, sec.lines...) 
} if bodyBudget < 0 { diff --git a/internal/tui/tiles_cache.go b/internal/tui/tiles_cache.go new file mode 100644 index 0000000..f170752 --- /dev/null +++ b/internal/tui/tiles_cache.go @@ -0,0 +1,269 @@ +package tui + +import ( + "fmt" + "strconv" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (m *Model) cachedTileBodyLines( + snap core.UsageSnapshot, + widget core.DashboardWidget, + di providerDisplayInfo, + innerW int, + modelMixExpanded bool, +) []string { + key := tileBodyCacheKey(snap, widget, m.timeWindow, innerW, modelMixExpanded, m.hideSectionsWithNoData) + if lines, ok := m.tileBodyCache[key]; ok { + return append([]string(nil), lines...) + } + + lines := m.buildTileBodyLines(snap, widget, di, innerW, modelMixExpanded) + if m.tileBodyCache == nil { + m.tileBodyCache = make(map[string][]string) + } + m.tileBodyCache[key] = append([]string(nil), lines...) + return append([]string(nil), lines...) +} + +func tileBodyCacheKey( + snap core.UsageSnapshot, + widget core.DashboardWidget, + window core.TimeWindow, + innerW int, + modelMixExpanded bool, + hideEmpty bool, +) string { + return strings.Join([]string{ + snap.ProviderID, + snap.AccountID, + string(snap.Status), + strconv.FormatInt(snap.Timestamp.UnixNano(), 10), + strconv.Itoa(len(snap.Metrics)), + strconv.Itoa(len(snap.Raw)), + strconv.Itoa(len(snap.DailySeries)), + strconv.Itoa(len(snap.Resets)), + string(window), + strconv.Itoa(innerW), + strconv.FormatBool(modelMixExpanded), + strconv.FormatBool(hideEmpty), + tileWidgetCacheKey(widget), + }, "|") +} + +func tileWidgetCacheKey(widget core.DashboardWidget) string { + parts := make([]string, 0, len(widget.EffectiveStandardSectionOrder())+10) + for _, section := range widget.EffectiveStandardSectionOrder() { + parts = append(parts, string(section)) + } + parts = append(parts, + fmt.Sprintf("client:%t", widget.ShowClientComposition), + fmt.Sprintf("tool:%t", 
widget.ShowToolComposition), + fmt.Sprintf("actual:%t", widget.ShowActualToolUsage), + fmt.Sprintf("mcp:%t", widget.ShowMCPUsage), + fmt.Sprintf("lang:%t", widget.ShowLanguageComposition), + fmt.Sprintf("code:%t", widget.ShowCodeStatsComposition), + fmt.Sprintf("fold_iface:%t", widget.ClientCompositionIncludeInterfaces), + fmt.Sprintf("hide_zero:%t", widget.SuppressZeroNonUsageMetrics), + "client_heading:"+widget.ClientCompositionHeading, + "tool_heading:"+widget.ToolCompositionHeading, + ) + return strings.Join(parts, ",") +} + +func (m *Model) buildTileBodyLines( + snap core.UsageSnapshot, + widget core.DashboardWidget, + di providerDisplayInfo, + innerW int, + modelMixExpanded bool, +) []string { + truncate := func(s string) string { + if lipglossWidth := len([]rune(s)); lipglossWidth > innerW { + return s[:innerW-1] + "…" + } + return s + } + + type section struct { + lines []string + } + sectionsByID := make(map[core.DashboardStandardSection]section) + withSectionPadding := func(lines []string) []string { + if len(lines) == 0 { + return nil + } + s := []string{""} + s = append(s, lines...) + return s + } + addUsedKeys := func(dst map[string]bool, src map[string]bool) map[string]bool { + if len(src) == 0 { + return dst + } + if dst == nil { + dst = make(map[string]bool, len(src)) + } + for k := range src { + dst[k] = true + } + return dst + } + appendOtherGroup := func(dst []string, lines []string) []string { + if len(lines) == 0 { + return dst + } + if len(dst) > 0 { + dst = append(dst, "") + } + dst = append(dst, lines...) 
+ return dst + } + + topUsageLines := m.buildTileGaugeLines(snap, widget, innerW) + if di.summary != "" { + topUsageLines = append(topUsageLines, tileHeroStyle.Render(truncate(di.summary))) + } + if di.detail != "" { + topUsageLines = append(topUsageLines, tileSummaryStyle.Render(truncate(di.detail))) + } + if wl := windowActivityLine(snap, m.timeWindow); wl != "" { + topUsageLines = append(topUsageLines, dimStyle.Render(truncate(wl))) + } + if len(topUsageLines) > 0 { + sectionsByID[core.DashboardSectionTopUsageProgress] = section{withSectionPadding(topUsageLines)} + } + + compactMetricLines, compactMetricKeys := buildTileCompactMetricSummaryLines(snap, widget, innerW) + + modelBurnLines, modelBurnKeys := buildProviderModelCompositionLines(snap, innerW, modelMixExpanded) + if len(modelBurnLines) > 0 { + sectionsByID[core.DashboardSectionModelBurn] = section{withSectionPadding(modelBurnLines)} + } + compactMetricKeys = addUsedKeys(compactMetricKeys, modelBurnKeys) + + if widget.ShowClientComposition { + clientBurnLines, clientBurnKeys := buildProviderClientCompositionLinesWithWidget(snap, innerW, modelMixExpanded, widget) + if len(clientBurnLines) > 0 { + sectionsByID[core.DashboardSectionClientBurn] = section{withSectionPadding(clientBurnLines)} + } + compactMetricKeys = addUsedKeys(compactMetricKeys, clientBurnKeys) + } + + projectBreakdownLines, projectBreakdownKeys := buildProviderProjectBreakdownLines(snap, innerW, modelMixExpanded) + if len(projectBreakdownLines) > 0 { + sectionsByID[core.DashboardSectionProjectBreakdown] = section{withSectionPadding(projectBreakdownLines)} + } + compactMetricKeys = addUsedKeys(compactMetricKeys, projectBreakdownKeys) + + var toolBurnLines []string + if widget.ShowToolComposition { + var toolBurnKeys map[string]bool + toolBurnLines, toolBurnKeys = buildProviderToolCompositionLines(snap, innerW, modelMixExpanded, widget) + compactMetricKeys = addUsedKeys(compactMetricKeys, toolBurnKeys) + } + + actualToolLines, actualToolKeys 
:= buildActualToolUsageLines(snap, innerW, modelMixExpanded) + compactMetricKeys = addUsedKeys(compactMetricKeys, actualToolKeys) + if len(actualToolLines) > 0 { + sectionsByID[core.DashboardSectionToolUsage] = section{withSectionPadding(actualToolLines)} + } else if len(toolBurnLines) > 0 { + sectionsByID[core.DashboardSectionToolUsage] = section{withSectionPadding(toolBurnLines)} + } + + if widget.ShowMCPUsage { + mcpUsageLines, mcpUsageKeys := buildMCPUsageLines(snap, innerW, modelMixExpanded) + if len(mcpUsageLines) > 0 { + sectionsByID[core.DashboardSectionMCPUsage] = section{withSectionPadding(mcpUsageLines)} + } + compactMetricKeys = addUsedKeys(compactMetricKeys, mcpUsageKeys) + } + + if widget.ShowLanguageComposition { + langBurnLines, langBurnKeys := buildProviderLanguageCompositionLines(snap, innerW, modelMixExpanded) + if len(langBurnLines) > 0 { + sectionsByID[core.DashboardSectionLanguageBurn] = section{withSectionPadding(langBurnLines)} + } + compactMetricKeys = addUsedKeys(compactMetricKeys, langBurnKeys) + } + + if widget.ShowCodeStatsComposition { + codeStatsLines, codeStatsKeys := buildProviderCodeStatsLines(snap, widget, innerW) + if len(codeStatsLines) > 0 { + sectionsByID[core.DashboardSectionCodeStats] = section{withSectionPadding(codeStatsLines)} + } + compactMetricKeys = addUsedKeys(compactMetricKeys, codeStatsKeys) + } + + dailyUsageLines := buildProviderDailyTrendLines(snap, innerW) + if len(dailyUsageLines) > 0 { + sectionsByID[core.DashboardSectionDailyUsage] = section{withSectionPadding(dailyUsageLines)} + } + + upstreamProviderLines, upstreamProviderKeys := buildUpstreamProviderCompositionLines(snap, innerW, modelMixExpanded) + if len(upstreamProviderLines) > 0 { + sectionsByID[core.DashboardSectionUpstreamProviders] = section{withSectionPadding(upstreamProviderLines)} + } + compactMetricKeys = addUsedKeys(compactMetricKeys, upstreamProviderKeys) + + providerBurnLines, providerBurnKeys := buildProviderVendorCompositionLines(snap, 
innerW, modelMixExpanded) + if len(providerBurnLines) > 0 { + sectionsByID[core.DashboardSectionProviderBurn] = section{withSectionPadding(providerBurnLines)} + } + compactMetricKeys = addUsedKeys(compactMetricKeys, providerBurnKeys) + + var otherLines []string + otherLines = appendOtherGroup(otherLines, compactMetricLines) + + geminiQuotaLines, geminiQuotaKeys := buildGeminiOtherQuotaLines(snap, innerW) + otherLines = appendOtherGroup(otherLines, geminiQuotaLines) + compactMetricKeys = addUsedKeys(compactMetricKeys, geminiQuotaKeys) + + metricLines := m.buildTileMetricLines(snap, widget, innerW, compactMetricKeys) + otherLines = appendOtherGroup(otherLines, metricLines) + + if snap.Message != "" && snap.Status != core.StatusError { + msg := snap.Message + if len(msg) > innerW-3 { + msg = msg[:innerW-6] + "..." + } + otherLines = appendOtherGroup(otherLines, []string{ + lipglossNewItalic(msg), + }) + } + + metaLines := buildTileMetaLines(snap, innerW) + otherLines = appendOtherGroup(otherLines, metaLines) + if len(otherLines) > 0 { + sectionsByID[core.DashboardSectionOtherData] = section{withSectionPadding(otherLines)} + } + + var fullBody []string + for _, sectionID := range widget.EffectiveStandardSectionOrder() { + if sectionID == core.DashboardSectionHeader { + continue + } + sec, ok := sectionsByID[sectionID] + if ok && len(sec.lines) > 0 { + fullBody = append(fullBody, sec.lines...) + continue + } + if m.hideSectionsWithNoData { + continue + } + emptyLines := buildEmptyTileSectionLines(sectionID, widget) + if len(emptyLines) == 0 { + continue + } + fullBody = append(fullBody, withSectionPadding(emptyLines)...) 
+ } + + return fullBody +} + +func lipglossNewItalic(msg string) string { + return lipgloss.NewStyle().Foreground(colorSubtext).Italic(true).Render(msg) +} From 6b1353271b858c8b116b739d547ddd00c66a6f2b Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 18:55:37 +0100 Subject: [PATCH 25/32] refactor: tighten account config docs and trim config test setup --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 6 +- internal/config/config_test.go | 67 ++----------------- internal/config/test_helpers_test.go | 27 ++++++++ internal/core/provider.go | 18 ++--- 4 files changed, 46 insertions(+), 72 deletions(-) create mode 100644 internal/config/test_helpers_test.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index ae7fdaa..4f52045 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -67,19 +67,21 @@ This table captures every issue found in this pass. It is broad and high-signal, | R47 | Fixed | Claude Code conversation aggregation split | `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/conversation_usage.go` | Claude Code's JSONL conversation aggregation, block-window estimation, and local tool/session projection no longer live inline with provider setup and API plumbing. The main provider file is now mostly provider wiring and API-side flow. | Keep future conversation-record projections in the dedicated conversation unit. | | R48 | Fixed | Tile render-path derivation caching | `internal/tui/model.go`, `internal/tui/model_input.go`, `internal/tui/tiles.go`, `internal/tui/tiles_cache.go` | Tile body derivation is now cached per snapshot/update state instead of rebuilding the full composition section stack on every render frame. Dynamic header and reset animation still render live, while static body composition is reused until snapshots or size change. 
| Apply the same pattern selectively to detail/analytics only where profiling or repeated drift justifies it. | | R49 | Fixed | Settings modal preview-data split | `internal/tui/settings_modal.go`, `internal/tui/settings_modal_preview.go` | The large preview snapshot fixture for widget-section configuration moved out of the main settings modal behavior file, reducing render/input coupling inside `settings_modal.go`. | Continue moving purely preview/demo helpers out of modal behavior files. | +| R50 | Fixed | Account-config contract comments aligned with runtime | `internal/core/provider.go` | `AccountConfig` comments no longer claim that `Binary` and `BaseURL` are valid primary homes for provider-local data paths. The type now documents the actual runtime contract: provider-local paths belong in `Paths`, with legacy compatibility handled inside provider packages. | A typed runtime-hints structure is still the next hardening step. | +| R51 | Fixed | Config test file helper extraction | `internal/config/config_test.go`, `internal/config/test_helpers_test.go` | Repeated `settings.json` temp-file creation/loading boilerplate in the config test suite now goes through shared helpers for the common cases, shrinking some of the easiest-to-repeat fixture noise. | Continue the same pattern in the remaining large test files and higher-noise config cases. | ## Action Table | ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | | --- | --- | --- | --- | --- | --- | --- | -| A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire path-related legacy comments/compatibility in `AccountConfig`. 
| Finishes the contract cleanup and makes misuse harder. | +| A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow and the comments now match that behavior, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire the remaining compatibility shape in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | | A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/tui/settings_modal_preview.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, tile-body composition is cached, and settings preview/layout pieces are separated, but TUI state-transition and render-heavy flows are still concentrated in a few large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | | A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Composition bars, provider tile fallback/rate-limit selection, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. 
| Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | | A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go` | Cursor, OpenRouter, Codex, Copilot, and Claude Code are now materially decomposed, but several providers still combine large parsing/projection flows in very large files. | Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. | Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. | | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. 
This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. | | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | -| A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. They are valuable but expensive to navigate and update. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | +| A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. The config suite now has basic shared file helpers, but the larger provider/telemetry suites still carry too much duplicated fixture setup. 
| Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | | A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/tui/settings_modal_preview.go`, `internal/tui/tiles_composition.go` | TUI logic is split across more focused files now, but several files are still individually large and still mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. | | A15 | P3 | Performance optimization follow-through in render path | `internal/tui/model.go`, `internal/tui/tiles.go`, `internal/tui/tiles_cache.go`, `internal/tui/detail.go`, `internal/tui/analytics.go` | Tile body composition is now cached per snapshot/update state, but detail and analytics still rebuild some derived structures on each render path. | Extend caching only to the remaining high-cost detail/analytics derivations if profiling or repeated churn justifies it. | Lower render cost without over-caching the whole UI. 
| diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 88e50c0..3779619 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -61,9 +61,6 @@ func TestLoadFrom_MissingFile(t *testing.T) { } func TestLoadFrom_ValidFile(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "settings.json") - content := `{ "ui": { "refresh_interval_seconds": 10, @@ -87,14 +84,7 @@ func TestLoadFrom_ValidFile(t *testing.T) { } ] }` - if err := os.WriteFile(path, []byte(content), 0o644); err != nil { - t.Fatalf("writing test config: %v", err) - } - - cfg, err := LoadFrom(path) - if err != nil { - t.Fatalf("LoadFrom() error: %v", err) - } + cfg := loadConfigJSON(t, content) if cfg.UI.RefreshIntervalSeconds != 10 { t.Errorf("refresh = %d, want 10", cfg.UI.RefreshIntervalSeconds) @@ -120,14 +110,7 @@ func TestLoadFrom_ValidFile(t *testing.T) { } func TestLoadFrom_InvalidJSON(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "settings.json") - - if err := os.WriteFile(path, []byte(`{not json`), 0o644); err != nil { - t.Fatal(err) - } - - cfg, err := LoadFrom(path) + cfg, err := LoadFrom(writeSettingsJSON(t, `{not json`)) if err == nil { t.Fatal("expected error for invalid JSON") } @@ -137,36 +120,14 @@ func TestLoadFrom_InvalidJSON(t *testing.T) { } func TestLoadFrom_EmptyThemeFallsBackToDefault(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "settings.json") - - data := []byte(`{"theme":"","experimental":{"analytics":true}}`) - if err := os.WriteFile(path, data, 0o644); err != nil { - t.Fatal(err) - } - - cfg, err := LoadFrom(path) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + cfg := loadConfigJSON(t, `{"theme":"","experimental":{"analytics":true}}`) if cfg.Theme != "Gruvbox" { t.Errorf("expected default theme for empty string, got %q", cfg.Theme) } } func TestLoadFrom_ZeroThresholdsGetDefaults(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, 
"settings.json") - - data := []byte(`{"ui":{"refresh_interval_seconds":0,"warn_threshold":0,"crit_threshold":0}}`) - if err := os.WriteFile(path, data, 0o644); err != nil { - t.Fatal(err) - } - - cfg, err := LoadFrom(path) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + cfg := loadConfigJSON(t, `{"ui":{"refresh_interval_seconds":0,"warn_threshold":0,"crit_threshold":0}}`) if cfg.UI.RefreshIntervalSeconds != 30 { t.Errorf("refresh = %d, want 30 (default for zero)", cfg.UI.RefreshIntervalSeconds) } @@ -729,30 +690,14 @@ func TestSaveDashboardHideSectionsWithNoDataTo(t *testing.T) { } func TestLoadFrom_DashboardViewTabs(t *testing.T) { - path := filepath.Join(t.TempDir(), "settings.json") - if err := os.WriteFile(path, []byte(`{"dashboard":{"view":"tabs"}}`), 0o644); err != nil { - t.Fatal(err) - } - - cfg, err := LoadFrom(path) - if err != nil { - t.Fatal(err) - } + cfg := loadConfigJSON(t, `{"dashboard":{"view":"tabs"}}`) if cfg.Dashboard.View != DashboardViewTabs { t.Errorf("dashboard.view = %q, want %q", cfg.Dashboard.View, DashboardViewTabs) } } func TestLoadFrom_DashboardLegacyListMapsToSplit(t *testing.T) { - path := filepath.Join(t.TempDir(), "settings.json") - if err := os.WriteFile(path, []byte(`{"dashboard":{"view":"list"}}`), 0o644); err != nil { - t.Fatal(err) - } - - cfg, err := LoadFrom(path) - if err != nil { - t.Fatal(err) - } + cfg := loadConfigJSON(t, `{"dashboard":{"view":"list"}}`) if cfg.Dashboard.View != DashboardViewSplit { t.Errorf("dashboard.view = %q, want %q", cfg.Dashboard.View, DashboardViewSplit) } diff --git a/internal/config/test_helpers_test.go b/internal/config/test_helpers_test.go new file mode 100644 index 0000000..7be1229 --- /dev/null +++ b/internal/config/test_helpers_test.go @@ -0,0 +1,27 @@ +package config + +import ( + "os" + "path/filepath" + "testing" +) + +func writeSettingsJSON(t *testing.T, content string) string { + t.Helper() + + path := filepath.Join(t.TempDir(), "settings.json") + if err := 
os.WriteFile(path, []byte(content), 0o644); err != nil { + t.Fatalf("write settings.json: %v", err) + } + return path +} + +func loadConfigJSON(t *testing.T, content string) Config { + t.Helper() + + cfg, err := LoadFrom(writeSettingsJSON(t, content)) + if err != nil { + t.Fatalf("LoadFrom() error: %v", err) + } + return cfg +} diff --git a/internal/core/provider.go b/internal/core/provider.go index aee5267..1bf01dd 100644 --- a/internal/core/provider.go +++ b/internal/core/provider.go @@ -13,19 +13,19 @@ type AccountConfig struct { APIKeyEnv string `json:"api_key_env,omitempty"` // env var name holding the API key ProbeModel string `json:"probe_model,omitempty"` // model to use for probe requests - // Binary stores a CLI binary path (copilot, gemini_cli) or a primary data - // file path (cursor tracking DB, claude_code stats-cache.json). - // Prefer using Paths for new providers. + // Binary stores a CLI binary path for providers that execute a local command. + // Provider-specific local data paths belong in Paths. Legacy Binary-based + // data-path compatibility is handled inside the affected provider packages. Binary string `json:"binary,omitempty"` - // BaseURL stores an API base URL (openrouter, codex, ollama) or a secondary - // data file path (cursor state.vscdb, claude_code .claude.json). - // Prefer using Paths for new providers. + // BaseURL stores an HTTP API base URL for providers with configurable + // endpoints. Provider-specific local data paths belong in Paths. Legacy + // BaseURL-based data-path compatibility is handled inside provider packages. BaseURL string `json:"base_url,omitempty"` - // Paths holds named provider-specific paths/URLs, replacing the overloaded - // Binary and BaseURL fields. Keys are provider-defined (e.g. "tracking_db", - // "state_db", "stats_cache", "account_config"). + // Paths holds named provider-specific paths/URLs that are not part of the + // shared account contract. 
Keys are provider-defined (for example + // "tracking_db", "state_db", "stats_cache", "account_config"). Paths map[string]string `json:"paths,omitempty"` Token string `json:"-"` // runtime-only: access token (never persisted) From ed4f0c1290aa51a8fa7eee118df23ba36f0dafb7 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 21:10:32 +0100 Subject: [PATCH 26/32] refactor: split gemini session parsing and tighten context seams --- cmd/openusage/dashboard.go | 8 +- internal/core/analytics_normalize.go | 8 +- internal/core/usage_breakdowns.go | 747 +-------- internal/core/usage_breakdowns_domains.go | 735 +++++++++ internal/daemon/server_http.go | 4 +- internal/daemon/server_read_model.go | 5 +- internal/dashboardapp/service.go | 17 +- internal/providers/gemini_cli/gemini_cli.go | 1340 ---------------- .../providers/gemini_cli/session_usage.go | 1344 +++++++++++++++++ internal/telemetry/provider_links.go | 9 +- internal/telemetry/usage_view_materialize.go | 4 +- internal/tui/analytics_data.go | 6 +- 12 files changed, 2115 insertions(+), 2112 deletions(-) create mode 100644 internal/core/usage_breakdowns_domains.go create mode 100644 internal/providers/gemini_cli/session_usage.go diff --git a/cmd/openusage/dashboard.go b/cmd/openusage/dashboard.go index c79d5e2..70797b2 100644 --- a/cmd/openusage/dashboard.go +++ b/cmd/openusage/dashboard.go @@ -32,6 +32,9 @@ func runDashboard(cfg config.Config) { timeWindow := core.ParseTimeWindow(cfg.Data.TimeWindow) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + model := tui.NewModel( cfg.UI.WarnThreshold, cfg.UI.CritThreshold, @@ -40,7 +43,7 @@ func runDashboard(cfg config.Config) { cachedAccounts, timeWindow, ) - model.SetServices(dashboardapp.NewService()) + model.SetServices(dashboardapp.NewService(ctx)) socketPath := daemon.ResolveSocketPath() @@ -51,9 +54,6 @@ func runDashboard(cfg config.Config) { ) viewRuntime.SetTimeWindow(timeWindow) - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() - var program *tea.Program dispatcher := &snapshotDispatcher{} diff --git a/internal/core/analytics_normalize.go b/internal/core/analytics_normalize.go index 1054784..6cd1620 100644 --- a/internal/core/analytics_normalize.go +++ b/internal/core/analytics_normalize.go @@ -1,11 +1,10 @@ package core import ( - "sort" + "maps" + "slices" "strings" "time" - - "github.com/samber/lo" ) func normalizeAnalyticsDailySeries(s *UsageSnapshot) { @@ -150,8 +149,7 @@ func normalizeSeriesPoints(points []TimePoint) []TimePoint { } agg[date] += p.Value } - keys := lo.Keys(agg) - sort.Strings(keys) + keys := slices.Sorted(maps.Keys(agg)) out := make([]TimePoint, 0, len(keys)) for _, k := range keys { out = append(out, TimePoint{Date: k, Value: agg[k]}) diff --git a/internal/core/usage_breakdowns.go b/internal/core/usage_breakdowns.go index 86c0d37..038b23a 100644 --- a/internal/core/usage_breakdowns.go +++ b/internal/core/usage_breakdowns.go @@ -191,735 +191,6 @@ func ExtractMCPUsage(s UsageSnapshot) ([]MCPServerUsageEntry, map[string]bool) { return out, usedKeys } -func ExtractProjectUsage(s UsageSnapshot) ([]ProjectUsageEntry, map[string]bool) { - byProject := make(map[string]*ProjectUsageEntry) - usedKeys := make(map[string]bool) - seriesByProject := make(map[string]map[string]float64) - - ensure := func(name string) *ProjectUsageEntry { - if _, ok := byProject[name]; !ok { - byProject[name] = &ProjectUsageEntry{Name: name} - } - return byProject[name] - } - - for key, metric := range s.Metrics { - if metric.Used == nil { - continue - } - name, field, ok := parseProjectMetricKey(key) - if !ok { - continue - } - project := ensure(name) - switch field { - case "requests": - project.Requests = *metric.Used - case "requests_today": - project.Requests1d = *metric.Used - } - usedKeys[key] = true - } - - for key, points := range s.DailySeries { - if !strings.HasPrefix(key, "usage_project_") { - continue - } - name := 
strings.TrimSpace(strings.TrimPrefix(key, "usage_project_")) - if name == "" || len(points) == 0 { - continue - } - mergeBreakdownSeriesByDay(seriesByProject, name, points) - } - - for name, pointsByDay := range seriesByProject { - project := ensure(name) - project.Series = breakdownSortedSeries(pointsByDay) - if project.Requests <= 0 { - project.Requests = sumBreakdownSeries(project.Series) - } - } - - out := make([]ProjectUsageEntry, 0, len(byProject)) - for _, project := range byProject { - if project.Requests <= 0 && len(project.Series) == 0 { - continue - } - out = append(out, *project) - } - sort.Slice(out, func(i, j int) bool { - if out[i].Requests != out[j].Requests { - return out[i].Requests > out[j].Requests - } - return out[i].Name < out[j].Name - }) - return out, usedKeys -} - -func ExtractModelBreakdown(s UsageSnapshot) ([]ModelBreakdownEntry, map[string]bool) { - type agg struct { - cost float64 - input float64 - output float64 - requests float64 - requests1d float64 - series []TimePoint - } - byModel := make(map[string]*agg) - usedKeys := make(map[string]bool) - - ensure := func(name string) *agg { - if _, ok := byModel[name]; !ok { - byModel[name] = &agg{} - } - return byModel[name] - } - - recordInput := func(name string, value float64, key string) { - ensure(name).input += value - usedKeys[key] = true - } - recordOutput := func(name string, value float64, key string) { - ensure(name).output += value - usedKeys[key] = true - } - recordCost := func(name string, value float64, key string) { - ensure(name).cost += value - usedKeys[key] = true - } - recordRequests := func(name string, value float64, key string) { - ensure(name).requests += value - usedKeys[key] = true - } - recordRequests1d := func(name string, value float64, key string) { - ensure(name).requests1d += value - usedKeys[key] = true - } - - for key, metric := range s.Metrics { - if metric.Used == nil { - continue - } - switch { - case strings.HasPrefix(key, "model_") && 
strings.HasSuffix(key, "_requests_today"): - recordRequests1d(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_requests_today"), *metric.Used, key) - case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_requests"): - recordRequests(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_requests"), *metric.Used, key) - default: - rawModel, kind, ok := parseModelMetricKey(key) - if !ok { - continue - } - switch kind { - case modelMetricInput: - recordInput(rawModel, *metric.Used, key) - case modelMetricOutput: - recordOutput(rawModel, *metric.Used, key) - case modelMetricCostUSD: - recordCost(rawModel, *metric.Used, key) - } - } - } - - for key, points := range s.DailySeries { - if !strings.HasPrefix(key, "usage_model_") || len(points) == 0 { - continue - } - name := strings.TrimSpace(strings.TrimPrefix(key, "usage_model_")) - if name == "" { - continue - } - entry := ensure(name) - entry.series = points - if entry.requests <= 0 { - entry.requests = sumBreakdownSeries(points) - } - } - - out := make([]ModelBreakdownEntry, 0, len(byModel)) - for name, entry := range byModel { - if entry.cost <= 0 && entry.input <= 0 && entry.output <= 0 && entry.requests <= 0 && len(entry.series) == 0 { - continue - } - out = append(out, ModelBreakdownEntry{ - Name: name, - Cost: entry.cost, - Input: entry.input, - Output: entry.output, - Requests: entry.requests, - Requests1d: entry.requests1d, - Series: entry.series, - }) - } - sort.Slice(out, func(i, j int) bool { - ti := out[i].Input + out[i].Output - tj := out[j].Input + out[j].Output - if ti != tj { - return ti > tj - } - if out[i].Cost != out[j].Cost { - return out[i].Cost > out[j].Cost - } - if out[i].Requests != out[j].Requests { - return out[i].Requests > out[j].Requests - } - return out[i].Name < out[j].Name - }) - return out, usedKeys -} - -func ExtractProviderBreakdown(s UsageSnapshot) ([]ProviderBreakdownEntry, map[string]bool) { - type agg struct { - cost float64 - input float64 - output float64 - 
requests float64 - } - type fieldState struct { - cost bool - input bool - output bool - requests bool - } - byProvider := make(map[string]*agg) - usedKeys := make(map[string]bool) - fieldsByProvider := make(map[string]*fieldState) - - ensure := func(name string) *agg { - if _, ok := byProvider[name]; !ok { - byProvider[name] = &agg{} - } - return byProvider[name] - } - ensureFields := func(name string) *fieldState { - if _, ok := fieldsByProvider[name]; !ok { - fieldsByProvider[name] = &fieldState{} - } - return fieldsByProvider[name] - } - recordCost := func(name string, value float64, key string) { - ensure(name).cost += value - ensureFields(name).cost = true - usedKeys[key] = true - } - recordInput := func(name string, value float64, key string) { - ensure(name).input += value - ensureFields(name).input = true - usedKeys[key] = true - } - recordOutput := func(name string, value float64, key string) { - ensure(name).output += value - ensureFields(name).output = true - usedKeys[key] = true - } - recordRequests := func(name string, value float64, key string) { - ensure(name).requests += value - ensureFields(name).requests = true - usedKeys[key] = true - } - - for key, metric := range s.Metrics { - if metric.Used == nil || !strings.HasPrefix(key, "provider_") { - continue - } - switch { - case strings.HasSuffix(key, "_cost_usd"): - recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost_usd"), *metric.Used, key) - case strings.HasSuffix(key, "_cost") && !strings.HasSuffix(key, "_byok_cost"): - recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost"), *metric.Used, key) - case strings.HasSuffix(key, "_input_tokens"): - recordInput(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_input_tokens"), *metric.Used, key) - case strings.HasSuffix(key, "_output_tokens"): - recordOutput(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_output_tokens"), *metric.Used, key) - case strings.HasSuffix(key, "_requests"): - 
recordRequests(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_requests"), *metric.Used, key) - } - } - for key, metric := range s.Metrics { - if metric.Used == nil || !strings.HasPrefix(key, "provider_") || !strings.HasSuffix(key, "_byok_cost") { - continue - } - base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_byok_cost") - if base == "" || ensureFields(base).cost { - continue - } - recordCost(base, *metric.Used, key) - } - - meta := snapshotBreakdownMetaEntries(s) - for key, raw := range meta { - if usedKeys[key] || !strings.HasPrefix(key, "provider_") { - continue - } - switch { - case strings.HasSuffix(key, "_cost") && !strings.HasSuffix(key, "_byok_cost"): - value, ok := parseBreakdownNumeric(raw) - if !ok { - continue - } - base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost") - if base == "" || ensureFields(base).cost { - continue - } - recordCost(base, value, key) - case strings.HasSuffix(key, "_input_tokens"), strings.HasSuffix(key, "_prompt_tokens"): - value, ok := parseBreakdownNumeric(raw) - if !ok { - continue - } - base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_input_tokens") - base = strings.TrimSuffix(base, "_prompt_tokens") - if base == "" || ensureFields(base).input { - continue - } - recordInput(base, value, key) - case strings.HasSuffix(key, "_output_tokens"), strings.HasSuffix(key, "_completion_tokens"): - value, ok := parseBreakdownNumeric(raw) - if !ok { - continue - } - base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_output_tokens") - base = strings.TrimSuffix(base, "_completion_tokens") - if base == "" || ensureFields(base).output { - continue - } - recordOutput(base, value, key) - case strings.HasSuffix(key, "_requests"): - value, ok := parseBreakdownNumeric(raw) - if !ok { - continue - } - base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_requests") - if base == "" || ensureFields(base).requests { - continue - } - recordRequests(base, 
value, key) - } - } - for key, raw := range meta { - if usedKeys[key] || !strings.HasPrefix(key, "provider_") || !strings.HasSuffix(key, "_byok_cost") { - continue - } - value, ok := parseBreakdownNumeric(raw) - if !ok { - continue - } - base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_byok_cost") - if base == "" || ensureFields(base).cost { - continue - } - recordCost(base, value, key) - } - - out := make([]ProviderBreakdownEntry, 0, len(byProvider)) - for name, entry := range byProvider { - if entry.cost <= 0 && entry.input <= 0 && entry.output <= 0 && entry.requests <= 0 { - continue - } - out = append(out, ProviderBreakdownEntry{ - Name: name, - Cost: entry.cost, - Input: entry.input, - Output: entry.output, - Requests: entry.requests, - }) - } - sort.Slice(out, func(i, j int) bool { - ti := out[i].Input + out[i].Output - tj := out[j].Input + out[j].Output - if ti != tj { - return ti > tj - } - if out[i].Cost != out[j].Cost { - return out[i].Cost > out[j].Cost - } - if out[i].Requests != out[j].Requests { - return out[i].Requests > out[j].Requests - } - return out[i].Name < out[j].Name - }) - return out, usedKeys -} - -func ExtractUpstreamProviderBreakdown(s UsageSnapshot) ([]ProviderBreakdownEntry, map[string]bool) { - type agg struct { - cost float64 - input float64 - output float64 - requests float64 - } - byProvider := make(map[string]*agg) - usedKeys := make(map[string]bool) - - ensure := func(name string) *agg { - if _, ok := byProvider[name]; !ok { - byProvider[name] = &agg{} - } - return byProvider[name] - } - - for key, metric := range s.Metrics { - if metric.Used == nil || !strings.HasPrefix(key, "upstream_") { - continue - } - switch { - case strings.HasSuffix(key, "_cost_usd"): - ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_cost_usd")).cost += *metric.Used - usedKeys[key] = true - case strings.HasSuffix(key, "_input_tokens"): - ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), 
"_input_tokens")).input += *metric.Used - usedKeys[key] = true - case strings.HasSuffix(key, "_output_tokens"): - ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_output_tokens")).output += *metric.Used - usedKeys[key] = true - case strings.HasSuffix(key, "_requests"): - ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_requests")).requests += *metric.Used - usedKeys[key] = true - } - } - - out := make([]ProviderBreakdownEntry, 0, len(byProvider)) - for name, entry := range byProvider { - out = append(out, ProviderBreakdownEntry{ - Name: name, - Cost: entry.cost, - Input: entry.input, - Output: entry.output, - Requests: entry.requests, - }) - } - sort.Slice(out, func(i, j int) bool { - ti := out[i].Input + out[i].Output - tj := out[j].Input + out[j].Output - if ti != tj { - return ti > tj - } - if out[i].Requests != out[j].Requests { - return out[i].Requests > out[j].Requests - } - return out[i].Name < out[j].Name - }) - if len(out) == 0 { - return nil, nil - } - return out, usedKeys -} - -func ExtractClientBreakdown(s UsageSnapshot) ([]ClientBreakdownEntry, map[string]bool) { - byClient := make(map[string]*ClientBreakdownEntry) - usedKeys := make(map[string]bool) - tokenSeriesByClient := make(map[string]map[string]float64) - usageClientSeriesByClient := make(map[string]map[string]float64) - usageSourceSeriesByClient := make(map[string]map[string]float64) - hasAllTimeRequests := make(map[string]bool) - requestsTodayFallback := make(map[string]float64) - hasAnyClientMetrics := false - - ensure := func(name string) *ClientBreakdownEntry { - if _, ok := byClient[name]; !ok { - byClient[name] = &ClientBreakdownEntry{Name: name} - } - return byClient[name] - } - - for key, metric := range s.Metrics { - if metric.Used == nil { - continue - } - if strings.HasPrefix(key, "client_") { - name, field, ok := parseClientMetricKey(key) - if !ok { - continue - } - name = canonicalizeClientBucket(name) - hasAnyClientMetrics = true - client := 
ensure(name) - switch field { - case "total_tokens": - client.Total = *metric.Used - case "input_tokens": - client.Input = *metric.Used - case "output_tokens": - client.Output = *metric.Used - case "cached_tokens": - client.Cached = *metric.Used - case "reasoning_tokens": - client.Reasoning = *metric.Used - case "requests": - client.Requests = *metric.Used - hasAllTimeRequests[name] = true - case "sessions": - client.Sessions = *metric.Used - } - usedKeys[key] = true - continue - } - if strings.HasPrefix(key, "source_") { - sourceName, field, ok := parseSourceMetricKey(key) - if !ok { - continue - } - clientName := canonicalizeClientBucket(sourceName) - client := ensure(clientName) - switch field { - case "requests": - client.Requests += *metric.Used - hasAllTimeRequests[clientName] = true - case "requests_today": - requestsTodayFallback[clientName] += *metric.Used - } - usedKeys[key] = true - } - } - - for clientName, value := range requestsTodayFallback { - if hasAllTimeRequests[clientName] { - continue - } - client := ensure(clientName) - if client.Requests <= 0 { - client.Requests = value - } - } - - hasAnyClientSeries := false - for key := range s.DailySeries { - if strings.HasPrefix(key, "tokens_client_") || strings.HasPrefix(key, "usage_client_") { - hasAnyClientSeries = true - break - } - } - - for key, points := range s.DailySeries { - if len(points) == 0 { - continue - } - switch { - case strings.HasPrefix(key, "tokens_client_"): - name := canonicalizeClientBucket(strings.TrimPrefix(key, "tokens_client_")) - if name == "" { - continue - } - mergeBreakdownSeriesByDay(tokenSeriesByClient, name, points) - case strings.HasPrefix(key, "usage_client_"): - name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_client_")) - if name == "" { - continue - } - mergeBreakdownSeriesByDay(usageClientSeriesByClient, name, points) - case strings.HasPrefix(key, "usage_source_"): - if hasAnyClientMetrics || hasAnyClientSeries { - continue - } - name := 
canonicalizeClientBucket(strings.TrimPrefix(key, "usage_source_")) - if name == "" { - continue - } - mergeBreakdownSeriesByDay(usageSourceSeriesByClient, name, points) - } - } - - for name, pointsByDay := range tokenSeriesByClient { - client := ensure(name) - client.Series = breakdownSortedSeries(pointsByDay) - client.SeriesKind = "tokens" - if client.Total <= 0 { - client.Total = sumBreakdownSeries(client.Series) - } - } - for name, pointsByDay := range usageClientSeriesByClient { - client := ensure(name) - if client.SeriesKind == "tokens" { - continue - } - client.Series = breakdownSortedSeries(pointsByDay) - client.SeriesKind = "requests" - if client.Requests <= 0 { - client.Requests = sumBreakdownSeries(client.Series) - } - } - for name, pointsByDay := range usageSourceSeriesByClient { - client := ensure(name) - if client.SeriesKind != "" { - continue - } - client.Series = breakdownSortedSeries(pointsByDay) - client.SeriesKind = "requests" - if client.Requests <= 0 { - client.Requests = sumBreakdownSeries(client.Series) - } - } - - out := make([]ClientBreakdownEntry, 0, len(byClient)) - for _, client := range byClient { - if breakdownClientValue(*client) <= 0 && client.Sessions <= 0 && client.Requests <= 0 && len(client.Series) == 0 { - continue - } - out = append(out, *client) - } - sort.Slice(out, func(i, j int) bool { - vi := breakdownClientTokenValue(out[i]) - vj := breakdownClientTokenValue(out[j]) - if vi != vj { - return vi > vj - } - if out[i].Requests != out[j].Requests { - return out[i].Requests > out[j].Requests - } - if out[i].Sessions != out[j].Sessions { - return out[i].Sessions > out[j].Sessions - } - return out[i].Name < out[j].Name - }) - return out, usedKeys -} - -func ExtractInterfaceClientBreakdown(s UsageSnapshot) ([]ClientBreakdownEntry, map[string]bool) { - byName := make(map[string]*ClientBreakdownEntry) - usedKeys := make(map[string]bool) - usageSeriesByName := make(map[string]map[string]float64) - - ensure := func(name string) 
*ClientBreakdownEntry { - if _, ok := byName[name]; !ok { - byName[name] = &ClientBreakdownEntry{Name: name} - } - return byName[name] - } - - for key, metric := range s.Metrics { - if metric.Used == nil || !strings.HasPrefix(key, "interface_") { - continue - } - name := canonicalizeClientBucket(strings.TrimPrefix(key, "interface_")) - if name == "" { - continue - } - ensure(name).Requests += *metric.Used - usedKeys[key] = true - } - - for key, points := range s.DailySeries { - if len(points) == 0 { - continue - } - switch { - case strings.HasPrefix(key, "usage_client_"): - name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_client_")) - if name != "" { - mergeBreakdownSeriesByDay(usageSeriesByName, name, points) - } - case strings.HasPrefix(key, "usage_source_"): - name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_source_")) - if name != "" { - mergeBreakdownSeriesByDay(usageSeriesByName, name, points) - } - } - } - - for name, pointsByDay := range usageSeriesByName { - entry := ensure(name) - entry.Series = breakdownSortedSeries(pointsByDay) - entry.SeriesKind = "requests" - if entry.Requests <= 0 { - entry.Requests = sumBreakdownSeries(entry.Series) - } - } - - out := make([]ClientBreakdownEntry, 0, len(byName)) - for _, entry := range byName { - if entry.Requests <= 0 && len(entry.Series) == 0 { - continue - } - out = append(out, *entry) - } - sort.Slice(out, func(i, j int) bool { - if out[i].Requests != out[j].Requests { - return out[i].Requests > out[j].Requests - } - return out[i].Name < out[j].Name - }) - if len(out) == 0 { - return nil, nil - } - return out, usedKeys -} - -var actualToolAggregateKeys = map[string]bool{ - "tool_calls_total": true, - "tool_completed": true, - "tool_errored": true, - "tool_cancelled": true, - "tool_success_rate": true, -} - -func ExtractActualToolUsage(s UsageSnapshot) ([]ActualToolUsageEntry, map[string]bool) { - byTool := make(map[string]float64) - usedKeys := make(map[string]bool) - - for key, 
metric := range s.Metrics { - if metric.Used == nil { - continue - } - if !strings.HasPrefix(key, "tool_") { - continue - } - if actualToolAggregateKeys[key] { - usedKeys[key] = true - continue - } - if strings.HasSuffix(key, "_today") || strings.HasSuffix(key, "_1d") || strings.HasSuffix(key, "_7d") || strings.HasSuffix(key, "_30d") { - usedKeys[key] = true - continue - } - name := strings.TrimPrefix(key, "tool_") - if name == "" { - continue - } - if IsMCPToolMetricName(name) { - usedKeys[key] = true - continue - } - byTool[name] += *metric.Used - usedKeys[key] = true - } - - if len(byTool) == 0 { - return nil, usedKeys - } - - out := make([]ActualToolUsageEntry, 0, len(byTool)) - for name, calls := range byTool { - if calls <= 0 { - continue - } - out = append(out, ActualToolUsageEntry{ - RawName: name, - Calls: calls, - }) - } - sort.Slice(out, func(i, j int) bool { - if out[i].Calls != out[j].Calls { - return out[i].Calls > out[j].Calls - } - return out[i].RawName < out[j].RawName - }) - return out, usedKeys -} - -func IsMCPToolMetricName(name string) bool { - normalized := strings.ToLower(strings.TrimSpace(name)) - if normalized == "" { - return false - } - if strings.HasPrefix(normalized, "mcp_") { - return true - } - if strings.Contains(normalized, "_mcp_server_") || strings.Contains(normalized, "-mcp-server-") { - return true - } - return strings.HasSuffix(normalized, "_mcp") -} - func parseProjectMetricKey(key string) (name, field string, ok bool) { const prefix = "project_" if !strings.HasPrefix(key, prefix) { @@ -951,23 +222,7 @@ func mergeBreakdownSeriesByDay(seriesByName map[string]map[string]float64, name } func breakdownSortedSeries(pointsByDay map[string]float64) []TimePoint { - if len(pointsByDay) == 0 { - return nil - } - days := make([]string, 0, len(pointsByDay)) - for day := range pointsByDay { - days = append(days, day) - } - sort.Strings(days) - - points := make([]TimePoint, 0, len(days)) - for _, day := range days { - points = 
append(points, TimePoint{ - Date: day, - Value: pointsByDay[day], - }) - } - return points + return SortedTimePoints(pointsByDay) } func sumBreakdownSeries(points []TimePoint) float64 { diff --git a/internal/core/usage_breakdowns_domains.go b/internal/core/usage_breakdowns_domains.go new file mode 100644 index 0000000..bdc502c --- /dev/null +++ b/internal/core/usage_breakdowns_domains.go @@ -0,0 +1,735 @@ +package core + +import ( + "sort" + "strings" +) + +func ExtractProjectUsage(s UsageSnapshot) ([]ProjectUsageEntry, map[string]bool) { + byProject := make(map[string]*ProjectUsageEntry) + usedKeys := make(map[string]bool) + seriesByProject := make(map[string]map[string]float64) + + ensure := func(name string) *ProjectUsageEntry { + if _, ok := byProject[name]; !ok { + byProject[name] = &ProjectUsageEntry{Name: name} + } + return byProject[name] + } + + for key, metric := range s.Metrics { + if metric.Used == nil { + continue + } + name, field, ok := parseProjectMetricKey(key) + if !ok { + continue + } + project := ensure(name) + switch field { + case "requests": + project.Requests = *metric.Used + case "requests_today": + project.Requests1d = *metric.Used + } + usedKeys[key] = true + } + + for key, points := range s.DailySeries { + if !strings.HasPrefix(key, "usage_project_") { + continue + } + name := strings.TrimSpace(strings.TrimPrefix(key, "usage_project_")) + if name == "" || len(points) == 0 { + continue + } + mergeBreakdownSeriesByDay(seriesByProject, name, points) + } + + for name, pointsByDay := range seriesByProject { + project := ensure(name) + project.Series = breakdownSortedSeries(pointsByDay) + if project.Requests <= 0 { + project.Requests = sumBreakdownSeries(project.Series) + } + } + + out := make([]ProjectUsageEntry, 0, len(byProject)) + for _, project := range byProject { + if project.Requests <= 0 && len(project.Series) == 0 { + continue + } + out = append(out, *project) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Requests != 
out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func ExtractModelBreakdown(s UsageSnapshot) ([]ModelBreakdownEntry, map[string]bool) { + type agg struct { + cost float64 + input float64 + output float64 + requests float64 + requests1d float64 + series []TimePoint + } + byModel := make(map[string]*agg) + usedKeys := make(map[string]bool) + + ensure := func(name string) *agg { + if _, ok := byModel[name]; !ok { + byModel[name] = &agg{} + } + return byModel[name] + } + + recordInput := func(name string, value float64, key string) { + ensure(name).input += value + usedKeys[key] = true + } + recordOutput := func(name string, value float64, key string) { + ensure(name).output += value + usedKeys[key] = true + } + recordCost := func(name string, value float64, key string) { + ensure(name).cost += value + usedKeys[key] = true + } + recordRequests := func(name string, value float64, key string) { + ensure(name).requests += value + usedKeys[key] = true + } + recordRequests1d := func(name string, value float64, key string) { + ensure(name).requests1d += value + usedKeys[key] = true + } + + for key, metric := range s.Metrics { + if metric.Used == nil { + continue + } + switch { + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_requests_today"): + recordRequests1d(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_requests_today"), *metric.Used, key) + case strings.HasPrefix(key, "model_") && strings.HasSuffix(key, "_requests"): + recordRequests(strings.TrimSuffix(strings.TrimPrefix(key, "model_"), "_requests"), *metric.Used, key) + default: + rawModel, kind, ok := parseModelMetricKey(key) + if !ok { + continue + } + switch kind { + case modelMetricInput: + recordInput(rawModel, *metric.Used, key) + case modelMetricOutput: + recordOutput(rawModel, *metric.Used, key) + case modelMetricCostUSD: + recordCost(rawModel, *metric.Used, key) + } + } + } + + for key, points := 
range s.DailySeries { + if !strings.HasPrefix(key, "usage_model_") || len(points) == 0 { + continue + } + name := strings.TrimSpace(strings.TrimPrefix(key, "usage_model_")) + if name == "" { + continue + } + entry := ensure(name) + entry.series = points + if entry.requests <= 0 { + entry.requests = sumBreakdownSeries(points) + } + } + + out := make([]ModelBreakdownEntry, 0, len(byModel)) + for name, entry := range byModel { + if entry.cost <= 0 && entry.input <= 0 && entry.output <= 0 && entry.requests <= 0 && len(entry.series) == 0 { + continue + } + out = append(out, ModelBreakdownEntry{ + Name: name, + Cost: entry.cost, + Input: entry.input, + Output: entry.output, + Requests: entry.requests, + Requests1d: entry.requests1d, + Series: entry.series, + }) + } + sort.Slice(out, func(i, j int) bool { + ti := out[i].Input + out[i].Output + tj := out[j].Input + out[j].Output + if ti != tj { + return ti > tj + } + if out[i].Cost != out[j].Cost { + return out[i].Cost > out[j].Cost + } + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func ExtractProviderBreakdown(s UsageSnapshot) ([]ProviderBreakdownEntry, map[string]bool) { + type agg struct { + cost float64 + input float64 + output float64 + requests float64 + } + type fieldState struct { + cost bool + input bool + output bool + requests bool + } + byProvider := make(map[string]*agg) + usedKeys := make(map[string]bool) + fieldsByProvider := make(map[string]*fieldState) + + ensure := func(name string) *agg { + if _, ok := byProvider[name]; !ok { + byProvider[name] = &agg{} + } + return byProvider[name] + } + ensureFields := func(name string) *fieldState { + if _, ok := fieldsByProvider[name]; !ok { + fieldsByProvider[name] = &fieldState{} + } + return fieldsByProvider[name] + } + recordCost := func(name string, value float64, key string) { + ensure(name).cost += value + ensureFields(name).cost = true + 
usedKeys[key] = true + } + recordInput := func(name string, value float64, key string) { + ensure(name).input += value + ensureFields(name).input = true + usedKeys[key] = true + } + recordOutput := func(name string, value float64, key string) { + ensure(name).output += value + ensureFields(name).output = true + usedKeys[key] = true + } + recordRequests := func(name string, value float64, key string) { + ensure(name).requests += value + ensureFields(name).requests = true + usedKeys[key] = true + } + + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "provider_") { + continue + } + switch { + case strings.HasSuffix(key, "_cost_usd"): + recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost_usd"), *metric.Used, key) + case strings.HasSuffix(key, "_cost") && !strings.HasSuffix(key, "_byok_cost"): + recordCost(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost"), *metric.Used, key) + case strings.HasSuffix(key, "_input_tokens"): + recordInput(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_input_tokens"), *metric.Used, key) + case strings.HasSuffix(key, "_output_tokens"): + recordOutput(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_output_tokens"), *metric.Used, key) + case strings.HasSuffix(key, "_requests"): + recordRequests(strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_requests"), *metric.Used, key) + } + } + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "provider_") || !strings.HasSuffix(key, "_byok_cost") { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_byok_cost") + if base == "" || ensureFields(base).cost { + continue + } + recordCost(base, *metric.Used, key) + } + + meta := snapshotBreakdownMetaEntries(s) + for key, raw := range meta { + if usedKeys[key] || !strings.HasPrefix(key, "provider_") { + continue + } + switch { + case strings.HasSuffix(key, "_cost") && 
!strings.HasSuffix(key, "_byok_cost"): + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_cost") + if base == "" || ensureFields(base).cost { + continue + } + recordCost(base, value, key) + case strings.HasSuffix(key, "_input_tokens"), strings.HasSuffix(key, "_prompt_tokens"): + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_input_tokens") + base = strings.TrimSuffix(base, "_prompt_tokens") + if base == "" || ensureFields(base).input { + continue + } + recordInput(base, value, key) + case strings.HasSuffix(key, "_output_tokens"), strings.HasSuffix(key, "_completion_tokens"): + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_output_tokens") + base = strings.TrimSuffix(base, "_completion_tokens") + if base == "" || ensureFields(base).output { + continue + } + recordOutput(base, value, key) + case strings.HasSuffix(key, "_requests"): + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_requests") + if base == "" || ensureFields(base).requests { + continue + } + recordRequests(base, value, key) + } + } + for key, raw := range meta { + if usedKeys[key] || !strings.HasPrefix(key, "provider_") || !strings.HasSuffix(key, "_byok_cost") { + continue + } + value, ok := parseBreakdownNumeric(raw) + if !ok { + continue + } + base := strings.TrimSuffix(strings.TrimPrefix(key, "provider_"), "_byok_cost") + if base == "" || ensureFields(base).cost { + continue + } + recordCost(base, value, key) + } + + out := make([]ProviderBreakdownEntry, 0, len(byProvider)) + for name, entry := range byProvider { + if entry.cost <= 0 && entry.input <= 0 && entry.output <= 0 && entry.requests <= 0 { + continue + } + out = append(out, ProviderBreakdownEntry{ 
+ Name: name, + Cost: entry.cost, + Input: entry.input, + Output: entry.output, + Requests: entry.requests, + }) + } + sort.Slice(out, func(i, j int) bool { + ti := out[i].Input + out[i].Output + tj := out[j].Input + out[j].Output + if ti != tj { + return ti > tj + } + if out[i].Cost != out[j].Cost { + return out[i].Cost > out[j].Cost + } + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func ExtractUpstreamProviderBreakdown(s UsageSnapshot) ([]ProviderBreakdownEntry, map[string]bool) { + type agg struct { + cost float64 + input float64 + output float64 + requests float64 + } + byProvider := make(map[string]*agg) + usedKeys := make(map[string]bool) + + ensure := func(name string) *agg { + if _, ok := byProvider[name]; !ok { + byProvider[name] = &agg{} + } + return byProvider[name] + } + + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "upstream_") { + continue + } + switch { + case strings.HasSuffix(key, "_cost_usd"): + ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_cost_usd")).cost += *metric.Used + usedKeys[key] = true + case strings.HasSuffix(key, "_input_tokens"): + ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_input_tokens")).input += *metric.Used + usedKeys[key] = true + case strings.HasSuffix(key, "_output_tokens"): + ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_output_tokens")).output += *metric.Used + usedKeys[key] = true + case strings.HasSuffix(key, "_requests"): + ensure(strings.TrimSuffix(strings.TrimPrefix(key, "upstream_"), "_requests")).requests += *metric.Used + usedKeys[key] = true + } + } + + out := make([]ProviderBreakdownEntry, 0, len(byProvider)) + for name, entry := range byProvider { + out = append(out, ProviderBreakdownEntry{ + Name: name, + Cost: entry.cost, + Input: entry.input, + Output: entry.output, + Requests: entry.requests, + 
}) + } + sort.Slice(out, func(i, j int) bool { + ti := out[i].Input + out[i].Output + tj := out[j].Input + out[j].Output + if ti != tj { + return ti > tj + } + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + if len(out) == 0 { + return nil, nil + } + return out, usedKeys +} + +func ExtractClientBreakdown(s UsageSnapshot) ([]ClientBreakdownEntry, map[string]bool) { + byClient := make(map[string]*ClientBreakdownEntry) + usedKeys := make(map[string]bool) + tokenSeriesByClient := make(map[string]map[string]float64) + usageClientSeriesByClient := make(map[string]map[string]float64) + usageSourceSeriesByClient := make(map[string]map[string]float64) + hasAllTimeRequests := make(map[string]bool) + requestsTodayFallback := make(map[string]float64) + hasAnyClientMetrics := false + + ensure := func(name string) *ClientBreakdownEntry { + if _, ok := byClient[name]; !ok { + byClient[name] = &ClientBreakdownEntry{Name: name} + } + return byClient[name] + } + + for key, metric := range s.Metrics { + if metric.Used == nil { + continue + } + if strings.HasPrefix(key, "client_") { + name, field, ok := parseClientMetricKey(key) + if !ok { + continue + } + name = canonicalizeClientBucket(name) + hasAnyClientMetrics = true + client := ensure(name) + switch field { + case "total_tokens": + client.Total = *metric.Used + case "input_tokens": + client.Input = *metric.Used + case "output_tokens": + client.Output = *metric.Used + case "cached_tokens": + client.Cached = *metric.Used + case "reasoning_tokens": + client.Reasoning = *metric.Used + case "requests": + client.Requests = *metric.Used + hasAllTimeRequests[name] = true + case "sessions": + client.Sessions = *metric.Used + } + usedKeys[key] = true + continue + } + if strings.HasPrefix(key, "source_") { + sourceName, field, ok := parseSourceMetricKey(key) + if !ok { + continue + } + clientName := canonicalizeClientBucket(sourceName) + client := 
ensure(clientName) + switch field { + case "requests": + client.Requests += *metric.Used + hasAllTimeRequests[clientName] = true + case "requests_today": + requestsTodayFallback[clientName] += *metric.Used + } + usedKeys[key] = true + } + } + + for clientName, value := range requestsTodayFallback { + if hasAllTimeRequests[clientName] { + continue + } + client := ensure(clientName) + if client.Requests <= 0 { + client.Requests = value + } + } + + hasAnyClientSeries := false + for key := range s.DailySeries { + if strings.HasPrefix(key, "tokens_client_") || strings.HasPrefix(key, "usage_client_") { + hasAnyClientSeries = true + break + } + } + + for key, points := range s.DailySeries { + if len(points) == 0 { + continue + } + switch { + case strings.HasPrefix(key, "tokens_client_"): + name := canonicalizeClientBucket(strings.TrimPrefix(key, "tokens_client_")) + if name == "" { + continue + } + mergeBreakdownSeriesByDay(tokenSeriesByClient, name, points) + case strings.HasPrefix(key, "usage_client_"): + name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_client_")) + if name == "" { + continue + } + mergeBreakdownSeriesByDay(usageClientSeriesByClient, name, points) + case strings.HasPrefix(key, "usage_source_"): + if hasAnyClientMetrics || hasAnyClientSeries { + continue + } + name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_source_")) + if name == "" { + continue + } + mergeBreakdownSeriesByDay(usageSourceSeriesByClient, name, points) + } + } + + for name, pointsByDay := range tokenSeriesByClient { + client := ensure(name) + client.Series = breakdownSortedSeries(pointsByDay) + client.SeriesKind = "tokens" + if client.Total <= 0 { + client.Total = sumBreakdownSeries(client.Series) + } + } + for name, pointsByDay := range usageClientSeriesByClient { + client := ensure(name) + if client.SeriesKind == "tokens" { + continue + } + client.Series = breakdownSortedSeries(pointsByDay) + client.SeriesKind = "requests" + if client.Requests <= 0 { + 
client.Requests = sumBreakdownSeries(client.Series) + } + } + for name, pointsByDay := range usageSourceSeriesByClient { + client := ensure(name) + if client.SeriesKind != "" { + continue + } + client.Series = breakdownSortedSeries(pointsByDay) + client.SeriesKind = "requests" + if client.Requests <= 0 { + client.Requests = sumBreakdownSeries(client.Series) + } + } + + out := make([]ClientBreakdownEntry, 0, len(byClient)) + for _, client := range byClient { + if breakdownClientValue(*client) <= 0 && client.Sessions <= 0 && client.Requests <= 0 && len(client.Series) == 0 { + continue + } + out = append(out, *client) + } + sort.Slice(out, func(i, j int) bool { + vi := breakdownClientTokenValue(out[i]) + vj := breakdownClientTokenValue(out[j]) + if vi != vj { + return vi > vj + } + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + if out[i].Sessions != out[j].Sessions { + return out[i].Sessions > out[j].Sessions + } + return out[i].Name < out[j].Name + }) + return out, usedKeys +} + +func ExtractInterfaceClientBreakdown(s UsageSnapshot) ([]ClientBreakdownEntry, map[string]bool) { + byName := make(map[string]*ClientBreakdownEntry) + usedKeys := make(map[string]bool) + usageSeriesByName := make(map[string]map[string]float64) + + ensure := func(name string) *ClientBreakdownEntry { + if _, ok := byName[name]; !ok { + byName[name] = &ClientBreakdownEntry{Name: name} + } + return byName[name] + } + + for key, metric := range s.Metrics { + if metric.Used == nil || !strings.HasPrefix(key, "interface_") { + continue + } + name := canonicalizeClientBucket(strings.TrimPrefix(key, "interface_")) + if name == "" { + continue + } + ensure(name).Requests += *metric.Used + usedKeys[key] = true + } + + for key, points := range s.DailySeries { + if len(points) == 0 { + continue + } + switch { + case strings.HasPrefix(key, "usage_client_"): + name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_client_")) + if name != "" { + 
mergeBreakdownSeriesByDay(usageSeriesByName, name, points) + } + case strings.HasPrefix(key, "usage_source_"): + name := canonicalizeClientBucket(strings.TrimPrefix(key, "usage_source_")) + if name != "" { + mergeBreakdownSeriesByDay(usageSeriesByName, name, points) + } + } + } + + for name, pointsByDay := range usageSeriesByName { + entry := ensure(name) + entry.Series = breakdownSortedSeries(pointsByDay) + entry.SeriesKind = "requests" + if entry.Requests <= 0 { + entry.Requests = sumBreakdownSeries(entry.Series) + } + } + + out := make([]ClientBreakdownEntry, 0, len(byName)) + for _, entry := range byName { + if entry.Requests <= 0 && len(entry.Series) == 0 { + continue + } + out = append(out, *entry) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Requests != out[j].Requests { + return out[i].Requests > out[j].Requests + } + return out[i].Name < out[j].Name + }) + if len(out) == 0 { + return nil, nil + } + return out, usedKeys +} + +var actualToolAggregateKeys = map[string]bool{ + "tool_calls_total": true, + "tool_completed": true, + "tool_errored": true, + "tool_cancelled": true, + "tool_success_rate": true, +} + +func ExtractActualToolUsage(s UsageSnapshot) ([]ActualToolUsageEntry, map[string]bool) { + byTool := make(map[string]float64) + usedKeys := make(map[string]bool) + + for key, metric := range s.Metrics { + if metric.Used == nil { + continue + } + if !strings.HasPrefix(key, "tool_") { + continue + } + if actualToolAggregateKeys[key] { + usedKeys[key] = true + continue + } + if strings.HasSuffix(key, "_today") || strings.HasSuffix(key, "_1d") || strings.HasSuffix(key, "_7d") || strings.HasSuffix(key, "_30d") { + usedKeys[key] = true + continue + } + name := strings.TrimPrefix(key, "tool_") + if name == "" { + continue + } + if IsMCPToolMetricName(name) { + usedKeys[key] = true + continue + } + byTool[name] += *metric.Used + usedKeys[key] = true + } + + if len(byTool) == 0 { + return nil, usedKeys + } + + out := make([]ActualToolUsageEntry, 0, 
len(byTool)) + for name, calls := range byTool { + if calls <= 0 { + continue + } + out = append(out, ActualToolUsageEntry{ + RawName: name, + Calls: calls, + }) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Calls != out[j].Calls { + return out[i].Calls > out[j].Calls + } + return out[i].RawName < out[j].RawName + }) + return out, usedKeys +} + +func IsMCPToolMetricName(name string) bool { + normalized := strings.ToLower(strings.TrimSpace(name)) + if normalized == "" { + return false + } + if strings.HasPrefix(normalized, "mcp_") { + return true + } + if strings.Contains(normalized, "_mcp_server_") || strings.Contains(normalized, "-mcp-server-") { + return true + } + return strings.HasSuffix(normalized, "_mcp") +} diff --git a/internal/daemon/server_http.go b/internal/daemon/server_http.go index 054a8df..983e257 100644 --- a/internal/daemon/server_http.go +++ b/internal/daemon/server_http.go @@ -126,7 +126,7 @@ func (s *Service) handleReadModel(w http.ResponseWriter, r *http.Request) { } writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: cached}) if time.Since(cachedAt) > 2*time.Second { - s.refreshReadModelCacheAsync(s.backgroundContext(), cacheKey, req, 60*time.Second) + s.refreshReadModelCacheAsync(s.serviceContext(r.Context()), cacheKey, req, 60*time.Second) } return } @@ -144,7 +144,7 @@ func (s *Service) handleReadModel(w http.ResponseWriter, r *http.Request) { s.warnf("read_model_cache_miss_compute_error", "error=%v", err) } - s.refreshReadModelCacheAsync(s.backgroundContext(), cacheKey, req, 60*time.Second) + s.refreshReadModelCacheAsync(s.serviceContext(r.Context()), cacheKey, req, 60*time.Second) snapshots = ReadModelTemplatesFromRequest(req, DisabledAccountsFromConfig()) writeJSON(w, http.StatusOK, ReadModelResponse{Snapshots: snapshots}) durationMs := time.Since(started).Milliseconds() diff --git a/internal/daemon/server_read_model.go b/internal/daemon/server_read_model.go index b78da89..9f46d09 100644 --- 
a/internal/daemon/server_read_model.go +++ b/internal/daemon/server_read_model.go @@ -52,10 +52,13 @@ func (s *Service) refreshReadModelCacheAsync( }() } -func (s *Service) backgroundContext() context.Context { +func (s *Service) serviceContext(fallback context.Context) context.Context { if s != nil && s.ctx != nil { return s.ctx } + if fallback != nil { + return fallback + } return context.Background() } diff --git a/internal/dashboardapp/service.go b/internal/dashboardapp/service.go index c1524e2..d6691e7 100644 --- a/internal/dashboardapp/service.go +++ b/internal/dashboardapp/service.go @@ -11,10 +11,15 @@ import ( "github.com/janekbaraniewski/openusage/internal/providers" ) -type Service struct{} +type Service struct { + ctx context.Context +} -func NewService() *Service { - return &Service{} +func NewService(ctx context.Context) *Service { + if ctx == nil { + ctx = context.Background() + } + return &Service{ctx: ctx} } func (s *Service) SaveTheme(themeName string) error { @@ -53,7 +58,11 @@ func (s *Service) ValidateAPIKey(accountID, providerID, apiKey string) (bool, st return false, "unknown provider" } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + parent := context.Background() + if s != nil && s.ctx != nil { + parent = s.ctx + } + ctx, cancel := context.WithTimeout(parent, 5*time.Second) defer cancel() snap, err := provider.Fetch(ctx, core.AccountConfig{ diff --git a/internal/providers/gemini_cli/gemini_cli.go b/internal/providers/gemini_cli/gemini_cli.go index 99d4a9d..821229b 100644 --- a/internal/providers/gemini_cli/gemini_cli.go +++ b/internal/providers/gemini_cli/gemini_cli.go @@ -7,13 +7,11 @@ import ( "fmt" "io" "log" - "maps" "net/http" "net/url" "os" "os/exec" "path/filepath" - "slices" "sort" "strconv" "strings" @@ -1037,1341 +1035,3 @@ func applyGeminiMCPMetadata(snap *core.UsageSnapshot, settings geminiSettings, e snap.Raw["mcp_servers_disabled"] = summary } } - -func mapKeysSorted(values map[string]bool) []string 
{ - if len(values) == 0 { - return nil - } - out := slices.Sorted(maps.Keys(values)) - return slices.DeleteFunc(out, func(key string) bool { return strings.TrimSpace(key) == "" }) -} - -func formatGeminiNameList(values []string, max int) string { - if len(values) == 0 { - return "" - } - limit := max - if limit <= 0 || limit > len(values) { - limit = len(values) - } - out := strings.Join(values[:limit], ", ") - if len(values) > limit { - out += fmt.Sprintf(", +%d more", len(values)-limit) - } - return out -} - -func (t geminiMessageToken) toUsage() tokenUsage { - total := t.Total - if total <= 0 { - total = t.Input + t.Output + t.Cached + t.Thoughts + t.Tool - } - return tokenUsage{ - InputTokens: t.Input, - CachedInputTokens: t.Cached, - OutputTokens: t.Output, - ReasoningTokens: t.Thoughts, - ToolTokens: t.Tool, - TotalTokens: total, - } -} - -func (p *Provider) readSessionUsageBreakdowns(tmpDir string, snap *core.UsageSnapshot) (int, error) { - files, err := findGeminiSessionFiles(tmpDir) - if err != nil { - return 0, err - } - if len(files) == 0 { - return 0, nil - } - - modelTotals := make(map[string]tokenUsage) - clientTotals := make(map[string]tokenUsage) - toolTotals := make(map[string]int) - languageUsageCounts := make(map[string]int) - changedFiles := make(map[string]bool) - commitCommands := make(map[string]bool) - modelDaily := make(map[string]map[string]float64) - clientDaily := make(map[string]map[string]float64) - clientSessions := make(map[string]int) - modelRequests := make(map[string]int) - modelSessions := make(map[string]int) - - dailyMessages := make(map[string]float64) - dailySessions := make(map[string]float64) - dailyToolCalls := make(map[string]float64) - dailyTokens := make(map[string]float64) - dailyInputTokens := make(map[string]float64) - dailyOutputTokens := make(map[string]float64) - dailyCachedTokens := make(map[string]float64) - dailyReasoningTokens := make(map[string]float64) - dailyToolTokens := make(map[string]float64) - - 
sessionIDs := make(map[string]bool) - sessionCount := 0 - totalMessages := 0 - totalTurns := 0 - totalToolCalls := 0 - totalInfoMessages := 0 - totalErrorMessages := 0 - totalAssistantMessages := 0 - totalToolSuccess := 0 - totalToolFailed := 0 - totalToolErrored := 0 - totalToolCancelled := 0 - quotaLimitEvents := 0 - modelLinesAdded := 0 - modelLinesRemoved := 0 - modelCharsAdded := 0 - modelCharsRemoved := 0 - userLinesAdded := 0 - userLinesRemoved := 0 - userCharsAdded := 0 - userCharsRemoved := 0 - diffStatEvents := 0 - inferredCommitCount := 0 - - var lastModelName string - var lastModelTokens int - foundLatest := false - - for _, path := range files { - chat, err := readGeminiChatFile(path) - if err != nil { - continue - } - - sessionID := strings.TrimSpace(chat.SessionID) - if sessionID == "" { - sessionID = path - } - if sessionIDs[sessionID] { - continue - } - sessionIDs[sessionID] = true - sessionCount++ - - clientName := normalizeClientName("CLI") - clientSessions[clientName]++ - - sessionDay := dayFromSession(chat.StartTime, chat.LastUpdated) - if sessionDay != "" { - dailySessions[sessionDay]++ - } - - var previous tokenUsage - var hasPrevious bool - fileHasUsage := false - sessionModels := make(map[string]bool) - - for _, msg := range chat.Messages { - day := dayFromTimestamp(msg.Timestamp) - if day == "" { - day = sessionDay - } - - switch strings.ToLower(strings.TrimSpace(msg.Type)) { - case "info": - totalInfoMessages++ - case "error": - totalErrorMessages++ - case "gemini", "assistant", "model": - totalAssistantMessages++ - } - - if isQuotaLimitMessage(msg.Content) { - quotaLimitEvents++ - } - - if strings.EqualFold(msg.Type, "user") { - totalMessages++ - if day != "" { - dailyMessages[day]++ - } - } - - if len(msg.ToolCalls) > 0 { - totalToolCalls += len(msg.ToolCalls) - if day != "" { - dailyToolCalls[day] += float64(len(msg.ToolCalls)) - } - for _, tc := range msg.ToolCalls { - toolName := strings.TrimSpace(tc.Name) - if toolName != "" { - 
toolTotals[toolName]++ - } - - status := strings.ToLower(strings.TrimSpace(tc.Status)) - switch { - case status == "" || status == "success" || status == "succeeded" || status == "ok" || status == "completed": - totalToolSuccess++ - case status == "cancelled" || status == "canceled": - totalToolCancelled++ - totalToolFailed++ - default: - totalToolErrored++ - totalToolFailed++ - } - - toolLower := strings.ToLower(toolName) - successfulToolCall := isGeminiToolCallSuccessful(status) - for _, path := range extractGeminiToolPaths(tc.Args) { - if successfulToolCall { - if lang := inferGeminiLanguageFromPath(path); lang != "" { - languageUsageCounts[lang]++ - } - } - if successfulToolCall && isGeminiMutatingTool(toolLower) { - changedFiles[path] = true - } - } - - if successfulToolCall && isGeminiMutatingTool(toolLower) { - if diff, ok := extractGeminiToolDiffStat(tc.ResultDisplay); ok { - modelLinesAdded += diff.ModelAddedLines - modelLinesRemoved += diff.ModelRemovedLines - modelCharsAdded += diff.ModelAddedChars - modelCharsRemoved += diff.ModelRemovedChars - userLinesAdded += diff.UserAddedLines - userLinesRemoved += diff.UserRemovedLines - userCharsAdded += diff.UserAddedChars - userCharsRemoved += diff.UserRemovedChars - diffStatEvents++ - } else { - added, removed := estimateGeminiToolLineDelta(tc.Args) - modelLinesAdded += added - modelLinesRemoved += removed - } - } - - if !successfulToolCall { - continue - } - cmd := strings.ToLower(extractGeminiToolCommand(tc.Args)) - if strings.Contains(cmd, "git commit") { - if !commitCommands[cmd] { - commitCommands[cmd] = true - inferredCommitCount++ - } - } else if strings.Contains(toolLower, "commit") { - inferredCommitCount++ - } - } - } - if msg.Tokens == nil { - continue - } - - modelName := normalizeModelName(msg.Model) - total := msg.Tokens.toUsage() - - // Track latest model usage from the most recent session file - if !foundLatest { - lastModelName = modelName - lastModelTokens = total.TotalTokens - fileHasUsage = 
true - } - modelRequests[modelName]++ - sessionModels[modelName] = true - - delta := total - if hasPrevious { - delta = usageDelta(total, previous) - if !validUsageDelta(delta) { - delta = total - } - } - previous = total - hasPrevious = true - - if delta.TotalTokens <= 0 { - continue - } - - addUsage(modelTotals, modelName, delta) - addUsage(clientTotals, clientName, delta) - - if day != "" { - addDailyUsage(modelDaily, modelName, day, float64(delta.TotalTokens)) - addDailyUsage(clientDaily, clientName, day, float64(delta.TotalTokens)) - dailyTokens[day] += float64(delta.TotalTokens) - dailyInputTokens[day] += float64(delta.InputTokens) - dailyOutputTokens[day] += float64(delta.OutputTokens) - dailyCachedTokens[day] += float64(delta.CachedInputTokens) - dailyReasoningTokens[day] += float64(delta.ReasoningTokens) - dailyToolTokens[day] += float64(delta.ToolTokens) - } - - totalTurns++ - } - - for modelName := range sessionModels { - modelSessions[modelName]++ - } - - if fileHasUsage { - foundLatest = true - } - } - - if sessionCount == 0 { - return 0, nil - } - - if lastModelName != "" && lastModelTokens > 0 { - limit := getModelContextLimit(lastModelName) - if limit > 0 { - used := float64(lastModelTokens) - lim := float64(limit) - snap.Metrics["context_window"] = core.Metric{ - Used: &used, - Limit: &lim, - Unit: "tokens", - Window: "current", - } - snap.Raw["active_model"] = lastModelName - } - } - - emitBreakdownMetrics("model", modelTotals, modelDaily, snap) - emitBreakdownMetrics("client", clientTotals, clientDaily, snap) - emitClientSessionMetrics(clientSessions, snap) - emitModelRequestMetrics(modelRequests, modelSessions, snap) - emitToolMetrics(toolTotals, snap) - if languageSummary := formatNamedCountMap(languageUsageCounts, "req"); languageSummary != "" { - snap.Raw["language_usage"] = languageSummary - } - for lang, count := range languageUsageCounts { - if count <= 0 { - continue - } - setUsedMetric(snap, "lang_"+sanitizeMetricName(lang), 
float64(count), "requests", defaultUsageWindowLabel) - } - - storeSeries(snap, "messages", dailyMessages) - storeSeries(snap, "sessions", dailySessions) - storeSeries(snap, "tool_calls", dailyToolCalls) - storeSeries(snap, "tokens_total", dailyTokens) - storeSeries(snap, "requests", dailyMessages) - storeSeries(snap, "analytics_requests", dailyMessages) - storeSeries(snap, "analytics_tokens", dailyTokens) - storeSeries(snap, "tokens_input", dailyInputTokens) - storeSeries(snap, "tokens_output", dailyOutputTokens) - storeSeries(snap, "tokens_cached", dailyCachedTokens) - storeSeries(snap, "tokens_reasoning", dailyReasoningTokens) - storeSeries(snap, "tokens_tool", dailyToolTokens) - - setUsedMetric(snap, "total_messages", float64(totalMessages), "messages", defaultUsageWindowLabel) - setUsedMetric(snap, "total_sessions", float64(sessionCount), "sessions", defaultUsageWindowLabel) - setUsedMetric(snap, "total_turns", float64(totalTurns), "turns", defaultUsageWindowLabel) - setUsedMetric(snap, "total_tool_calls", float64(totalToolCalls), "calls", defaultUsageWindowLabel) - setUsedMetric(snap, "total_info_messages", float64(totalInfoMessages), "messages", defaultUsageWindowLabel) - setUsedMetric(snap, "total_error_messages", float64(totalErrorMessages), "messages", defaultUsageWindowLabel) - setUsedMetric(snap, "total_assistant_messages", float64(totalAssistantMessages), "messages", defaultUsageWindowLabel) - setUsedMetric(snap, "tool_calls_success", float64(totalToolSuccess), "calls", defaultUsageWindowLabel) - setUsedMetric(snap, "tool_calls_failed", float64(totalToolFailed), "calls", defaultUsageWindowLabel) - setUsedMetric(snap, "tool_calls_total", float64(totalToolCalls), "calls", defaultUsageWindowLabel) - setUsedMetric(snap, "tool_completed", float64(totalToolSuccess), "calls", defaultUsageWindowLabel) - setUsedMetric(snap, "tool_errored", float64(totalToolErrored), "calls", defaultUsageWindowLabel) - setUsedMetric(snap, "tool_cancelled", 
float64(totalToolCancelled), "calls", defaultUsageWindowLabel) - if totalToolCalls > 0 { - successRate := float64(totalToolSuccess) / float64(totalToolCalls) * 100 - setUsedMetric(snap, "tool_success_rate", successRate, "%", defaultUsageWindowLabel) - } - setUsedMetric(snap, "quota_limit_events", float64(quotaLimitEvents), "events", defaultUsageWindowLabel) - setUsedMetric(snap, "total_prompts", float64(totalMessages), "prompts", defaultUsageWindowLabel) - - if cliUsage, ok := clientTotals["CLI"]; ok { - setUsedMetric(snap, "client_cli_messages", float64(totalMessages), "messages", defaultUsageWindowLabel) - setUsedMetric(snap, "client_cli_turns", float64(totalTurns), "turns", defaultUsageWindowLabel) - setUsedMetric(snap, "client_cli_tool_calls", float64(totalToolCalls), "calls", defaultUsageWindowLabel) - setUsedMetric(snap, "client_cli_input_tokens", float64(cliUsage.InputTokens), "tokens", defaultUsageWindowLabel) - setUsedMetric(snap, "client_cli_output_tokens", float64(cliUsage.OutputTokens), "tokens", defaultUsageWindowLabel) - setUsedMetric(snap, "client_cli_cached_tokens", float64(cliUsage.CachedInputTokens), "tokens", defaultUsageWindowLabel) - setUsedMetric(snap, "client_cli_reasoning_tokens", float64(cliUsage.ReasoningTokens), "tokens", defaultUsageWindowLabel) - setUsedMetric(snap, "client_cli_total_tokens", float64(cliUsage.TotalTokens), "tokens", defaultUsageWindowLabel) - } - - total := aggregateTokenTotals(modelTotals) - setUsedMetric(snap, "total_input_tokens", float64(total.InputTokens), "tokens", defaultUsageWindowLabel) - setUsedMetric(snap, "total_output_tokens", float64(total.OutputTokens), "tokens", defaultUsageWindowLabel) - setUsedMetric(snap, "total_cached_tokens", float64(total.CachedInputTokens), "tokens", defaultUsageWindowLabel) - setUsedMetric(snap, "total_reasoning_tokens", float64(total.ReasoningTokens), "tokens", defaultUsageWindowLabel) - setUsedMetric(snap, "total_tool_tokens", float64(total.ToolTokens), "tokens", 
defaultUsageWindowLabel) - setUsedMetric(snap, "total_tokens", float64(total.TotalTokens), "tokens", defaultUsageWindowLabel) - - if total.InputTokens > 0 { - cacheEfficiency := float64(total.CachedInputTokens) / float64(total.InputTokens) * 100 - setPercentMetric(snap, "cache_efficiency", cacheEfficiency, defaultUsageWindowLabel) - } - if total.TotalTokens > 0 { - reasoningShare := float64(total.ReasoningTokens) / float64(total.TotalTokens) * 100 - toolShare := float64(total.ToolTokens) / float64(total.TotalTokens) * 100 - setPercentMetric(snap, "reasoning_share", reasoningShare, defaultUsageWindowLabel) - setPercentMetric(snap, "tool_token_share", toolShare, defaultUsageWindowLabel) - } - if totalTurns > 0 { - avgTokensPerTurn := float64(total.TotalTokens) / float64(totalTurns) - setUsedMetric(snap, "avg_tokens_per_turn", avgTokensPerTurn, "tokens", defaultUsageWindowLabel) - } - if sessionCount > 0 { - avgToolsPerSession := float64(totalToolCalls) / float64(sessionCount) - setUsedMetric(snap, "avg_tools_per_session", avgToolsPerSession, "calls", defaultUsageWindowLabel) - } - - if _, v := latestSeriesValue(dailyMessages); v > 0 { - setUsedMetric(snap, "messages_today", v, "messages", "today") - } - if _, v := latestSeriesValue(dailySessions); v > 0 { - setUsedMetric(snap, "sessions_today", v, "sessions", "today") - } - if _, v := latestSeriesValue(dailyToolCalls); v > 0 { - setUsedMetric(snap, "tool_calls_today", v, "calls", "today") - } - if _, v := latestSeriesValue(dailyTokens); v > 0 { - setUsedMetric(snap, "tokens_today", v, "tokens", "today") - } - if _, v := latestSeriesValue(dailyInputTokens); v > 0 { - setUsedMetric(snap, "today_input_tokens", v, "tokens", "today") - } - if _, v := latestSeriesValue(dailyOutputTokens); v > 0 { - setUsedMetric(snap, "today_output_tokens", v, "tokens", "today") - } - if _, v := latestSeriesValue(dailyCachedTokens); v > 0 { - setUsedMetric(snap, "today_cached_tokens", v, "tokens", "today") - } - if _, v := 
latestSeriesValue(dailyReasoningTokens); v > 0 { - setUsedMetric(snap, "today_reasoning_tokens", v, "tokens", "today") - } - if _, v := latestSeriesValue(dailyToolTokens); v > 0 { - setUsedMetric(snap, "today_tool_tokens", v, "tokens", "today") - } - - setUsedMetric(snap, "7d_messages", sumLastNDays(dailyMessages, 7), "messages", "7d") - setUsedMetric(snap, "7d_sessions", sumLastNDays(dailySessions, 7), "sessions", "7d") - setUsedMetric(snap, "7d_tool_calls", sumLastNDays(dailyToolCalls, 7), "calls", "7d") - setUsedMetric(snap, "7d_tokens", sumLastNDays(dailyTokens, 7), "tokens", "7d") - setUsedMetric(snap, "7d_input_tokens", sumLastNDays(dailyInputTokens, 7), "tokens", "7d") - setUsedMetric(snap, "7d_output_tokens", sumLastNDays(dailyOutputTokens, 7), "tokens", "7d") - setUsedMetric(snap, "7d_cached_tokens", sumLastNDays(dailyCachedTokens, 7), "tokens", "7d") - setUsedMetric(snap, "7d_reasoning_tokens", sumLastNDays(dailyReasoningTokens, 7), "tokens", "7d") - setUsedMetric(snap, "7d_tool_tokens", sumLastNDays(dailyToolTokens, 7), "tokens", "7d") - - if modelLinesAdded > 0 { - setUsedMetric(snap, "composer_lines_added", float64(modelLinesAdded), "lines", defaultUsageWindowLabel) - } - if modelLinesRemoved > 0 { - setUsedMetric(snap, "composer_lines_removed", float64(modelLinesRemoved), "lines", defaultUsageWindowLabel) - } - if len(changedFiles) > 0 { - setUsedMetric(snap, "composer_files_changed", float64(len(changedFiles)), "files", defaultUsageWindowLabel) - } - if inferredCommitCount > 0 { - setUsedMetric(snap, "scored_commits", float64(inferredCommitCount), "commits", defaultUsageWindowLabel) - } - if userLinesAdded > 0 { - setUsedMetric(snap, "composer_user_lines_added", float64(userLinesAdded), "lines", defaultUsageWindowLabel) - } - if userLinesRemoved > 0 { - setUsedMetric(snap, "composer_user_lines_removed", float64(userLinesRemoved), "lines", defaultUsageWindowLabel) - } - if modelCharsAdded > 0 { - setUsedMetric(snap, "composer_model_chars_added", 
float64(modelCharsAdded), "chars", defaultUsageWindowLabel) - } - if modelCharsRemoved > 0 { - setUsedMetric(snap, "composer_model_chars_removed", float64(modelCharsRemoved), "chars", defaultUsageWindowLabel) - } - if userCharsAdded > 0 { - setUsedMetric(snap, "composer_user_chars_added", float64(userCharsAdded), "chars", defaultUsageWindowLabel) - } - if userCharsRemoved > 0 { - setUsedMetric(snap, "composer_user_chars_removed", float64(userCharsRemoved), "chars", defaultUsageWindowLabel) - } - if diffStatEvents > 0 { - setUsedMetric(snap, "composer_diffstat_events", float64(diffStatEvents), "calls", defaultUsageWindowLabel) - } - totalModelLineDelta := modelLinesAdded + modelLinesRemoved - totalUserLineDelta := userLinesAdded + userLinesRemoved - if totalModelLineDelta > 0 || totalUserLineDelta > 0 { - totalLineDelta := totalModelLineDelta + totalUserLineDelta - if totalLineDelta > 0 { - aiPct := float64(totalModelLineDelta) / float64(totalLineDelta) * 100 - setPercentMetric(snap, "ai_code_percentage", aiPct, defaultUsageWindowLabel) - } - } - - if quotaLimitEvents > 0 { - snap.Raw["quota_limit_detected"] = "true" - if _, hasQuota := snap.Metrics["quota"]; !hasQuota { - limit := 100.0 - remaining := 0.0 - used := 100.0 - snap.Metrics["quota"] = core.Metric{ - Limit: &limit, - Remaining: &remaining, - Used: &used, - Unit: "%", - Window: "daily", - } - applyQuotaStatus(snap, 0) - } - } - - return sessionCount, nil -} - -func findGeminiSessionFiles(tmpDir string) ([]string, error) { - if strings.TrimSpace(tmpDir) == "" { - return nil, nil - } - if _, err := os.Stat(tmpDir); err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, fmt.Errorf("stat tmp dir: %w", err) - } - - type item struct { - path string - modTime time.Time - } - var files []item - - walkErr := filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { - if err != nil || info == nil || info.IsDir() { - return nil - } - name := info.Name() - if 
!strings.HasPrefix(name, "session-") || !strings.HasSuffix(name, ".json") { - return nil - } - files = append(files, item{path: path, modTime: info.ModTime()}) - return nil - }) - if walkErr != nil { - return nil, fmt.Errorf("walk gemini tmp dir: %w", walkErr) - } - if len(files) == 0 { - return nil, nil - } - - sort.Slice(files, func(i, j int) bool { - if files[i].modTime.Equal(files[j].modTime) { - return files[i].path > files[j].path - } - return files[i].modTime.After(files[j].modTime) - }) - - return lo.Map(files, func(f item, _ int) string { return f.path }), nil -} - -func readGeminiChatFile(path string) (*geminiChatFile, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var chat geminiChatFile - if err := json.NewDecoder(f).Decode(&chat); err != nil { - return nil, err - } - return &chat, nil -} - -func emitBreakdownMetrics(prefix string, totals map[string]tokenUsage, daily map[string]map[string]float64, snap *core.UsageSnapshot) { - entries := sortUsageEntries(totals) - if len(entries) == 0 { - return - } - - for i, entry := range entries { - if i >= maxBreakdownMetrics { - break - } - keyPrefix := prefix + "_" + sanitizeMetricName(entry.Name) - setUsageMetric(snap, keyPrefix+"_total_tokens", float64(entry.Data.TotalTokens)) - setUsageMetric(snap, keyPrefix+"_input_tokens", float64(entry.Data.InputTokens)) - setUsageMetric(snap, keyPrefix+"_output_tokens", float64(entry.Data.OutputTokens)) - - if entry.Data.CachedInputTokens > 0 { - setUsageMetric(snap, keyPrefix+"_cached_tokens", float64(entry.Data.CachedInputTokens)) - } - if entry.Data.ReasoningTokens > 0 { - setUsageMetric(snap, keyPrefix+"_reasoning_tokens", float64(entry.Data.ReasoningTokens)) - } - - if byDay, ok := daily[entry.Name]; ok { - seriesKey := "tokens_" + prefix + "_" + sanitizeMetricName(entry.Name) - snap.DailySeries[seriesKey] = core.SortedTimePoints(byDay) - } - - if prefix == "model" { - rec := core.ModelUsageRecord{ - RawModelID: 
entry.Name, - RawSource: "json", - Window: defaultUsageWindowLabel, - InputTokens: core.Float64Ptr(float64(entry.Data.InputTokens)), - OutputTokens: core.Float64Ptr(float64(entry.Data.OutputTokens)), - TotalTokens: core.Float64Ptr(float64(entry.Data.TotalTokens)), - } - if entry.Data.CachedInputTokens > 0 { - rec.CachedTokens = core.Float64Ptr(float64(entry.Data.CachedInputTokens)) - } - if entry.Data.ReasoningTokens > 0 { - rec.ReasoningTokens = core.Float64Ptr(float64(entry.Data.ReasoningTokens)) - } - snap.AppendModelUsage(rec) - } - } - - snap.Raw[prefix+"_usage"] = formatUsageSummary(entries, maxBreakdownRaw) -} - -func emitClientSessionMetrics(clientSessions map[string]int, snap *core.UsageSnapshot) { - type entry struct { - name string - count int - } - var all []entry - for name, count := range clientSessions { - if count > 0 { - all = append(all, entry{name: name, count: count}) - } - } - sort.Slice(all, func(i, j int) bool { - if all[i].count == all[j].count { - return all[i].name < all[j].name - } - return all[i].count > all[j].count - }) - - for i, item := range all { - if i >= maxBreakdownMetrics { - break - } - value := float64(item.count) - snap.Metrics["client_"+sanitizeMetricName(item.name)+"_sessions"] = core.Metric{ - Used: &value, - Unit: "sessions", - Window: defaultUsageWindowLabel, - } - } -} - -func emitModelRequestMetrics(modelRequests, modelSessions map[string]int, snap *core.UsageSnapshot) { - type entry struct { - name string - requests int - sessions int - } - - all := make([]entry, 0, len(modelRequests)) - for name, requests := range modelRequests { - if requests <= 0 { - continue - } - all = append(all, entry{name: name, requests: requests, sessions: modelSessions[name]}) - } - - sort.Slice(all, func(i, j int) bool { - if all[i].requests == all[j].requests { - return all[i].name < all[j].name - } - return all[i].requests > all[j].requests - }) - - for i, item := range all { - if i >= maxBreakdownMetrics { - break - } - keyPrefix := 
"model_" + sanitizeMetricName(item.name) - req := float64(item.requests) - sess := float64(item.sessions) - snap.Metrics[keyPrefix+"_requests"] = core.Metric{ - Used: &req, - Unit: "requests", - Window: defaultUsageWindowLabel, - } - if item.sessions > 0 { - snap.Metrics[keyPrefix+"_sessions"] = core.Metric{ - Used: &sess, - Unit: "sessions", - Window: defaultUsageWindowLabel, - } - } - } -} - -func emitToolMetrics(toolTotals map[string]int, snap *core.UsageSnapshot) { - type entry struct { - name string - count int - } - var all []entry - for name, count := range toolTotals { - if count > 0 { - all = append(all, entry{name: name, count: count}) - } - } - sort.Slice(all, func(i, j int) bool { - if all[i].count == all[j].count { - return all[i].name < all[j].name - } - return all[i].count > all[j].count - }) - - var parts []string - limit := maxBreakdownRaw - for i, item := range all { - if i < limit { - parts = append(parts, fmt.Sprintf("%s (%d)", item.name, item.count)) - } - - val := float64(item.count) - snap.Metrics["tool_"+sanitizeMetricName(item.name)] = core.Metric{ - Used: &val, - Unit: "calls", - Window: defaultUsageWindowLabel, - } - } - - if len(all) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(all)-limit)) - } - - if len(parts) > 0 { - snap.Raw["tool_usage"] = strings.Join(parts, ", ") - } -} - -func aggregateTokenTotals(modelTotals map[string]tokenUsage) tokenUsage { - var total tokenUsage - for _, usage := range modelTotals { - total.InputTokens += usage.InputTokens - total.CachedInputTokens += usage.CachedInputTokens - total.OutputTokens += usage.OutputTokens - total.ReasoningTokens += usage.ReasoningTokens - total.ToolTokens += usage.ToolTokens - total.TotalTokens += usage.TotalTokens - } - return total -} - -func setUsageMetric(snap *core.UsageSnapshot, key string, value float64) { - if value <= 0 { - return - } - snap.Metrics[key] = core.Metric{ - Used: &value, - Unit: "tokens", - Window: defaultUsageWindowLabel, - } -} - -func 
addUsage(target map[string]tokenUsage, name string, delta tokenUsage) { - current := target[name] - current.InputTokens += delta.InputTokens - current.CachedInputTokens += delta.CachedInputTokens - current.OutputTokens += delta.OutputTokens - current.ReasoningTokens += delta.ReasoningTokens - current.ToolTokens += delta.ToolTokens - current.TotalTokens += delta.TotalTokens - target[name] = current -} - -func addDailyUsage(target map[string]map[string]float64, name, day string, value float64) { - if day == "" || value <= 0 { - return - } - if target[name] == nil { - target[name] = make(map[string]float64) - } - target[name][day] += value -} - -func sortUsageEntries(values map[string]tokenUsage) []usageEntry { - out := make([]usageEntry, 0, len(values)) - for name, data := range values { - out = append(out, usageEntry{Name: name, Data: data}) - } - sort.Slice(out, func(i, j int) bool { - if out[i].Data.TotalTokens == out[j].Data.TotalTokens { - return out[i].Name < out[j].Name - } - return out[i].Data.TotalTokens > out[j].Data.TotalTokens - }) - return out -} - -func formatUsageSummary(entries []usageEntry, max int) string { - total := 0 - for _, entry := range entries { - total += entry.Data.TotalTokens - } - if total <= 0 { - return "" - } - - limit := max - if limit > len(entries) { - limit = len(entries) - } - - parts := make([]string, 0, limit+1) - for i := 0; i < limit; i++ { - entry := entries[i] - pct := float64(entry.Data.TotalTokens) / float64(total) * 100 - parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, shared.FormatTokenCount(entry.Data.TotalTokens), pct)) - } - if len(entries) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) - } - return strings.Join(parts, ", ") -} - -func formatNamedCountMap(m map[string]int, unit string) string { - if len(m) == 0 { - return "" - } - parts := make([]string, 0, len(m)) - for name, count := range m { - if count <= 0 { - continue - } - parts = append(parts, fmt.Sprintf("%s: 
%d %s", name, count, unit)) - } - sort.Strings(parts) - return strings.Join(parts, ", ") -} - -func isGeminiToolCallSuccessful(status string) bool { - status = strings.ToLower(strings.TrimSpace(status)) - return status == "" || status == "success" || status == "succeeded" || status == "ok" || status == "completed" -} - -func isGeminiMutatingTool(toolName string) bool { - toolName = strings.ToLower(strings.TrimSpace(toolName)) - if toolName == "" { - return false - } - return strings.Contains(toolName, "edit") || - strings.Contains(toolName, "write") || - strings.Contains(toolName, "create") || - strings.Contains(toolName, "delete") || - strings.Contains(toolName, "rename") || - strings.Contains(toolName, "move") || - strings.Contains(toolName, "replace") -} - -func extractGeminiToolCommand(raw json.RawMessage) string { - var payload any - if json.Unmarshal(raw, &payload) != nil { - return "" - } - var command string - var walk func(v any) - walk = func(v any) { - if command != "" || v == nil { - return - } - switch value := v.(type) { - case map[string]any: - for key, child := range value { - k := strings.ToLower(strings.TrimSpace(key)) - if k == "command" || k == "cmd" || k == "script" || k == "shell_command" { - if s, ok := child.(string); ok { - command = strings.TrimSpace(s) - return - } - } - } - for _, child := range value { - walk(child) - if command != "" { - return - } - } - case []any: - for _, child := range value { - walk(child) - if command != "" { - return - } - } - } - } - walk(payload) - return command -} - -func extractGeminiToolPaths(raw json.RawMessage) []string { - var payload any - if json.Unmarshal(raw, &payload) != nil { - return nil - } - - pathHints := map[string]bool{ - "path": true, "paths": true, "file": true, "files": true, "filepath": true, "file_path": true, - "cwd": true, "dir": true, "directory": true, "target": true, "pattern": true, "glob": true, - "from": true, "to": true, "include": true, "exclude": true, - } - - candidates := 
make(map[string]bool) - var walk func(v any, hinted bool) - walk = func(v any, hinted bool) { - switch value := v.(type) { - case map[string]any: - for key, child := range value { - k := strings.ToLower(strings.TrimSpace(key)) - childHinted := hinted || pathHints[k] || strings.Contains(k, "path") || strings.Contains(k, "file") - walk(child, childHinted) - } - case []any: - for _, child := range value { - walk(child, hinted) - } - case string: - if !hinted { - return - } - for _, token := range extractGeminiPathTokens(value) { - candidates[token] = true - } - } - } - walk(payload, false) - - out := make([]string, 0, len(candidates)) - for c := range candidates { - out = append(out, c) - } - sort.Strings(out) - return out -} - -func extractGeminiPathTokens(raw string) []string { - raw = strings.TrimSpace(raw) - if raw == "" { - return nil - } - fields := strings.Fields(raw) - if len(fields) == 0 { - fields = []string{raw} - } - - var out []string - for _, field := range fields { - token := strings.Trim(field, "\"'`()[]{}<>,:;") - if token == "" { - continue - } - lower := strings.ToLower(token) - if strings.HasPrefix(lower, "http://") || strings.HasPrefix(lower, "https://") || strings.HasPrefix(lower, "file://") { - continue - } - if strings.HasPrefix(token, "-") { - continue - } - if !strings.Contains(token, "/") && !strings.Contains(token, "\\") && !strings.Contains(token, ".") { - continue - } - token = strings.TrimPrefix(token, "./") - if token == "" { - continue - } - out = append(out, token) - } - return lo.Uniq(out) -} - -func estimateGeminiToolLineDelta(raw json.RawMessage) (added int, removed int) { - var payload any - if json.Unmarshal(raw, &payload) != nil { - return 0, 0 - } - lineCount := func(text string) int { - text = strings.TrimSpace(text) - if text == "" { - return 0 - } - return strings.Count(text, "\n") + 1 - } - var walk func(v any) - walk = func(v any) { - switch value := v.(type) { - case map[string]any: - var oldText, newText string - for _, 
key := range []string{"old_string", "old_text", "from", "replace"} { - if rawValue, ok := value[key]; ok { - if s, ok := rawValue.(string); ok { - oldText = s - break - } - } - } - for _, key := range []string{"new_string", "new_text", "to", "with"} { - if rawValue, ok := value[key]; ok { - if s, ok := rawValue.(string); ok { - newText = s - break - } - } - } - if oldText != "" || newText != "" { - removed += lineCount(oldText) - added += lineCount(newText) - } - if rawValue, ok := value["content"]; ok { - if s, ok := rawValue.(string); ok { - added += lineCount(s) - } - } - for _, child := range value { - walk(child) - } - case []any: - for _, child := range value { - walk(child) - } - } - } - walk(payload) - return added, removed -} - -func extractGeminiToolDiffStat(raw json.RawMessage) (geminiDiffStat, bool) { - var empty geminiDiffStat - raw = bytes.TrimSpace(raw) - if len(raw) == 0 || bytes.Equal(raw, []byte("null")) { - return empty, false - } - - var root map[string]json.RawMessage - if json.Unmarshal(raw, &root) != nil { - return empty, false - } - diffRaw, ok := root["diffStat"] - if !ok { - return empty, false - } - - var stat geminiDiffStat - if json.Unmarshal(diffRaw, &stat) != nil { - return empty, false - } - - stat.ModelAddedLines = max(0, stat.ModelAddedLines) - stat.ModelRemovedLines = max(0, stat.ModelRemovedLines) - stat.ModelAddedChars = max(0, stat.ModelAddedChars) - stat.ModelRemovedChars = max(0, stat.ModelRemovedChars) - stat.UserAddedLines = max(0, stat.UserAddedLines) - stat.UserRemovedLines = max(0, stat.UserRemovedLines) - stat.UserAddedChars = max(0, stat.UserAddedChars) - stat.UserRemovedChars = max(0, stat.UserRemovedChars) - - if stat.ModelAddedLines == 0 && - stat.ModelRemovedLines == 0 && - stat.ModelAddedChars == 0 && - stat.ModelRemovedChars == 0 && - stat.UserAddedLines == 0 && - stat.UserRemovedLines == 0 && - stat.UserAddedChars == 0 && - stat.UserRemovedChars == 0 { - return empty, false - } - - return stat, true -} - -func 
inferGeminiLanguageFromPath(path string) string { - p := strings.ToLower(strings.TrimSpace(path)) - if p == "" { - return "" - } - base := strings.ToLower(filepath.Base(p)) - switch base { - case "dockerfile": - return "docker" - case "makefile": - return "make" - } - switch strings.ToLower(filepath.Ext(p)) { - case ".go": - return "go" - case ".py": - return "python" - case ".ts", ".tsx": - return "typescript" - case ".js", ".jsx": - return "javascript" - case ".tf", ".tfvars", ".hcl": - return "terraform" - case ".sh", ".bash", ".zsh", ".fish": - return "shell" - case ".md", ".mdx": - return "markdown" - case ".json": - return "json" - case ".yml", ".yaml": - return "yaml" - case ".sql": - return "sql" - case ".rs": - return "rust" - case ".java": - return "java" - case ".c", ".h": - return "c" - case ".cc", ".cpp", ".cxx", ".hpp": - return "cpp" - case ".rb": - return "ruby" - case ".php": - return "php" - case ".swift": - return "swift" - case ".vue": - return "vue" - case ".svelte": - return "svelte" - case ".toml": - return "toml" - case ".xml": - return "xml" - } - return "" -} - -func usageDelta(current, previous tokenUsage) tokenUsage { - return tokenUsage{ - InputTokens: current.InputTokens - previous.InputTokens, - CachedInputTokens: current.CachedInputTokens - previous.CachedInputTokens, - OutputTokens: current.OutputTokens - previous.OutputTokens, - ReasoningTokens: current.ReasoningTokens - previous.ReasoningTokens, - ToolTokens: current.ToolTokens - previous.ToolTokens, - TotalTokens: current.TotalTokens - previous.TotalTokens, - } -} - -func validUsageDelta(delta tokenUsage) bool { - return delta.InputTokens >= 0 && - delta.CachedInputTokens >= 0 && - delta.OutputTokens >= 0 && - delta.ReasoningTokens >= 0 && - delta.ToolTokens >= 0 && - delta.TotalTokens >= 0 -} - -func normalizeModelName(name string) string { - name = strings.TrimSpace(name) - if name == "" { - return "unknown" - } - return name -} - -func normalizeClientName(name string) string { 
- name = strings.TrimSpace(name) - if name == "" { - return "Other" - } - return name -} - -func sanitizeMetricName(name string) string { - name = strings.ToLower(strings.TrimSpace(name)) - if name == "" { - return "unknown" - } - - var b strings.Builder - lastUnderscore := false - for _, r := range name { - switch { - case r >= 'a' && r <= 'z': - b.WriteRune(r) - lastUnderscore = false - case r >= '0' && r <= '9': - b.WriteRune(r) - lastUnderscore = false - default: - if !lastUnderscore { - b.WriteByte('_') - lastUnderscore = true - } - } - } - - out := strings.Trim(b.String(), "_") - if out == "" { - return "unknown" - } - return out -} - -// getModelContextLimit returns the known context window size for a given Gemini model. -// Since the Gemini CLI's internal API does not expose model metadata like context limits -// in the session payload, we fallback to static configuration based on public documentation. -// -// Sources: -// - Gemini 1.5 Pro (2M): https://blog.google/technology/ai/google-gemini-update-flash-ai-assistant-io-2024/#gemini-1-5-pro -// - Gemini 1.5 Flash (1M): https://blog.google/technology/ai/google-gemini-update-flash-ai-assistant-io-2024/#gemini-1-5-flash -// - Gemini 2.0 Flash (1M): https://ai.google.dev/gemini-api/docs/models/gemini-v2 -func getModelContextLimit(model string) int { - model = strings.ToLower(model) - switch { - case strings.Contains(model, "1.5-pro"), strings.Contains(model, "1.5-flash-8b"): - return 2_000_000 - case strings.Contains(model, "1.5-flash"): - return 1_000_000 - case strings.Contains(model, "2.0-flash"): - return 1_000_000 - case strings.Contains(model, "gemini-3"), strings.Contains(model, "gemini-exp"): - // Assuming recent experimental/v3 models follow the 2M trend of 1.5 Pro/Exp. - // Subject to change as these are preview models. 
- return 2_000_000 - case strings.Contains(model, "pro"): - return 32_000 // Legacy Gemini 1.0 Pro - case strings.Contains(model, "flash"): - return 32_000 // Fallback for older flash-like models if any - } - return 0 -} - -func dayFromTimestamp(timestamp string) string { - if timestamp == "" { - return "" - } - for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} { - if parsed, err := time.Parse(layout, timestamp); err == nil { - return parsed.Format("2006-01-02") - } - } - if len(timestamp) >= 10 { - candidate := timestamp[:10] - if _, err := time.Parse("2006-01-02", candidate); err == nil { - return candidate - } - } - return "" -} - -func dayFromSession(startTime, lastUpdated string) string { - if day := dayFromTimestamp(lastUpdated); day != "" { - return day - } - return dayFromTimestamp(startTime) -} - -func storeSeries(snap *core.UsageSnapshot, key string, values map[string]float64) { - if len(values) == 0 { - return - } - snap.DailySeries[key] = core.SortedTimePoints(values) -} - -func latestSeriesValue(values map[string]float64) (string, float64) { - if len(values) == 0 { - return "", 0 - } - dates := lo.Keys(values) - sort.Strings(dates) - last := dates[len(dates)-1] - return last, values[last] -} - -func sumLastNDays(values map[string]float64, days int) float64 { - if len(values) == 0 || days <= 0 { - return 0 - } - lastDate, _ := latestSeriesValue(values) - if lastDate == "" { - return 0 - } - end, err := time.Parse("2006-01-02", lastDate) - if err != nil { - return 0 - } - start := end.AddDate(0, 0, -(days - 1)) - - total := 0.0 - for date, value := range values { - t, err := time.Parse("2006-01-02", date) - if err != nil { - continue - } - if !t.Before(start) && !t.After(end) { - total += value - } - } - return total -} - -func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { - if value <= 0 { - return - } - v := value - snap.Metrics[key] = core.Metric{ - Used: &v, - Unit: 
// setPercentMetric stores a percentage metric with an implicit 0-100 scale:
// Used is clamped to at most 100, Limit is fixed at 100 and Remaining is the
// complement. Negative values are dropped.
func setPercentMetric(snap *core.UsageSnapshot, key string, value float64, window string) {
	if value < 0 {
		return
	}
	if value > 100 {
		value = 100
	}
	v := value
	limit := 100.0
	remaining := 100 - value
	snap.Metrics[key] = core.Metric{
		Used:      &v,
		Limit:     &limit,
		Remaining: &remaining,
		Unit:      "%",
		Window:    window,
	}
}

// isQuotaLimitMessage reports whether a message body contains one of the
// phrases the Gemini CLI emits when a usage quota is exhausted.
func isQuotaLimitMessage(content json.RawMessage) bool {
	text := strings.ToLower(parseMessageContentText(content))
	if text == "" {
		return false
	}
	return strings.Contains(text, "usage limit reached") ||
		strings.Contains(text, "all pro models") ||
		strings.Contains(text, "/stats for usage details")
}

// parseMessageContentText flattens a message "content" payload to plain text.
// It accepts either a JSON string or an array of {"text": ...} parts; anything
// else is returned as the raw JSON text.
func parseMessageContentText(content json.RawMessage) string {
	content = bytes.TrimSpace(content)
	if len(content) == 0 {
		return ""
	}

	// Plain string form: "hello".
	var asString string
	if content[0] == '"' && json.Unmarshal(content, &asString) == nil {
		return asString
	}

	// Structured form: [{"text": "..."}, ...] — join the non-blank parts.
	var asArray []map[string]any
	if content[0] == '[' && json.Unmarshal(content, &asArray) == nil {
		var parts []string
		for _, item := range asArray {
			if text, ok := item["text"].(string); ok && strings.TrimSpace(text) != "" {
				parts = append(parts, text)
			}
		}
		if len(parts) > 0 {
			return strings.Join(parts, " ")
		}
	}

	return string(content)
}

// Package gemini_cli: local-source usage parsing for the Gemini CLI provider
// (session_usage.go — new file introduced by this change).
package gemini_cli

import (
	"bytes"
	"encoding/json"
	"fmt"
	"maps"
	"os"
	"path/filepath"
	"slices"
	"sort"
	"strings"
	"time"

	"github.com/janekbaraniewski/openusage/internal/core"
	"github.com/janekbaraniewski/openusage/internal/providers/shared"
	"github.com/samber/lo"
)

// mapKeysSorted returns the keys of a string set, sorted, with blank keys
// removed. Returns nil for an empty set.
func mapKeysSorted(values map[string]bool) []string {
	if len(values) == 0 {
		return nil
	}
	out := slices.Sorted(maps.Keys(values))
	return slices.DeleteFunc(out, func(key string) bool { return strings.TrimSpace(key) == "" })
}
// formatGeminiNameList joins up to max names with ", " and appends a
// ", +N more" suffix for the remainder. max <= 0 means "no limit".
func formatGeminiNameList(values []string, max int) string {
	if len(values) == 0 {
		return ""
	}
	limit := max
	if limit <= 0 || limit > len(values) {
		limit = len(values)
	}
	out := strings.Join(values[:limit], ", ")
	if len(values) > limit {
		out += fmt.Sprintf(", +%d more", len(values)-limit)
	}
	return out
}

// toUsage converts a raw per-message token record into a tokenUsage,
// deriving the total from the components when the payload omits it.
func (t geminiMessageToken) toUsage() tokenUsage {
	total := t.Total
	if total <= 0 {
		total = t.Input + t.Output + t.Cached + t.Thoughts + t.Tool
	}
	return tokenUsage{
		InputTokens:       t.Input,
		CachedInputTokens: t.Cached,
		OutputTokens:      t.Output,
		ReasoningTokens:   t.Thoughts,
		ToolTokens:        t.Tool,
		TotalTokens:       total,
	}
}

// readSessionUsageBreakdowns scans Gemini CLI session files under tmpDir and
// populates snap with token, tool, language, diff and quota metrics plus
// daily series. It returns the number of distinct sessions processed.
//
// Per-message token records appear to be cumulative within a session: the
// loop diffs consecutive records (usageDelta) and falls back to the absolute
// value when the diff goes negative (counter reset) — assumption inferred
// from the delta/validUsageDelta handling; confirm against CLI payloads.
func (p *Provider) readSessionUsageBreakdowns(tmpDir string, snap *core.UsageSnapshot) (int, error) {
	files, err := findGeminiSessionFiles(tmpDir)
	if err != nil {
		return 0, err
	}
	if len(files) == 0 {
		return 0, nil
	}

	// Aggregation buckets keyed by model / client / tool / language / day.
	modelTotals := make(map[string]tokenUsage)
	clientTotals := make(map[string]tokenUsage)
	toolTotals := make(map[string]int)
	languageUsageCounts := make(map[string]int)
	changedFiles := make(map[string]bool)
	commitCommands := make(map[string]bool)
	modelDaily := make(map[string]map[string]float64)
	clientDaily := make(map[string]map[string]float64)
	clientSessions := make(map[string]int)
	modelRequests := make(map[string]int)
	modelSessions := make(map[string]int)

	dailyMessages := make(map[string]float64)
	dailySessions := make(map[string]float64)
	dailyToolCalls := make(map[string]float64)
	dailyTokens := make(map[string]float64)
	dailyInputTokens := make(map[string]float64)
	dailyOutputTokens := make(map[string]float64)
	dailyCachedTokens := make(map[string]float64)
	dailyReasoningTokens := make(map[string]float64)
	dailyToolTokens := make(map[string]float64)

	sessionIDs := make(map[string]bool)
	sessionCount := 0
	totalMessages := 0
	totalTurns := 0
	totalToolCalls := 0
	totalInfoMessages := 0
	totalErrorMessages := 0
	totalAssistantMessages := 0
	totalToolSuccess := 0
	totalToolFailed := 0
	totalToolErrored := 0
	totalToolCancelled := 0
	quotaLimitEvents := 0
	modelLinesAdded := 0
	modelLinesRemoved := 0
	modelCharsAdded := 0
	modelCharsRemoved := 0
	userLinesAdded := 0
	userLinesRemoved := 0
	userCharsAdded := 0
	userCharsRemoved := 0
	diffStatEvents := 0
	inferredCommitCount := 0

	// files is sorted newest-first, so the first file with usage supplies the
	// "current" model/context-window reading.
	var lastModelName string
	var lastModelTokens int
	foundLatest := false

	for _, path := range files {
		chat, err := readGeminiChatFile(path)
		if err != nil {
			// Unreadable/corrupt session files are skipped, not fatal.
			continue
		}

		sessionID := strings.TrimSpace(chat.SessionID)
		if sessionID == "" {
			sessionID = path
		}
		if sessionIDs[sessionID] {
			continue // de-duplicate sessions that appear in multiple files
		}
		sessionIDs[sessionID] = true
		sessionCount++

		clientName := normalizeClientName("CLI")
		clientSessions[clientName]++

		sessionDay := dayFromSession(chat.StartTime, chat.LastUpdated)
		if sessionDay != "" {
			dailySessions[sessionDay]++
		}

		var previous tokenUsage
		var hasPrevious bool
		fileHasUsage := false
		sessionModels := make(map[string]bool)

		for _, msg := range chat.Messages {
			day := dayFromTimestamp(msg.Timestamp)
			if day == "" {
				day = sessionDay // fall back to the session's day
			}

			switch strings.ToLower(strings.TrimSpace(msg.Type)) {
			case "info":
				totalInfoMessages++
			case "error":
				totalErrorMessages++
			case "gemini", "assistant", "model":
				totalAssistantMessages++
			}

			if isQuotaLimitMessage(msg.Content) {
				quotaLimitEvents++
			}

			if strings.EqualFold(msg.Type, "user") {
				totalMessages++
				if day != "" {
					dailyMessages[day]++
				}
			}

			if len(msg.ToolCalls) > 0 {
				totalToolCalls += len(msg.ToolCalls)
				if day != "" {
					dailyToolCalls[day] += float64(len(msg.ToolCalls))
				}
				for _, tc := range msg.ToolCalls {
					toolName := strings.TrimSpace(tc.Name)
					if toolName != "" {
						toolTotals[toolName]++
					}

					// Status classification: blank means success; cancelled
					// counts as both cancelled and failed.
					status := strings.ToLower(strings.TrimSpace(tc.Status))
					switch {
					case status == "" || status == "success" || status == "succeeded" || status == "ok" || status == "completed":
						totalToolSuccess++
					case status == "cancelled" || status == "canceled":
						totalToolCancelled++
						totalToolFailed++
					default:
						totalToolErrored++
						totalToolFailed++
					}

					toolLower := strings.ToLower(toolName)
					successfulToolCall := isGeminiToolCallSuccessful(status)
					for _, path := range extractGeminiToolPaths(tc.Args) {
						if successfulToolCall {
							if lang := inferGeminiLanguageFromPath(path); lang != "" {
								languageUsageCounts[lang]++
							}
						}
						if successfulToolCall && isGeminiMutatingTool(toolLower) {
							changedFiles[path] = true
						}
					}

					// Line/char deltas: prefer the tool's reported diffStat,
					// otherwise estimate from the tool arguments.
					if successfulToolCall && isGeminiMutatingTool(toolLower) {
						if diff, ok := extractGeminiToolDiffStat(tc.ResultDisplay); ok {
							modelLinesAdded += diff.ModelAddedLines
							modelLinesRemoved += diff.ModelRemovedLines
							modelCharsAdded += diff.ModelAddedChars
							modelCharsRemoved += diff.ModelRemovedChars
							userLinesAdded += diff.UserAddedLines
							userLinesRemoved += diff.UserRemovedLines
							userCharsAdded += diff.UserAddedChars
							userCharsRemoved += diff.UserRemovedChars
							diffStatEvents++
						} else {
							added, removed := estimateGeminiToolLineDelta(tc.Args)
							modelLinesAdded += added
							modelLinesRemoved += removed
						}
					}

					if !successfulToolCall {
						continue
					}
					// Commit inference: de-duplicated "git commit" shell
					// commands, or any other tool whose name mentions commit.
					cmd := strings.ToLower(extractGeminiToolCommand(tc.Args))
					if strings.Contains(cmd, "git commit") {
						if !commitCommands[cmd] {
							commitCommands[cmd] = true
							inferredCommitCount++
						}
					} else if strings.Contains(toolLower, "commit") {
						inferredCommitCount++
					}
				}
			}
			if msg.Tokens == nil {
				continue
			}

			modelName := normalizeModelName(msg.Model)
			total := msg.Tokens.toUsage()

			if !foundLatest {
				lastModelName = modelName
				lastModelTokens = total.TotalTokens
				fileHasUsage = true
			}
			modelRequests[modelName]++
			sessionModels[modelName] = true

			// Diff against the previous record; a negative component means
			// the counter reset, so take the absolute record instead.
			delta := total
			if hasPrevious {
				delta = usageDelta(total, previous)
				if !validUsageDelta(delta) {
					delta = total
				}
			}
			previous = total
			hasPrevious = true

			if delta.TotalTokens <= 0 {
				continue
			}

			addUsage(modelTotals, modelName, delta)
			addUsage(clientTotals, clientName, delta)

			if day != "" {
				addDailyUsage(modelDaily, modelName, day, float64(delta.TotalTokens))
				addDailyUsage(clientDaily, clientName, day, float64(delta.TotalTokens))
				dailyTokens[day] += float64(delta.TotalTokens)
				dailyInputTokens[day] += float64(delta.InputTokens)
				dailyOutputTokens[day] += float64(delta.OutputTokens)
				dailyCachedTokens[day] += float64(delta.CachedInputTokens)
				dailyReasoningTokens[day] += float64(delta.ReasoningTokens)
				dailyToolTokens[day] += float64(delta.ToolTokens)
			}

			totalTurns++
		}

		for modelName := range sessionModels {
			modelSessions[modelName]++
		}

		if fileHasUsage {
			foundLatest = true
		}
	}

	if sessionCount == 0 {
		return 0, nil
	}

	// Context-window gauge from the most recent usage record.
	if lastModelName != "" && lastModelTokens > 0 {
		limit := getModelContextLimit(lastModelName)
		if limit > 0 {
			used := float64(lastModelTokens)
			lim := float64(limit)
			snap.Metrics["context_window"] = core.Metric{
				Used:   &used,
				Limit:  &lim,
				Unit:   "tokens",
				Window: "current",
			}
			snap.Raw["active_model"] = lastModelName
		}
	}

	// Breakdown metrics and raw summaries.
	emitBreakdownMetrics("model", modelTotals, modelDaily, snap)
	emitBreakdownMetrics("client", clientTotals, clientDaily, snap)
	emitClientSessionMetrics(clientSessions, snap)
	emitModelRequestMetrics(modelRequests, modelSessions, snap)
	emitToolMetrics(toolTotals, snap)
	if languageSummary := formatNamedCountMap(languageUsageCounts, "req"); languageSummary != "" {
		snap.Raw["language_usage"] = languageSummary
	}
	for lang, count := range languageUsageCounts {
		if count <= 0 {
			continue
		}
		setUsedMetric(snap, "lang_"+sanitizeMetricName(lang), float64(count), "requests", defaultUsageWindowLabel)
	}

	// Daily series (requests/analytics_* alias the message/token series).
	storeSeries(snap, "messages", dailyMessages)
	storeSeries(snap, "sessions", dailySessions)
	storeSeries(snap, "tool_calls", dailyToolCalls)
	storeSeries(snap, "tokens_total", dailyTokens)
	storeSeries(snap, "requests", dailyMessages)
	storeSeries(snap, "analytics_requests", dailyMessages)
	storeSeries(snap, "analytics_tokens", dailyTokens)
	storeSeries(snap, "tokens_input", dailyInputTokens)
	storeSeries(snap, "tokens_output", dailyOutputTokens)
	storeSeries(snap, "tokens_cached", dailyCachedTokens)
	storeSeries(snap, "tokens_reasoning", dailyReasoningTokens)
	storeSeries(snap, "tokens_tool", dailyToolTokens)

	// All-time counters.
	setUsedMetric(snap, "total_messages", float64(totalMessages), "messages", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_sessions", float64(sessionCount), "sessions", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_turns", float64(totalTurns), "turns", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_tool_calls", float64(totalToolCalls), "calls", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_info_messages", float64(totalInfoMessages), "messages", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_error_messages", float64(totalErrorMessages), "messages", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_assistant_messages", float64(totalAssistantMessages), "messages", defaultUsageWindowLabel)
	setUsedMetric(snap, "tool_calls_success", float64(totalToolSuccess), "calls", defaultUsageWindowLabel)
	setUsedMetric(snap, "tool_calls_failed", float64(totalToolFailed), "calls", defaultUsageWindowLabel)
	setUsedMetric(snap, "tool_calls_total", float64(totalToolCalls), "calls", defaultUsageWindowLabel)
	setUsedMetric(snap, "tool_completed", float64(totalToolSuccess), "calls", defaultUsageWindowLabel)
	setUsedMetric(snap, "tool_errored", float64(totalToolErrored), "calls", defaultUsageWindowLabel)
	setUsedMetric(snap, "tool_cancelled", float64(totalToolCancelled), "calls", defaultUsageWindowLabel)
	if totalToolCalls > 0 {
		successRate := float64(totalToolSuccess) / float64(totalToolCalls) * 100
		setUsedMetric(snap, "tool_success_rate", successRate, "%", defaultUsageWindowLabel)
	}
	setUsedMetric(snap, "quota_limit_events", float64(quotaLimitEvents), "events", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_prompts", float64(totalMessages), "prompts", defaultUsageWindowLabel)

	if cliUsage, ok := clientTotals["CLI"]; ok {
		setUsedMetric(snap, "client_cli_messages", float64(totalMessages), "messages", defaultUsageWindowLabel)
		setUsedMetric(snap, "client_cli_turns", float64(totalTurns), "turns", defaultUsageWindowLabel)
		setUsedMetric(snap, "client_cli_tool_calls", float64(totalToolCalls), "calls", defaultUsageWindowLabel)
		setUsedMetric(snap, "client_cli_input_tokens", float64(cliUsage.InputTokens), "tokens", defaultUsageWindowLabel)
		setUsedMetric(snap, "client_cli_output_tokens", float64(cliUsage.OutputTokens), "tokens", defaultUsageWindowLabel)
		setUsedMetric(snap, "client_cli_cached_tokens", float64(cliUsage.CachedInputTokens), "tokens", defaultUsageWindowLabel)
		setUsedMetric(snap, "client_cli_reasoning_tokens", float64(cliUsage.ReasoningTokens), "tokens", defaultUsageWindowLabel)
		setUsedMetric(snap, "client_cli_total_tokens", float64(cliUsage.TotalTokens), "tokens", defaultUsageWindowLabel)
	}

	total := aggregateTokenTotals(modelTotals)
	setUsedMetric(snap, "total_input_tokens", float64(total.InputTokens), "tokens", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_output_tokens", float64(total.OutputTokens), "tokens", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_cached_tokens", float64(total.CachedInputTokens), "tokens", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_reasoning_tokens", float64(total.ReasoningTokens), "tokens", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_tool_tokens", float64(total.ToolTokens), "tokens", defaultUsageWindowLabel)
	setUsedMetric(snap, "total_tokens", float64(total.TotalTokens), "tokens", defaultUsageWindowLabel)

	// Derived ratios.
	if total.InputTokens > 0 {
		cacheEfficiency := float64(total.CachedInputTokens) / float64(total.InputTokens) * 100
		setPercentMetric(snap, "cache_efficiency", cacheEfficiency, defaultUsageWindowLabel)
	}
	if total.TotalTokens > 0 {
		reasoningShare := float64(total.ReasoningTokens) / float64(total.TotalTokens) * 100
		toolShare := float64(total.ToolTokens) / float64(total.TotalTokens) * 100
		setPercentMetric(snap, "reasoning_share", reasoningShare, defaultUsageWindowLabel)
		setPercentMetric(snap, "tool_token_share", toolShare, defaultUsageWindowLabel)
	}
	if totalTurns > 0 {
		avgTokensPerTurn := float64(total.TotalTokens) / float64(totalTurns)
		setUsedMetric(snap, "avg_tokens_per_turn", avgTokensPerTurn, "tokens", defaultUsageWindowLabel)
	}
	if sessionCount > 0 {
		avgToolsPerSession := float64(totalToolCalls) / float64(sessionCount)
		setUsedMetric(snap, "avg_tools_per_session", avgToolsPerSession, "calls", defaultUsageWindowLabel)
	}

	// "Today" metrics come from the latest day present in each series.
	if _, v := latestSeriesValue(dailyMessages); v > 0 {
		setUsedMetric(snap, "messages_today", v, "messages", "today")
	}
	if _, v := latestSeriesValue(dailySessions); v > 0 {
		setUsedMetric(snap, "sessions_today", v, "sessions", "today")
	}
	if _, v := latestSeriesValue(dailyToolCalls); v > 0 {
		setUsedMetric(snap, "tool_calls_today", v, "calls", "today")
	}
	if _, v := latestSeriesValue(dailyTokens); v > 0 {
		setUsedMetric(snap, "tokens_today", v, "tokens", "today")
	}
	if _, v := latestSeriesValue(dailyInputTokens); v > 0 {
		setUsedMetric(snap, "today_input_tokens", v, "tokens", "today")
	}
	if _, v := latestSeriesValue(dailyOutputTokens); v > 0 {
		setUsedMetric(snap, "today_output_tokens", v, "tokens", "today")
	}
	if _, v := latestSeriesValue(dailyCachedTokens); v > 0 {
		setUsedMetric(snap, "today_cached_tokens", v, "tokens", "today")
	}
	if _, v := latestSeriesValue(dailyReasoningTokens); v > 0 {
		setUsedMetric(snap, "today_reasoning_tokens", v, "tokens", "today")
	}
	if _, v := latestSeriesValue(dailyToolTokens); v > 0 {
		setUsedMetric(snap, "today_tool_tokens", v, "tokens", "today")
	}

	// Rolling 7-day windows anchored at each series' latest day.
	setUsedMetric(snap, "7d_messages", sumLastNDays(dailyMessages, 7), "messages", "7d")
	setUsedMetric(snap, "7d_sessions", sumLastNDays(dailySessions, 7), "sessions", "7d")
	setUsedMetric(snap, "7d_tool_calls", sumLastNDays(dailyToolCalls, 7), "calls", "7d")
	setUsedMetric(snap, "7d_tokens", sumLastNDays(dailyTokens, 7), "tokens", "7d")
	setUsedMetric(snap, "7d_input_tokens", sumLastNDays(dailyInputTokens, 7), "tokens", "7d")
	setUsedMetric(snap, "7d_output_tokens", sumLastNDays(dailyOutputTokens, 7), "tokens", "7d")
	setUsedMetric(snap, "7d_cached_tokens", sumLastNDays(dailyCachedTokens, 7), "tokens", "7d")
	setUsedMetric(snap, "7d_reasoning_tokens", sumLastNDays(dailyReasoningTokens, 7), "tokens", "7d")
	setUsedMetric(snap, "7d_tool_tokens", sumLastNDays(dailyToolTokens, 7), "tokens", "7d")

	// Composer / diff metrics.
	if modelLinesAdded > 0 {
		setUsedMetric(snap, "composer_lines_added", float64(modelLinesAdded), "lines", defaultUsageWindowLabel)
	}
	if modelLinesRemoved > 0 {
		setUsedMetric(snap, "composer_lines_removed", float64(modelLinesRemoved), "lines", defaultUsageWindowLabel)
	}
	if len(changedFiles) > 0 {
		setUsedMetric(snap, "composer_files_changed", float64(len(changedFiles)), "files", defaultUsageWindowLabel)
	}
	if inferredCommitCount > 0 {
		setUsedMetric(snap, "scored_commits", float64(inferredCommitCount), "commits", defaultUsageWindowLabel)
	}
	if userLinesAdded > 0 {
		setUsedMetric(snap, "composer_user_lines_added", float64(userLinesAdded), "lines", defaultUsageWindowLabel)
	}
	if userLinesRemoved > 0 {
		setUsedMetric(snap, "composer_user_lines_removed", float64(userLinesRemoved), "lines", defaultUsageWindowLabel)
	}
	if modelCharsAdded > 0 {
		setUsedMetric(snap, "composer_model_chars_added", float64(modelCharsAdded), "chars", defaultUsageWindowLabel)
	}
	if modelCharsRemoved > 0 {
		setUsedMetric(snap, "composer_model_chars_removed", float64(modelCharsRemoved), "chars", defaultUsageWindowLabel)
	}
	if userCharsAdded > 0 {
		setUsedMetric(snap, "composer_user_chars_added", float64(userCharsAdded), "chars", defaultUsageWindowLabel)
	}
	if userCharsRemoved > 0 {
		setUsedMetric(snap, "composer_user_chars_removed", float64(userCharsRemoved), "chars", defaultUsageWindowLabel)
	}
	if diffStatEvents > 0 {
		setUsedMetric(snap, "composer_diffstat_events", float64(diffStatEvents), "calls", defaultUsageWindowLabel)
	}
	// Share of total line churn attributed to the model vs. the user.
	totalModelLineDelta := modelLinesAdded + modelLinesRemoved
	totalUserLineDelta := userLinesAdded + userLinesRemoved
	if totalModelLineDelta > 0 || totalUserLineDelta > 0 {
		totalLineDelta := totalModelLineDelta + totalUserLineDelta
		if totalLineDelta > 0 {
			aiPct := float64(totalModelLineDelta) / float64(totalLineDelta) * 100
			setPercentMetric(snap, "ai_code_percentage", aiPct, defaultUsageWindowLabel)
		}
	}

	// Synthesize an exhausted quota metric when limit messages were seen but
	// no explicit quota metric exists yet.
	if quotaLimitEvents > 0 {
		snap.Raw["quota_limit_detected"] = "true"
		if _, hasQuota := snap.Metrics["quota"]; !hasQuota {
			limit := 100.0
			remaining := 0.0
			used := 100.0
			snap.Metrics["quota"] = core.Metric{
				Limit:     &limit,
				Remaining: &remaining,
				Used:      &used,
				Unit:      "%",
				Window:    "daily",
			}
			applyQuotaStatus(snap, 0)
		}
	}

	return sessionCount, nil
}

// findGeminiSessionFiles walks tmpDir for "session-*.json" files and returns
// their paths sorted newest-first (mod time, then reverse path as tiebreak).
// A missing directory yields (nil, nil); other stat/walk failures are errors.
func findGeminiSessionFiles(tmpDir string) ([]string, error) {
	if strings.TrimSpace(tmpDir) == "" {
		return nil, nil
	}
	if _, err := os.Stat(tmpDir); err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("stat tmp dir: %w", err)
	}

	type item struct {
		path    string
		modTime time.Time
	}
	var files []item

	walkErr := filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error {
		// Unreadable entries and directories are skipped silently.
		if err != nil || info == nil || info.IsDir() {
			return nil
		}
		name := info.Name()
		if !strings.HasPrefix(name, "session-") || !strings.HasSuffix(name, ".json") {
			return nil
		}
		files = append(files, item{path: path, modTime: info.ModTime()})
		return nil
	})
	if walkErr != nil {
		return nil, fmt.Errorf("walk gemini tmp dir: %w", walkErr)
	}
	if len(files) == 0 {
		return nil, nil
	}

	sort.Slice(files, func(i, j int) bool {
		if files[i].modTime.Equal(files[j].modTime) {
			return files[i].path > files[j].path
		}
		return files[i].modTime.After(files[j].modTime)
	})

	return lo.Map(files, func(f item, _ int) string { return f.path }), nil
}
// readGeminiChatFile decodes a single session JSON file into a geminiChatFile.
func readGeminiChatFile(path string) (*geminiChatFile, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var chat geminiChatFile
	if err := json.NewDecoder(f).Decode(&chat); err != nil {
		return nil, err
	}
	return &chat, nil
}

// emitBreakdownMetrics publishes per-name token metrics ("<prefix>_<name>_*"),
// per-name daily token series, and — for the "model" prefix — structured
// ModelUsageRecord entries. Only the top maxBreakdownMetrics entries (by total
// tokens) get metrics; the raw "<prefix>_usage" summary is capped separately
// at maxBreakdownRaw.
func emitBreakdownMetrics(prefix string, totals map[string]tokenUsage, daily map[string]map[string]float64, snap *core.UsageSnapshot) {
	entries := sortUsageEntries(totals)
	if len(entries) == 0 {
		return
	}

	for i, entry := range entries {
		if i >= maxBreakdownMetrics {
			break
		}
		keyPrefix := prefix + "_" + sanitizeMetricName(entry.Name)
		setUsageMetric(snap, keyPrefix+"_total_tokens", float64(entry.Data.TotalTokens))
		setUsageMetric(snap, keyPrefix+"_input_tokens", float64(entry.Data.InputTokens))
		setUsageMetric(snap, keyPrefix+"_output_tokens", float64(entry.Data.OutputTokens))

		// Cached/reasoning metrics are only emitted when non-zero.
		if entry.Data.CachedInputTokens > 0 {
			setUsageMetric(snap, keyPrefix+"_cached_tokens", float64(entry.Data.CachedInputTokens))
		}
		if entry.Data.ReasoningTokens > 0 {
			setUsageMetric(snap, keyPrefix+"_reasoning_tokens", float64(entry.Data.ReasoningTokens))
		}

		if byDay, ok := daily[entry.Name]; ok {
			seriesKey := "tokens_" + prefix + "_" + sanitizeMetricName(entry.Name)
			snap.DailySeries[seriesKey] = core.SortedTimePoints(byDay)
		}

		if prefix == "model" {
			rec := core.ModelUsageRecord{
				RawModelID:   entry.Name,
				RawSource:    "json",
				Window:       defaultUsageWindowLabel,
				InputTokens:  core.Float64Ptr(float64(entry.Data.InputTokens)),
				OutputTokens: core.Float64Ptr(float64(entry.Data.OutputTokens)),
				TotalTokens:  core.Float64Ptr(float64(entry.Data.TotalTokens)),
			}
			if entry.Data.CachedInputTokens > 0 {
				rec.CachedTokens = core.Float64Ptr(float64(entry.Data.CachedInputTokens))
			}
			if entry.Data.ReasoningTokens > 0 {
				rec.ReasoningTokens = core.Float64Ptr(float64(entry.Data.ReasoningTokens))
			}
			snap.AppendModelUsage(rec)
		}
	}

	snap.Raw[prefix+"_usage"] = formatUsageSummary(entries, maxBreakdownRaw)
}

// emitClientSessionMetrics publishes a "client_<name>_sessions" metric for the
// top maxBreakdownMetrics clients by session count (name ascending on ties).
func emitClientSessionMetrics(clientSessions map[string]int, snap *core.UsageSnapshot) {
	type entry struct {
		name  string
		count int
	}
	var all []entry
	for name, count := range clientSessions {
		if count > 0 {
			all = append(all, entry{name: name, count: count})
		}
	}
	sort.Slice(all, func(i, j int) bool {
		if all[i].count == all[j].count {
			return all[i].name < all[j].name
		}
		return all[i].count > all[j].count
	})

	for i, item := range all {
		if i >= maxBreakdownMetrics {
			break
		}
		value := float64(item.count)
		snap.Metrics["client_"+sanitizeMetricName(item.name)+"_sessions"] = core.Metric{
			Used:   &value,
			Unit:   "sessions",
			Window: defaultUsageWindowLabel,
		}
	}
}

// emitModelRequestMetrics publishes "model_<name>_requests" (and, when
// non-zero, "model_<name>_sessions") for the top maxBreakdownMetrics models
// ranked by request count (name ascending on ties).
func emitModelRequestMetrics(modelRequests, modelSessions map[string]int, snap *core.UsageSnapshot) {
	type entry struct {
		name     string
		requests int
		sessions int
	}

	all := make([]entry, 0, len(modelRequests))
	for name, requests := range modelRequests {
		if requests <= 0 {
			continue
		}
		all = append(all, entry{name: name, requests: requests, sessions: modelSessions[name]})
	}

	sort.Slice(all, func(i, j int) bool {
		if all[i].requests == all[j].requests {
			return all[i].name < all[j].name
		}
		return all[i].requests > all[j].requests
	})

	for i, item := range all {
		if i >= maxBreakdownMetrics {
			break
		}
		keyPrefix := "model_" + sanitizeMetricName(item.name)
		req := float64(item.requests)
		sess := float64(item.sessions)
		snap.Metrics[keyPrefix+"_requests"] = core.Metric{
			Used:   &req,
			Unit:   "requests",
			Window: defaultUsageWindowLabel,
		}
		if item.sessions > 0 {
			snap.Metrics[keyPrefix+"_sessions"] = core.Metric{
				Used:   &sess,
				Unit:   "sessions",
				Window: defaultUsageWindowLabel,
			}
		}
	}
}
defaultUsageWindowLabel, + } + if item.sessions > 0 { + snap.Metrics[keyPrefix+"_sessions"] = core.Metric{ + Used: &sess, + Unit: "sessions", + Window: defaultUsageWindowLabel, + } + } + } +} + +func emitToolMetrics(toolTotals map[string]int, snap *core.UsageSnapshot) { + type entry struct { + name string + count int + } + var all []entry + for name, count := range toolTotals { + if count > 0 { + all = append(all, entry{name: name, count: count}) + } + } + sort.Slice(all, func(i, j int) bool { + if all[i].count == all[j].count { + return all[i].name < all[j].name + } + return all[i].count > all[j].count + }) + + var parts []string + limit := maxBreakdownRaw + for i, item := range all { + if i < limit { + parts = append(parts, fmt.Sprintf("%s (%d)", item.name, item.count)) + } + + val := float64(item.count) + snap.Metrics["tool_"+sanitizeMetricName(item.name)] = core.Metric{ + Used: &val, + Unit: "calls", + Window: defaultUsageWindowLabel, + } + } + + if len(all) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(all)-limit)) + } + + if len(parts) > 0 { + snap.Raw["tool_usage"] = strings.Join(parts, ", ") + } +} + +func aggregateTokenTotals(modelTotals map[string]tokenUsage) tokenUsage { + var total tokenUsage + for _, usage := range modelTotals { + total.InputTokens += usage.InputTokens + total.CachedInputTokens += usage.CachedInputTokens + total.OutputTokens += usage.OutputTokens + total.ReasoningTokens += usage.ReasoningTokens + total.ToolTokens += usage.ToolTokens + total.TotalTokens += usage.TotalTokens + } + return total +} + +func setUsageMetric(snap *core.UsageSnapshot, key string, value float64) { + if value <= 0 { + return + } + snap.Metrics[key] = core.Metric{ + Used: &value, + Unit: "tokens", + Window: defaultUsageWindowLabel, + } +} + +func addUsage(target map[string]tokenUsage, name string, delta tokenUsage) { + current := target[name] + current.InputTokens += delta.InputTokens + current.CachedInputTokens += delta.CachedInputTokens + 
current.OutputTokens += delta.OutputTokens + current.ReasoningTokens += delta.ReasoningTokens + current.ToolTokens += delta.ToolTokens + current.TotalTokens += delta.TotalTokens + target[name] = current +} + +func addDailyUsage(target map[string]map[string]float64, name, day string, value float64) { + if day == "" || value <= 0 { + return + } + if target[name] == nil { + target[name] = make(map[string]float64) + } + target[name][day] += value +} + +func sortUsageEntries(values map[string]tokenUsage) []usageEntry { + out := make([]usageEntry, 0, len(values)) + for name, data := range values { + out = append(out, usageEntry{Name: name, Data: data}) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Data.TotalTokens == out[j].Data.TotalTokens { + return out[i].Name < out[j].Name + } + return out[i].Data.TotalTokens > out[j].Data.TotalTokens + }) + return out +} + +func formatUsageSummary(entries []usageEntry, max int) string { + total := 0 + for _, entry := range entries { + total += entry.Data.TotalTokens + } + if total <= 0 { + return "" + } + + limit := max + if limit > len(entries) { + limit = len(entries) + } + + parts := make([]string, 0, limit+1) + for i := 0; i < limit; i++ { + entry := entries[i] + pct := float64(entry.Data.TotalTokens) / float64(total) * 100 + parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, shared.FormatTokenCount(entry.Data.TotalTokens), pct)) + } + if len(entries) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) + } + return strings.Join(parts, ", ") +} + +func formatNamedCountMap(m map[string]int, unit string) string { + if len(m) == 0 { + return "" + } + parts := make([]string, 0, len(m)) + for name, count := range m { + if count <= 0 { + continue + } + parts = append(parts, fmt.Sprintf("%s: %d %s", name, count, unit)) + } + sort.Strings(parts) + return strings.Join(parts, ", ") +} + +func isGeminiToolCallSuccessful(status string) bool { + status = 
// isGeminiMutatingTool reports whether a tool name (case-insensitive) implies
// a file-mutating operation such as edit/write/create/delete/rename/move/replace.
func isGeminiMutatingTool(toolName string) bool {
	toolName = strings.ToLower(strings.TrimSpace(toolName))
	if toolName == "" {
		return false
	}
	return strings.Contains(toolName, "edit") ||
		strings.Contains(toolName, "write") ||
		strings.Contains(toolName, "create") ||
		strings.Contains(toolName, "delete") ||
		strings.Contains(toolName, "rename") ||
		strings.Contains(toolName, "move") ||
		strings.Contains(toolName, "replace")
}

// extractGeminiToolCommand finds the shell command string inside an arbitrary
// tool-args JSON payload by searching for command-like keys.
//
// Fix: the previous implementation iterated maps with Go's randomized map
// order, so a payload with several candidate keys (or nested commands) could
// return a different command on every run. Matching now checks the candidate
// key names in a fixed priority order and recurses into children in sorted
// key order, making the result deterministic.
func extractGeminiToolCommand(raw json.RawMessage) string {
	var payload any
	if json.Unmarshal(raw, &payload) != nil {
		return ""
	}
	return geminiCommandFromValue(payload)
}

// geminiCommandFromValue walks a decoded JSON value and returns the first
// non-empty command string, preferring direct command-like keys over nested
// matches.
func geminiCommandFromValue(v any) string {
	switch value := v.(type) {
	case map[string]any:
		// Direct hits first, in fixed priority order for determinism.
		for _, want := range []string{"command", "cmd", "script", "shell_command"} {
			for key, child := range value {
				if strings.ToLower(strings.TrimSpace(key)) != want {
					continue
				}
				if s, ok := child.(string); ok {
					if cmd := strings.TrimSpace(s); cmd != "" {
						return cmd
					}
				}
			}
		}
		// Then recurse into children, visiting keys in sorted order.
		keys := make([]string, 0, len(value))
		for key := range value {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		for _, key := range keys {
			if cmd := geminiCommandFromValue(value[key]); cmd != "" {
				return cmd
			}
		}
	case []any:
		for _, child := range value {
			if cmd := geminiCommandFromValue(child); cmd != "" {
				return cmd
			}
		}
	}
	return ""
}

// extractGeminiToolPaths collects filesystem-path-looking strings from a
// tool-args JSON payload. Only string values reached through a path-hinting
// key (or any key containing "path"/"file") are considered. The result is
// de-duplicated and sorted.
func extractGeminiToolPaths(raw json.RawMessage) []string {
	var payload any
	if json.Unmarshal(raw, &payload) != nil {
		return nil
	}

	pathHints := map[string]bool{
		"path": true, "paths": true, "file": true, "files": true, "filepath": true, "file_path": true,
		"cwd": true, "dir": true, "directory": true, "target": true, "pattern": true, "glob": true,
		"from": true, "to": true, "include": true, "exclude": true,
	}

	candidates := make(map[string]bool)
	var walk func(v any, hinted bool)
	walk = func(v any, hinted bool) {
		switch value := v.(type) {
		case map[string]any:
			for key, child := range value {
				k := strings.ToLower(strings.TrimSpace(key))
				// A hint propagates down: once under a path-like key, all
				// nested strings are candidates.
				childHinted := hinted || pathHints[k] || strings.Contains(k, "path") || strings.Contains(k, "file")
				walk(child, childHinted)
			}
		case []any:
			for _, child := range value {
				walk(child, hinted)
			}
		case string:
			if !hinted {
				return
			}
			for _, token := range extractGeminiPathTokens(value) {
				candidates[token] = true
			}
		}
	}
	walk(payload, false)

	out := make([]string, 0, len(candidates))
	for c := range candidates {
		out = append(out, c)
	}
	sort.Strings(out)
	return out
}

// extractGeminiPathTokens splits raw on whitespace and keeps tokens that look
// like filesystem paths: quotes/brackets trimmed, URLs and "-flags" skipped,
// and tokens without a separator or dot discarded. A leading "./" is removed.
// The first occurrence of each token is kept, preserving order.
//
// Improvement: de-duplication now happens in a single pass with a seen-set
// instead of a trailing lo.Uniq call, dropping the third-party helper while
// producing the same first-occurrence ordering.
func extractGeminiPathTokens(raw string) []string {
	raw = strings.TrimSpace(raw)
	if raw == "" {
		return nil
	}
	fields := strings.Fields(raw)
	if len(fields) == 0 {
		fields = []string{raw}
	}

	var out []string
	seen := make(map[string]bool, len(fields))
	for _, field := range fields {
		token := strings.Trim(field, "\"'`()[]{}<>,:;")
		if token == "" {
			continue
		}
		lower := strings.ToLower(token)
		if strings.HasPrefix(lower, "http://") || strings.HasPrefix(lower, "https://") || strings.HasPrefix(lower, "file://") {
			continue
		}
		if strings.HasPrefix(token, "-") {
			continue // looks like a CLI flag, not a path
		}
		if !strings.Contains(token, "/") && !strings.Contains(token, "\\") && !strings.Contains(token, ".") {
			continue
		}
		token = strings.TrimPrefix(token, "./")
		if token == "" || seen[token] {
			continue
		}
		seen[token] = true
		out = append(out, token)
	}
	return out
}

// estimateGeminiToolLineDelta approximates lines added/removed by an edit tool
// from its JSON arguments: old_string-like fields count as removed lines,
// new_string-like fields as added lines, and a "content" field counts its
// lines as added (whole-file writes).
func estimateGeminiToolLineDelta(raw json.RawMessage) (added int, removed int) {
	var payload any
	if json.Unmarshal(raw, &payload) != nil {
		return 0, 0
	}
	lineCount := func(text string) int {
		text = strings.TrimSpace(text)
		if text == "" {
			return 0
		}
		return strings.Count(text, "\n") + 1
	}
	var walk func(v any)
	walk = func(v any) {
		switch value := v.(type) {
		case map[string]any:
			var oldText, newText string
			for _, key := range []string{"old_string", "old_text", "from", "replace"} {
				if rawValue, ok := value[key]; ok {
					if s, ok := rawValue.(string); ok {
						oldText = s
						break
					}
				}
			}
			for _, key := range []string{"new_string", "new_text", "to", "with"} {
				if rawValue, ok := value[key]; ok {
					if s, ok := rawValue.(string); ok {
						newText = s
						break
					}
				}
			}
			if oldText != "" || newText != "" {
				removed += lineCount(oldText)
				added += lineCount(newText)
			}
			if rawValue, ok := value["content"]; ok {
				if s, ok := rawValue.(string); ok {
					added += lineCount(s)
				}
			}
			for _, child := range value {
				walk(child)
			}
		case []any:
			for _, child := range value {
				walk(child)
			}
		}
	}
	walk(payload)
	return added, removed
}
s + break + } + } + } + for _, key := range []string{"new_string", "new_text", "to", "with"} { + if rawValue, ok := value[key]; ok { + if s, ok := rawValue.(string); ok { + newText = s + break + } + } + } + if oldText != "" || newText != "" { + removed += lineCount(oldText) + added += lineCount(newText) + } + if rawValue, ok := value["content"]; ok { + if s, ok := rawValue.(string); ok { + added += lineCount(s) + } + } + for _, child := range value { + walk(child) + } + case []any: + for _, child := range value { + walk(child) + } + } + } + walk(payload) + return added, removed +} + +func extractGeminiToolDiffStat(raw json.RawMessage) (geminiDiffStat, bool) { + var empty geminiDiffStat + raw = bytes.TrimSpace(raw) + if len(raw) == 0 || bytes.Equal(raw, []byte("null")) { + return empty, false + } + + var root map[string]json.RawMessage + if json.Unmarshal(raw, &root) != nil { + return empty, false + } + diffRaw, ok := root["diffStat"] + if !ok { + return empty, false + } + + var stat geminiDiffStat + if json.Unmarshal(diffRaw, &stat) != nil { + return empty, false + } + + stat.ModelAddedLines = max(0, stat.ModelAddedLines) + stat.ModelRemovedLines = max(0, stat.ModelRemovedLines) + stat.ModelAddedChars = max(0, stat.ModelAddedChars) + stat.ModelRemovedChars = max(0, stat.ModelRemovedChars) + stat.UserAddedLines = max(0, stat.UserAddedLines) + stat.UserRemovedLines = max(0, stat.UserRemovedLines) + stat.UserAddedChars = max(0, stat.UserAddedChars) + stat.UserRemovedChars = max(0, stat.UserRemovedChars) + + if stat.ModelAddedLines == 0 && + stat.ModelRemovedLines == 0 && + stat.ModelAddedChars == 0 && + stat.ModelRemovedChars == 0 && + stat.UserAddedLines == 0 && + stat.UserRemovedLines == 0 && + stat.UserAddedChars == 0 && + stat.UserRemovedChars == 0 { + return empty, false + } + + return stat, true +} + +func inferGeminiLanguageFromPath(path string) string { + p := strings.ToLower(strings.TrimSpace(path)) + if p == "" { + return "" + } + base := 
strings.ToLower(filepath.Base(p)) + switch base { + case "dockerfile": + return "docker" + case "makefile": + return "make" + } + switch strings.ToLower(filepath.Ext(p)) { + case ".go": + return "go" + case ".py": + return "python" + case ".ts", ".tsx": + return "typescript" + case ".js", ".jsx": + return "javascript" + case ".tf", ".tfvars", ".hcl": + return "terraform" + case ".sh", ".bash", ".zsh", ".fish": + return "shell" + case ".md", ".mdx": + return "markdown" + case ".json": + return "json" + case ".yml", ".yaml": + return "yaml" + case ".sql": + return "sql" + case ".rs": + return "rust" + case ".java": + return "java" + case ".c", ".h": + return "c" + case ".cc", ".cpp", ".cxx", ".hpp": + return "cpp" + case ".rb": + return "ruby" + case ".php": + return "php" + case ".swift": + return "swift" + case ".vue": + return "vue" + case ".svelte": + return "svelte" + case ".toml": + return "toml" + case ".xml": + return "xml" + } + return "" +} + +func usageDelta(current, previous tokenUsage) tokenUsage { + return tokenUsage{ + InputTokens: current.InputTokens - previous.InputTokens, + CachedInputTokens: current.CachedInputTokens - previous.CachedInputTokens, + OutputTokens: current.OutputTokens - previous.OutputTokens, + ReasoningTokens: current.ReasoningTokens - previous.ReasoningTokens, + ToolTokens: current.ToolTokens - previous.ToolTokens, + TotalTokens: current.TotalTokens - previous.TotalTokens, + } +} + +func validUsageDelta(delta tokenUsage) bool { + return delta.InputTokens >= 0 && + delta.CachedInputTokens >= 0 && + delta.OutputTokens >= 0 && + delta.ReasoningTokens >= 0 && + delta.ToolTokens >= 0 && + delta.TotalTokens >= 0 +} + +func normalizeModelName(name string) string { + name = strings.TrimSpace(name) + if name == "" { + return "unknown" + } + return name +} + +func normalizeClientName(name string) string { + name = strings.TrimSpace(name) + if name == "" { + return "Other" + } + return name +} + +func sanitizeMetricName(name string) string { 
+ name = strings.ToLower(strings.TrimSpace(name)) + if name == "" { + return "unknown" + } + + var b strings.Builder + lastUnderscore := false + for _, r := range name { + switch { + case r >= 'a' && r <= 'z': + b.WriteRune(r) + lastUnderscore = false + case r >= '0' && r <= '9': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore { + b.WriteByte('_') + lastUnderscore = true + } + } + } + + out := strings.Trim(b.String(), "_") + if out == "" { + return "unknown" + } + return out +} + +func getModelContextLimit(model string) int { + model = strings.ToLower(model) + switch { + case strings.Contains(model, "1.5-pro"), strings.Contains(model, "1.5-flash-8b"): + return 2_000_000 + case strings.Contains(model, "1.5-flash"): + return 1_000_000 + case strings.Contains(model, "2.0-flash"): + return 1_000_000 + case strings.Contains(model, "gemini-3"), strings.Contains(model, "gemini-exp"): + return 2_000_000 + case strings.Contains(model, "pro"): + return 32_000 + case strings.Contains(model, "flash"): + return 32_000 + } + return 0 +} + +func dayFromTimestamp(timestamp string) string { + if timestamp == "" { + return "" + } + for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} { + if parsed, err := time.Parse(layout, timestamp); err == nil { + return parsed.Format("2006-01-02") + } + } + if len(timestamp) >= 10 { + candidate := timestamp[:10] + if _, err := time.Parse("2006-01-02", candidate); err == nil { + return candidate + } + } + return "" +} + +func dayFromSession(startTime, lastUpdated string) string { + if day := dayFromTimestamp(lastUpdated); day != "" { + return day + } + return dayFromTimestamp(startTime) +} + +func storeSeries(snap *core.UsageSnapshot, key string, values map[string]float64) { + if len(values) == 0 { + return + } + snap.DailySeries[key] = core.SortedTimePoints(values) +} + +func latestSeriesValue(values map[string]float64) (string, float64) { + if len(values) == 0 { + return "", 0 + } + 
dates := slices.Sorted(maps.Keys(values)) + last := dates[len(dates)-1] + return last, values[last] +} + +func sumLastNDays(values map[string]float64, days int) float64 { + if len(values) == 0 || days <= 0 { + return 0 + } + lastDate, _ := latestSeriesValue(values) + if lastDate == "" { + return 0 + } + end, err := time.Parse("2006-01-02", lastDate) + if err != nil { + return 0 + } + start := end.AddDate(0, 0, -(days - 1)) + + total := 0.0 + for date, value := range values { + t, err := time.Parse("2006-01-02", date) + if err != nil { + continue + } + if !t.Before(start) && !t.After(end) { + total += value + } + } + return total +} + +func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { + if value <= 0 { + return + } + v := value + snap.Metrics[key] = core.Metric{ + Used: &v, + Unit: unit, + Window: window, + } +} + +func setPercentMetric(snap *core.UsageSnapshot, key string, value float64, window string) { + if value < 0 { + return + } + if value > 100 { + value = 100 + } + v := value + limit := 100.0 + remaining := 100 - value + snap.Metrics[key] = core.Metric{ + Used: &v, + Limit: &limit, + Remaining: &remaining, + Unit: "%", + Window: window, + } +} + +func isQuotaLimitMessage(content json.RawMessage) bool { + text := strings.ToLower(parseMessageContentText(content)) + if text == "" { + return false + } + return strings.Contains(text, "usage limit reached") || + strings.Contains(text, "all pro models") || + strings.Contains(text, "/stats for usage details") +} + +func parseMessageContentText(content json.RawMessage) string { + content = bytes.TrimSpace(content) + if len(content) == 0 { + return "" + } + + var asString string + if content[0] == '"' && json.Unmarshal(content, &asString) == nil { + return asString + } + + var asArray []map[string]any + if content[0] == '[' && json.Unmarshal(content, &asArray) == nil { + var parts []string + for _, item := range asArray { + if text, ok := item["text"].(string); ok && 
strings.TrimSpace(text) != "" { + parts = append(parts, text) + } + } + if len(parts) > 0 { + return strings.Join(parts, " ") + } + } + + return string(content) +} diff --git a/internal/telemetry/provider_links.go b/internal/telemetry/provider_links.go index e5373a2..cc11b7f 100644 --- a/internal/telemetry/provider_links.go +++ b/internal/telemetry/provider_links.go @@ -1,10 +1,9 @@ package telemetry import ( - "sort" + "maps" + "slices" "strings" - - "github.com/samber/lo" ) func normalizeProviderLinks(in map[string]string) map[string]string { @@ -39,7 +38,5 @@ func telemetrySourceProvidersForTarget(targetProvider string, links map[string]s } } - out := lo.Keys(set) - sort.Strings(out) - return out + return slices.Sorted(maps.Keys(set)) } diff --git a/internal/telemetry/usage_view_materialize.go b/internal/telemetry/usage_view_materialize.go index a5f5b41..cb174fc 100644 --- a/internal/telemetry/usage_view_materialize.go +++ b/internal/telemetry/usage_view_materialize.go @@ -37,7 +37,9 @@ func materializeUsageFilter(ctx context.Context, db *sql.DB, filter usageFilter) filter.materializedTbl = tempTable cleanup := func() { - _, _ = db.ExecContext(context.Background(), fmt.Sprintf("DROP TABLE IF EXISTS %s", tempTable)) + cleanupCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 2*time.Second) + defer cancel() + _, _ = db.ExecContext(cleanupCtx, fmt.Sprintf("DROP TABLE IF EXISTS %s", tempTable)) } return filter, cleanup, nil } diff --git a/internal/tui/analytics_data.go b/internal/tui/analytics_data.go index 8c38eb3..9647c18 100644 --- a/internal/tui/analytics_data.go +++ b/internal/tui/analytics_data.go @@ -1,12 +1,13 @@ package tui import ( + "maps" + "slices" "sort" "strings" "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) const ( @@ -144,8 +145,7 @@ func extractCostData(snapshots map[string]core.UsageSnapshot, filter string) cos data.snapshots = snapshots lowerFilter := 
strings.ToLower(filter) - keys := lo.Keys(snapshots) - sort.Strings(keys) + keys := slices.Sorted(maps.Keys(snapshots)) for _, k := range keys { snap := snapshots[k] From 3cf676acd06707d32588af35cf96346fd260fd26 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Mon, 9 Mar 2026 22:16:43 +0100 Subject: [PATCH 27/32] refactor: split telemetry collectors and provider helpers --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 15 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 39 +- internal/providers/copilot/telemetry.go | 1547 ----------------- internal/providers/copilot/telemetry_logs.go | 120 ++ .../copilot/telemetry_session_file.go | 1148 ++++++++++++ .../copilot/telemetry_session_store.go | 223 +++ internal/providers/ollama/local_paths.go | 94 + internal/providers/ollama/ollama.go | 588 ------- internal/providers/ollama/request_helpers.go | 389 +++++ internal/providers/ollama/server_log_parse.go | 100 ++ internal/providers/opencode/telemetry.go | 1478 ---------------- .../opencode/telemetry_event_file.go | 173 ++ .../providers/opencode/telemetry_hooks.go | 757 ++++++++ .../providers/opencode/telemetry_sqlite.go | 600 +++++++ internal/providers/zai/monitor_helpers.go | 175 ++ internal/providers/zai/usage_extract.go | 459 +++++ internal/providers/zai/usage_helpers.go | 744 ++++++++ internal/providers/zai/zai.go | 1343 -------------- internal/tui/tiles_composition.go | 1392 +-------------- internal/tui/tiles_composition_clients.go | 418 +++++ internal/tui/tiles_composition_providers.go | 319 ++++ internal/tui/tiles_composition_tools.go | 441 +++++ 22 files changed, 6227 insertions(+), 6335 deletions(-) create mode 100644 internal/providers/copilot/telemetry_logs.go create mode 100644 internal/providers/copilot/telemetry_session_file.go create mode 100644 internal/providers/copilot/telemetry_session_store.go create mode 100644 internal/providers/ollama/local_paths.go create mode 100644 internal/providers/ollama/request_helpers.go create mode 100644 
internal/providers/ollama/server_log_parse.go create mode 100644 internal/providers/opencode/telemetry_event_file.go create mode 100644 internal/providers/opencode/telemetry_hooks.go create mode 100644 internal/providers/opencode/telemetry_sqlite.go create mode 100644 internal/providers/zai/monitor_helpers.go create mode 100644 internal/providers/zai/usage_extract.go create mode 100644 internal/providers/zai/usage_helpers.go create mode 100644 internal/tui/tiles_composition_clients.go create mode 100644 internal/tui/tiles_composition_providers.go create mode 100644 internal/tui/tiles_composition_tools.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 4f52045..ae656db 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -69,20 +69,25 @@ This table captures every issue found in this pass. It is broad and high-signal, | R49 | Fixed | Settings modal preview-data split | `internal/tui/settings_modal.go`, `internal/tui/settings_modal_preview.go` | The large preview snapshot fixture for widget-section configuration moved out of the main settings modal behavior file, reducing render/input coupling inside `settings_modal.go`. | Continue moving purely preview/demo helpers out of modal behavior files. | | R50 | Fixed | Account-config contract comments aligned with runtime | `internal/core/provider.go` | `AccountConfig` comments no longer claim that `Binary` and `BaseURL` are valid primary homes for provider-local data paths. The type now documents the actual runtime contract: provider-local paths belong in `Paths`, with legacy compatibility handled inside provider packages. | A typed runtime-hints structure is still the next hardening step. 
| | R51 | Fixed | Config test file helper extraction | `internal/config/config_test.go`, `internal/config/test_helpers_test.go` | Repeated `settings.json` temp-file creation/loading boilerplate in the config test suite now goes through shared helpers for the common cases, shrinking some of the easiest-to-repeat fixture noise. | Continue the same pattern in the remaining large test files and higher-noise config cases. | +| R52 | Fixed | OpenCode telemetry collector split | `internal/providers/opencode/telemetry.go`, `internal/providers/opencode/telemetry_event_file.go`, `internal/providers/opencode/telemetry_sqlite.go`, `internal/providers/opencode/telemetry_hooks.go` | OpenCode telemetry no longer mixes event-file parsing, SQLite reads, and hook normalization in one file. Those concerns now live in dedicated units behind the same collector surface. | Keep future OpenCode telemetry changes inside the matching helper unit. | +| R53 | Fixed | Copilot telemetry collector split | `internal/providers/copilot/telemetry.go`, `internal/providers/copilot/telemetry_session_file.go`, `internal/providers/copilot/telemetry_session_store.go`, `internal/providers/copilot/telemetry_logs.go` | Copilot telemetry session JSONL parsing, session-store SQLite fallback, and CompactionProcessor log parsing now live outside the main telemetry collector file. | Continue extracting test fixtures from the large Copilot telemetry suite over time. | +| R54 | Fixed | Tile composition section split | `internal/tui/tiles_composition.go`, `internal/tui/tiles_composition_providers.go`, `internal/tui/tiles_composition_clients.go`, `internal/tui/tiles_composition_tools.go` | Provider, client/source, and tool composition render sections no longer share one large file. The core composition file now holds shared types/helpers and the section files hold the view-specific bar builders. | Continue splitting only if one of the new section files regrows. 
| +| R55 | Fixed | Z.AI provider helper decomposition | `internal/providers/zai/zai.go`, `internal/providers/zai/monitor_helpers.go`, `internal/providers/zai/usage_extract.go`, `internal/providers/zai/usage_helpers.go` | Z.AI API-base resolution, monitor request/quota handling, usage row extraction, payload capture, and normalization helpers now live outside the main provider file. The coordinator file is reduced to fetch orchestration and snapshot assembly. | Continue only if the remaining fetch/projection path grows again. | +| R56 | Fixed | Ollama path and log-helper split | `internal/providers/ollama/ollama.go`, `internal/providers/ollama/request_helpers.go`, `internal/providers/ollama/local_paths.go`, `internal/providers/ollama/server_log_parse.go` | Ollama request helpers, local path resolution, and GIN log parsing are now separate helper units instead of living inline in the main provider file. | The remaining large DB-population helpers are the next split point only if Ollama changes keep clustering there. | ## Action Table | ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | | --- | --- | --- | --- | --- | --- | --- | | A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow and the comments now match that behavior, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire the remaining compatibility shape in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. 
| -| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/tui/detail.go`, `internal/tui/detail_tokens.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/tui/settings_modal_preview.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, tile-body composition is cached, and settings preview/layout pieces are separated, but TUI state-transition and render-heavy flows are still concentrated in a few large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | -| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Composition bars, provider tile fallback/rate-limit selection, analytics model views, and analytics cost fallback now consume shared extractors, but some analytics/detail sections still decode metric-key conventions directly in TUI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | -| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/zai/zai.go`, `internal/providers/gemini_cli/gemini_cli.go` | Cursor, OpenRouter, Codex, Copilot, and Claude Code are now materially decomposed, but several providers still combine large parsing/projection flows in very large files. 
| Split the remaining large providers by concern: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | +| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/analytics.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, tile-body composition is cached, and composition/settings subsections are split, but state-transition and render-heavy flows are still concentrated in a few large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. | +| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Main dashboard composition, provider tile fallback/rate-limit selection, token sections, and cost fallback now consume shared extractors, but a few analytics/detail sections still decode metric-key conventions directly in UI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | +| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/gemini_cli/gemini_cli.go` | Cursor, OpenRouter, Codex, Copilot, Claude Code, Z.AI, and the telemetry collectors are now materially decomposed, but Ollama and Gemini CLI still keep large fetch/projection and local-session flows in very large files. 
| Split the remaining large providers by concern only where active change pressure justifies it: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | | A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. | Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. | | A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. 
| | A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | | A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. The config suite now has basic shared file helpers, but the larger provider/telemetry suites still carry too much duplicated fixture setup. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | -| A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go`, `internal/tui/settings_modal_preview.go`, `internal/tui/tiles_composition.go` | TUI logic is split across more focused files now, but several files are still individually large and still mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `composition_extractors`. | Better readability and easier targeted refactors. 
| +| A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/analytics.go` | TUI logic is split across more focused files now, and tile composition is no longer one giant file, but several files are still individually large and still mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `analytics_sections`. | Better readability and easier targeted refactors. | | A15 | P3 | Performance optimization follow-through in render path | `internal/tui/model.go`, `internal/tui/tiles.go`, `internal/tui/tiles_cache.go`, `internal/tui/detail.go`, `internal/tui/analytics.go` | Tile body composition is now cached per snapshot/update state, but detail and analytics still rebuild some derived structures on each render path. | Extend caching only to the remaining high-cost detail/analytics derivations if profiling or repeated churn justifies it. | Lower render cost without over-caching the whole UI. | ## Suggested Execution Order @@ -95,5 +100,5 @@ This table captures every issue found in this pass. It is broad and high-signal, ## Notes - The highest-risk remaining issues are architectural rather than immediately broken behavior. -- The biggest remaining drift risks are the metric-prefix parsing still spread across the TUI render path and the remaining large TUI/provider files. +- The biggest remaining drift risks are the last analytics/detail metric-key parsing pockets and the remaining large TUI/provider files. - The race pass completed cleanly for the core dashboard/daemon/telemetry packages after the timeframe fix. 
diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index 38c5a59..981e6d0 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -5,7 +5,7 @@ Repository: `/Users/janekbaraniewski/Workspace/priv/openusage` ## Scope -This is a refreshed architecture review after the dashboard race fix, daemon/read-model cleanup, provider parser consolidation, and the recent Cursor/OpenRouter/Ollama/Codex/Claude Code/TUI refactors on branch `feat/dashboard-race-parser-cleanups`. +This is a refreshed architecture review after the dashboard race fix, daemon/read-model cleanup, provider parser consolidation, telemetry collector splits, and the recent Cursor/OpenRouter/Ollama/Z.AI/Codex/Claude Code/TUI refactors on branch `feat/dashboard-race-parser-cleanups`. The goal of this report is not to restate already-fixed issues. It documents the meaningful problems still left in the current tree. @@ -23,10 +23,14 @@ These were major concerns in earlier reviews and are now materially addressed: - Claude Code local file readers, model-summary helpers, and conversation aggregation concentrated in one provider file. - Copilot GitHub API fetch/quota/org-metrics flow concentrated in the same file as local log/session parsing. - Copilot local config/log/session parsing concentrated in the same file as provider orchestration. +- Copilot telemetry JSONL/session-store/log parsing concentrated in one collector file. +- OpenCode telemetry event-file/SQLite/hook parsing concentrated in one collector file. - OpenRouter provider-resolution, analytics, generation, projection, and account-path monolith sprawl. - TUI side-effect leakage into config persistence / integration install / provider validation. - Settings modal layout/render wrapper living inline with settings state/input handling. 
+- Tile composition provider/client/tool sections living in one large file. - Ollama hot-path `time.Now()` usage in behavioral window/reset logic. +- Z.AI monitor helpers and usage extraction/payload parsing concentrated in one provider file. - Shared hook ingest parsing/local fallback drift between daemon and CLI. - Usage-view temp-table materialization and aggregate query fanout living inline in the main orchestration path. @@ -34,13 +38,16 @@ These were major concerns in earlier reviews and are now materially addressed: ### 1. [P2] TUI rendering and state handling are still concentrated in a few very large files -The TUI is much better than before, and provider tile display-summary logic no longer lives inline in `model.go`, while the settings modal layout wrapper now lives in its own file. Tile-body derivation is cached now as well. But [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go), and the remaining settings modal render sections are still large enough that unrelated concerns move together. +The TUI is much better than before, and provider tile display-summary logic no longer lives inline in `model.go`, while the settings modal layout wrapper now lives in its own file. Tile-body derivation is cached now, and provider/client/tool composition sections are split out of the main composition file. But [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [analytics.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/analytics.go), and the remaining settings modal render sections are still large enough that unrelated concerns move together. 
Refs: - [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go) - [model_display_info.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model_display_info.go) - [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go) - [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go) +- [tiles_composition_providers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition_providers.go) +- [tiles_composition_clients.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition_clients.go) +- [tiles_composition_tools.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition_tools.go) - [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) - [settings_modal_layout.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal_layout.go) @@ -80,20 +87,26 @@ What to address: ### 4. [P2] Several providers are still large mixed-responsibility units -Cursor, OpenRouter, Codex, Copilot, and Claude Code are now in much better shape, but several providers still remain monoliths that mix transport, parsing, normalization, and projection in one place. +Cursor, OpenRouter, Codex, Copilot, Claude Code, and Z.AI are now in much better shape, and the OpenCode/Copilot telemetry collectors are split as well. The remaining larger provider concentration is now mostly in Ollama and Gemini CLI. 
Refs: - [ollama.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/ollama.go) -- [zai.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/zai.go) +- [local_paths.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/local_paths.go) +- [server_log_parse.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/server_log_parse.go) - [gemini_cli.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/gemini_cli.go) -- [copilot.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/copilot.go) -- [api_data.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/api_data.go) -- [local_data.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/local_data.go) -- [local_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/local_helpers.go) -- [claude_code.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/claude_code.go) -- [local_files.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_files.go) -- [local_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/local_helpers.go) -- [conversation_usage.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/claude_code/conversation_usage.go) +- [session_usage.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/session_usage.go) +- [zai.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/zai.go) +- [monitor_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/monitor_helpers.go) +- [usage_extract.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/usage_extract.go) +- 
[usage_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/usage_helpers.go) +- [telemetry.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/opencode/telemetry.go) +- [telemetry_event_file.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/opencode/telemetry_event_file.go) +- [telemetry_sqlite.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/opencode/telemetry_sqlite.go) +- [telemetry_hooks.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/opencode/telemetry_hooks.go) +- [telemetry.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/telemetry.go) +- [telemetry_session_file.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/telemetry_session_file.go) +- [telemetry_session_store.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/telemetry_session_store.go) +- [telemetry_logs.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/telemetry_logs.go) What to address: - Split by concern, not by arbitrary line count: @@ -154,4 +167,4 @@ What to address: - The repo is in materially better shape than it was at the start of this cleanup branch. - The main remaining risks are now architectural and maintainability-oriented rather than immediate correctness regressions. -- The highest near-term drift risk is the remaining metric-prefix parsing still sitting in TUI render code plus the size of the remaining TUI/provider units. +- The highest near-term drift risk is the remaining analytics/detail metric-prefix parsing still sitting in UI render code plus the size of the remaining TUI/provider units. 
diff --git a/internal/providers/copilot/telemetry.go b/internal/providers/copilot/telemetry.go index 885e276..ac312b8 100644 --- a/internal/providers/copilot/telemetry.go +++ b/internal/providers/copilot/telemetry.go @@ -1,22 +1,12 @@ package copilot import ( - "bufio" "context" - "database/sql" "encoding/json" - "fmt" "os" "path/filepath" - "regexp" - "sort" - "strconv" "strings" - "time" - _ "github.com/mattn/go-sqlite3" - - "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/shared" ) @@ -153,1540 +143,3 @@ func defaultCopilotSessionStoreDB() string { } return filepath.Join(home, defaultCopilotStoreDB) } - -// parseCopilotTelemetrySessionFile parses a single session's events.jsonl and -// produces telemetry events from assistant.usage and assistant.message entries. -func parseCopilotTelemetrySessionFile(path, sessionID string) ([]shared.TelemetryEvent, error) { - data, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - lines := strings.Split(string(data), "\n") - currentModel := "" - workspaceID := "" - repo := "" - cwd := "" - clientLabel := "cli" - turnIndex := 0 - assistantUsageSeen := false - toolContexts := make(map[string]copilotTelemetryToolContext) - - var out []shared.TelemetryEvent - for lineNum, line := range lines { - line = strings.TrimSpace(line) - if line == "" { - continue - } - var evt sessionEvent - if json.Unmarshal([]byte(line), &evt) != nil { - continue - } - occurredAt := time.Now().UTC() - if ts := shared.FlexParseTime(evt.Timestamp); !ts.IsZero() { - occurredAt = ts - } - - switch evt.Type { - case "session.start": - var start sessionStartData - if json.Unmarshal(evt.Data, &start) == nil { - if start.Context.Repository != "" { - repo = start.Context.Repository - } - if start.Context.CWD != "" { - cwd = start.Context.CWD - workspaceID = shared.SanitizeWorkspace(start.Context.CWD) - } - clientLabel = normalizeCopilotClient(repo, cwd) - if currentModel == "" && 
start.SelectedModel != "" { - currentModel = start.SelectedModel - } - } - - case "session.context_changed": - var changed copilotTelemetrySessionContextChangedData - if json.Unmarshal(evt.Data, &changed) == nil { - if changed.Repository != "" { - repo = changed.Repository - } - if changed.CWD != "" { - cwd = changed.CWD - workspaceID = shared.SanitizeWorkspace(changed.CWD) - } - clientLabel = normalizeCopilotClient(repo, cwd) - } - - case "session.model_change": - var mc modelChangeData - if json.Unmarshal(evt.Data, &mc) == nil && mc.NewModel != "" { - currentModel = mc.NewModel - } - - case "session.info": - var info sessionInfoData - if json.Unmarshal(evt.Data, &info) == nil && info.InfoType == "model" { - if m := extractModelFromInfoMsg(info.Message); m != "" { - currentModel = m - } - } - - case "assistant.message": - var msg copilotTelemetryAssistantMessageData - if json.Unmarshal(evt.Data, &msg) != nil { - continue - } - - var toolRequests []json.RawMessage - if json.Unmarshal(msg.ToolRequests, &toolRequests) != nil || len(toolRequests) == 0 { - continue - } - - messageID := copilotTelemetryMessageID(sessionID, lineNum+1, msg.MessageID, evt.ID) - turnID := core.FirstNonEmpty(messageID, fmt.Sprintf("%s:line:%d", sessionID, lineNum+1)) - - for reqIdx, rawReq := range toolRequests { - req, ok := parseCopilotTelemetryToolRequest(rawReq) - if !ok { - continue - } - - explicitCallID := strings.TrimSpace(req.ToolCallID) != "" - toolCallID := strings.TrimSpace(req.ToolCallID) - if toolCallID == "" { - toolCallID = fmt.Sprintf("%s:%d:tool:%d", sessionID, lineNum+1, reqIdx+1) - } - - toolName, toolMeta := normalizeCopilotTelemetryToolName(req.RawName) - if toolName == "" { - toolName = "unknown" - } - - payload := copilotTelemetryBasePayload(path, lineNum+1, clientLabel, repo, cwd, "assistant.message.tool_request") - for key, value := range toolMeta { - payload[key] = value - } - payload["tool_call_id"] = toolCallID - - if req.Input != nil { - payload["tool_input"] = 
req.Input - if cmd := extractCopilotTelemetryCommand(req.Input); cmd != "" { - payload["command"] = cmd - } - if paths := shared.ExtractFilePathsFromPayload(req.Input); len(paths) > 0 { - payload["file"] = paths[0] - if lang := inferCopilotLanguageFromPath(paths[0]); lang != "" { - payload["language"] = lang - } - } - if added, removed := estimateCopilotTelemetryLineDelta(req.Input); added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - - if _, ok := payload["command"]; !ok { - if cmd := extractCopilotToolCommand(rawReq); cmd != "" { - payload["command"] = cmd - } - } - if _, ok := payload["file"]; !ok { - if paths := extractCopilotToolPaths(rawReq); len(paths) > 0 { - payload["file"] = paths[0] - if lang := inferCopilotLanguageFromPath(paths[0]); lang != "" { - payload["language"] = lang - } - } - } - if _, ok := payload["lines_added"]; !ok { - added, removed := estimateCopilotToolLineDelta(rawReq) - if added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - - model := strings.TrimSpace(currentModel) - if model == "" { - model = "unknown" - } - if upstream := copilotUpstreamProviderForModel(model); upstream != "" { - payload["upstream_provider"] = upstream - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ToolCallID: toolCallID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeToolUsage, - ModelRaw: model, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - ToolName: toolName, - Status: shared.TelemetryStatusUnknown, - Payload: payload, - }) - - if explicitCallID { - toolContexts[toolCallID] = copilotTelemetryToolContext{ - MessageID: messageID, - TurnID: turnID, - Model: model, - 
ToolName: toolName, - Payload: copyCopilotTelemetryPayload(payload), - } - } - } - - case "tool.execution_start": - var start copilotTelemetryToolExecutionStartData - if json.Unmarshal(evt.Data, &start) != nil { - continue - } - - explicitCallID := strings.TrimSpace(start.ToolCallID) != "" - toolCallID := strings.TrimSpace(start.ToolCallID) - if toolCallID == "" { - toolCallID = fmt.Sprintf("%s:%d:tool_start", sessionID, lineNum+1) - } - - ctx := toolContexts[toolCallID] - payload := copyCopilotTelemetryPayload(ctx.Payload) - if len(payload) == 0 { - payload = copilotTelemetryBasePayload(path, lineNum+1, clientLabel, repo, cwd, "tool.execution_start") - } else { - payload["event"] = "tool.execution_start" - payload["line"] = lineNum + 1 - } - payload["tool_call_id"] = toolCallID - - toolName := strings.TrimSpace(ctx.ToolName) - if start.ToolName != "" { - normalized, meta := normalizeCopilotTelemetryToolName(start.ToolName) - toolName = normalized - for key, value := range meta { - payload[key] = value - } - } - if toolName == "" { - toolName = "unknown" - } - - if args := decodeCopilotTelemetryJSONAny(start.Arguments); args != nil { - payload["tool_input"] = args - if _, ok := payload["command"]; !ok { - if cmd := extractCopilotTelemetryCommand(args); cmd != "" { - payload["command"] = cmd - } - } - if _, ok := payload["file"]; !ok { - if paths := shared.ExtractFilePathsFromPayload(args); len(paths) > 0 { - payload["file"] = paths[0] - if lang := inferCopilotLanguageFromPath(paths[0]); lang != "" { - payload["language"] = lang - } - } - } - if _, ok := payload["lines_added"]; !ok { - added, removed := estimateCopilotTelemetryLineDelta(args) - if added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - } - - model := strings.TrimSpace(ctx.Model) - if model == "" { - model = strings.TrimSpace(currentModel) - } - if model == "" { - model = "unknown" - } - if upstream := copilotUpstreamProviderForModel(model); upstream 
!= "" { - payload["upstream_provider"] = upstream - } - - messageID := core.FirstNonEmpty(ctx.MessageID, fmt.Sprintf("%s:%d", sessionID, lineNum+1)) - turnID := core.FirstNonEmpty(ctx.TurnID, messageID) - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ToolCallID: toolCallID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeToolUsage, - ModelRaw: model, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - ToolName: toolName, - Status: shared.TelemetryStatusUnknown, - Payload: payload, - }) - - if explicitCallID { - toolContexts[toolCallID] = copilotTelemetryToolContext{ - MessageID: messageID, - TurnID: turnID, - Model: model, - ToolName: toolName, - Payload: copyCopilotTelemetryPayload(payload), - } - } - - case "tool.execution_complete": - var complete copilotTelemetryToolExecutionCompleteData - if json.Unmarshal(evt.Data, &complete) != nil { - continue - } - - toolCallID := strings.TrimSpace(complete.ToolCallID) - explicitCallID := toolCallID != "" - if toolCallID == "" { - toolCallID = fmt.Sprintf("%s:%d:tool_complete", sessionID, lineNum+1) - } - - ctx := toolContexts[toolCallID] - payload := copyCopilotTelemetryPayload(ctx.Payload) - if len(payload) == 0 { - payload = copilotTelemetryBasePayload(path, lineNum+1, clientLabel, repo, cwd, "tool.execution_complete") - } else { - payload["event"] = "tool.execution_complete" - payload["line"] = lineNum + 1 - } - payload["tool_call_id"] = toolCallID - - toolName := strings.TrimSpace(ctx.ToolName) - if complete.ToolName != "" { - normalized, meta := normalizeCopilotTelemetryToolName(complete.ToolName) - toolName = normalized - for key, value := range meta { - payload[key] = value - } - } - if toolName == "" { - toolName = "unknown" - } - - if 
complete.Success != nil { - payload["success"] = *complete.Success - } - if strings.TrimSpace(complete.Status) != "" { - payload["status_raw"] = strings.TrimSpace(complete.Status) - } - - if resultMeta := summarizeCopilotTelemetryResult(complete.Result); len(resultMeta) > 0 { - for key, value := range resultMeta { - if _, exists := payload[key]; !exists { - payload[key] = value - } - } - } - - errorCode, errorMessage := summarizeCopilotTelemetryError(complete.Error) - if errorCode != "" { - payload["error_code"] = errorCode - } - if errorMessage != "" { - payload["error_message"] = truncate(errorMessage, 240) - } - - model := strings.TrimSpace(ctx.Model) - if model == "" { - model = strings.TrimSpace(currentModel) - } - if model == "" { - model = "unknown" - } - if upstream := copilotUpstreamProviderForModel(model); upstream != "" { - payload["upstream_provider"] = upstream - } - - status := copilotTelemetryToolStatus(complete.Success, complete.Status, errorCode, errorMessage) - messageID := core.FirstNonEmpty(ctx.MessageID, fmt.Sprintf("%s:%d", sessionID, lineNum+1)) - turnID := core.FirstNonEmpty(ctx.TurnID, messageID) - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ToolCallID: toolCallID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeToolUsage, - ModelRaw: model, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - ToolName: toolName, - Status: status, - Payload: payload, - }) - - if explicitCallID { - toolContexts[toolCallID] = copilotTelemetryToolContext{ - MessageID: messageID, - TurnID: turnID, - Model: model, - ToolName: toolName, - Payload: copyCopilotTelemetryPayload(payload), - } - } - - case "session.workspace_file_changed": - var changed 
copilotTelemetryWorkspaceFileChangedData - if json.Unmarshal(evt.Data, &changed) != nil { - continue - } - filePath := strings.TrimSpace(changed.Path) - if filePath == "" { - continue - } - - op := sanitizeMetricName(changed.Operation) - if op == "" || op == "unknown" { - op = "change" - } - - payload := copilotTelemetryBasePayload(path, lineNum+1, clientLabel, repo, cwd, "session.workspace_file_changed") - payload["file"] = filePath - payload["operation"] = strings.TrimSpace(changed.Operation) - if lang := inferCopilotLanguageFromPath(filePath); lang != "" { - payload["language"] = lang - } - - model := strings.TrimSpace(currentModel) - if model == "" { - model = "unknown" - } - if upstream := copilotUpstreamProviderForModel(model); upstream != "" { - payload["upstream_provider"] = upstream - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: fmt.Sprintf("%s:file:%d", sessionID, lineNum+1), - MessageID: fmt.Sprintf("%s:%d", sessionID, lineNum+1), - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeToolUsage, - ModelRaw: model, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(0), - }, - ToolName: "workspace_file_" + op, - Status: shared.TelemetryStatusOK, - Payload: payload, - }) - - case "assistant.turn_start": - // Track turn starts; actual metric emission happens at turn_end. 
- continue - - case "assistant.turn_end": - turnIndex++ - if assistantUsageSeen || currentModel == "" { - continue - } - turnID := core.FirstNonEmpty(strings.TrimSpace(evt.ID), fmt.Sprintf("%s:synth:%d", sessionID, turnIndex)) - messageID := fmt.Sprintf("%s:%d", sessionID, lineNum+1) - payload := copilotTelemetryBasePayload(path, lineNum+1, clientLabel, repo, cwd, "assistant.turn_end") - payload["synthetic"] = true - payload["upstream_provider"] = copilotUpstreamProviderForModel(currentModel) - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: currentModel, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - Status: shared.TelemetryStatusOK, - Payload: payload, - }) - - case "assistant.usage": - var usage assistantUsageData - if json.Unmarshal(evt.Data, &usage) != nil { - continue - } - assistantUsageSeen = true - - model := usage.Model - if model == "" { - model = currentModel - } - if model == "" { - continue - } - - turnIndex++ - - turnID := core.FirstNonEmpty(strings.TrimSpace(evt.ID), fmt.Sprintf("%s:usage:%d", sessionID, turnIndex)) - messageID := fmt.Sprintf("%s:%d", sessionID, lineNum+1) - - totalTokens := int64(usage.InputTokens + usage.OutputTokens) - payload := copilotTelemetryBasePayload(path, lineNum+1, clientLabel, repo, cwd, "assistant.usage") - payload["source_file"] = path - payload["line"] = lineNum + 1 - payload["client"] = clientLabel - payload["upstream_provider"] = copilotUpstreamProviderForModel(model) - if usage.Duration > 0 { - payload["duration_ms"] = usage.Duration - } - if len(usage.QuotaSnapshots) > 0 { - payload["quota_snapshot_count"] = len(usage.QuotaSnapshots) - } - - te := 
shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: model, - TokenUsage: core.TokenUsage{ - InputTokens: core.Int64Ptr(int64(usage.InputTokens)), - OutputTokens: core.Int64Ptr(int64(usage.OutputTokens)), - TotalTokens: core.Int64Ptr(totalTokens), - Requests: core.Int64Ptr(1), - }, - Status: shared.TelemetryStatusOK, - Payload: payload, - } - - if usage.CacheReadTokens > 0 { - te.CacheReadTokens = core.Int64Ptr(int64(usage.CacheReadTokens)) - } - if usage.CacheWriteTokens > 0 { - te.CacheWriteTokens = core.Int64Ptr(int64(usage.CacheWriteTokens)) - } - if usage.Cost > 0 { - te.CostUSD = core.Float64Ptr(usage.Cost) - } - - out = append(out, te) - - case "session.shutdown": - var shutdown sessionShutdownData - if json.Unmarshal(evt.Data, &shutdown) != nil { - continue - } - - shutdownTurnID := core.FirstNonEmpty(strings.TrimSpace(evt.ID), fmt.Sprintf("%s:shutdown", sessionID)) - shutdownMessageID := fmt.Sprintf("%s:shutdown:%d", sessionID, lineNum+1) - - shutdownPayload := copilotTelemetryBasePayload(path, lineNum+1, clientLabel, repo, cwd, "session.shutdown") - shutdownPayload["shutdown_type"] = strings.TrimSpace(shutdown.ShutdownType) - shutdownPayload["total_premium_requests"] = shutdown.TotalPremiumRequests - shutdownPayload["total_api_duration_ms"] = shutdown.TotalAPIDurationMs - shutdownPayload["session_start_time"] = strings.TrimSpace(shutdown.SessionStartTime) - shutdownPayload["lines_added"] = shutdown.CodeChanges.LinesAdded - shutdownPayload["lines_removed"] = shutdown.CodeChanges.LinesRemoved - shutdownPayload["files_modified"] = shutdown.CodeChanges.FilesModified - shutdownPayload["model_metrics_count"] = len(shutdown.ModelMetrics) - if model := 
strings.TrimSpace(currentModel); model != "" { - shutdownPayload["upstream_provider"] = copilotUpstreamProviderForModel(model) - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: shutdownTurnID, - MessageID: shutdownMessageID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeTurnCompleted, - ModelRaw: core.FirstNonEmpty(strings.TrimSpace(currentModel), "unknown"), - Status: shared.TelemetryStatusOK, - Payload: shutdownPayload, - }) - - if assistantUsageSeen { - continue - } - - models := make([]string, 0, len(shutdown.ModelMetrics)) - for model := range shutdown.ModelMetrics { - models = append(models, model) - } - sort.Strings(models) - - for idx, model := range models { - modelMetric := shutdown.ModelMetrics[model] - model = strings.TrimSpace(model) - if model == "" { - model = core.FirstNonEmpty(strings.TrimSpace(currentModel), "unknown") - } - - inputTokens := int64(modelMetric.Usage.InputTokens) - outputTokens := int64(modelMetric.Usage.OutputTokens) - cacheReadTokens := int64(modelMetric.Usage.CacheReadTokens) - cacheWriteTokens := int64(modelMetric.Usage.CacheWriteTokens) - totalTokens := inputTokens + outputTokens - requests := int64(modelMetric.Requests.Count) - cost := modelMetric.Requests.Cost - - if totalTokens <= 0 && requests <= 0 && cost <= 0 { - continue - } - - messageID := fmt.Sprintf("%s:shutdown:%s", sessionID, sanitizeMetricName(model)) - if idx > 0 { - messageID = fmt.Sprintf("%s:%d", messageID, idx+1) - } - turnID := messageID - - payload := copilotTelemetryBasePayload(path, lineNum+1, clientLabel, repo, cwd, "session.shutdown.model_metric") - payload["model_metrics_source"] = "session.shutdown" - payload["upstream_provider"] = copilotUpstreamProviderForModel(model) - if idx == 0 { - payload["lines_added"] = 
shutdown.CodeChanges.LinesAdded - payload["lines_removed"] = shutdown.CodeChanges.LinesRemoved - payload["files_modified"] = shutdown.CodeChanges.FilesModified - } - - usageEvent := shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: model, - TokenUsage: core.TokenUsage{ - InputTokens: core.Int64Ptr(inputTokens), - OutputTokens: core.Int64Ptr(outputTokens), - TotalTokens: core.Int64Ptr(totalTokens), - }, - Status: shared.TelemetryStatusOK, - Payload: payload, - } - if requests > 0 { - usageEvent.Requests = core.Int64Ptr(requests) - } - if cacheReadTokens > 0 { - usageEvent.CacheReadTokens = core.Int64Ptr(cacheReadTokens) - } - if cacheWriteTokens > 0 { - usageEvent.CacheWriteTokens = core.Int64Ptr(cacheWriteTokens) - } - if cost > 0 { - usageEvent.CostUSD = core.Float64Ptr(cost) - } - - out = append(out, usageEvent) - } - } - } - - return out, nil -} - -func copilotTelemetryMessageID(sessionID string, lineNum int, messageID, fallbackID string) string { - messageID = strings.TrimSpace(messageID) - if messageID != "" { - if strings.Contains(messageID, ":") { - return messageID - } - return fmt.Sprintf("%s:%s", sessionID, messageID) - } - - fallbackID = strings.TrimSpace(fallbackID) - if fallbackID != "" { - return fmt.Sprintf("%s:%s", sessionID, fallbackID) - } - return fmt.Sprintf("%s:%d", sessionID, lineNum) -} - -func parseCopilotTelemetryToolRequest(raw json.RawMessage) (copilotTelemetryToolRequest, bool) { - var reqMap map[string]any - if json.Unmarshal(raw, &reqMap) != nil { - return copilotTelemetryToolRequest{}, false - } - - out := copilotTelemetryToolRequest{ - ToolCallID: strings.TrimSpace(anyToString(reqMap["toolCallId"])), - RawName: 
core.FirstNonEmpty(anyToString(reqMap["name"]), anyToString(reqMap["toolName"]), anyToString(reqMap["tool"])), - } - if out.RawName == "" { - out.RawName = extractCopilotToolName(raw) - } - - if value, ok := reqMap["arguments"]; ok { - out.Input = decodeCopilotTelemetryJSONAny(value) - } - if out.Input == nil { - if value, ok := reqMap["args"]; ok { - out.Input = decodeCopilotTelemetryJSONAny(value) - } - } - if out.Input == nil { - if value, ok := reqMap["input"]; ok { - out.Input = decodeCopilotTelemetryJSONAny(value) - } - } - - return out, true -} - -func normalizeCopilotTelemetryToolName(raw string) (string, map[string]any) { - meta := make(map[string]any) - name := strings.TrimSpace(raw) - if name == "" { - return "unknown", meta - } - - meta["tool_name_raw"] = name - - if server, function, ok := parseCopilotTelemetryMCPTool(name); ok { - canonical := "mcp__" + server + "__" + function - meta["tool_type"] = "mcp" - meta["mcp_server"] = server - meta["mcp_function"] = function - return canonical, meta - } - - return sanitizeMetricName(name), meta -} - -func parseCopilotTelemetryMCPTool(raw string) (string, string, bool) { - normalized := strings.ToLower(strings.TrimSpace(raw)) - if normalized == "" { - return "", "", false - } - - // Copilot-native MCP wrappers: github_mcp_server_list_issues. 
- if parts := strings.SplitN(normalized, "_mcp_server_", 2); len(parts) == 2 { - server := sanitizeCopilotMCPSegment(parts[0]) - function := sanitizeCopilotMCPSegment(parts[1]) - if server != "" && function != "" { - return server, function, true - } - } - if parts := strings.SplitN(normalized, "-mcp-server-", 2); len(parts) == 2 { - server := sanitizeCopilotMCPSegment(parts[0]) - function := sanitizeCopilotMCPSegment(parts[1]) - if server != "" && function != "" { - return server, function, true - } - } - - if strings.HasPrefix(normalized, "mcp__") { - rest := strings.TrimPrefix(normalized, "mcp__") - parts := strings.SplitN(rest, "__", 2) - if len(parts) != 2 { - return sanitizeCopilotMCPSegment(rest), "", false - } - server := sanitizeCopilotMCPSegment(parts[0]) - function := sanitizeCopilotMCPSegment(parts[1]) - if server == "" || function == "" { - return "", "", false - } - return server, function, true - } - - if strings.HasPrefix(normalized, "mcp-") || strings.HasPrefix(normalized, "mcp_") { - canonical := normalizeCopilotCursorStyleMCPName(normalized) - if strings.HasPrefix(canonical, "mcp__") { - parts := strings.SplitN(strings.TrimPrefix(canonical, "mcp__"), "__", 2) - if len(parts) == 2 { - server := sanitizeCopilotMCPSegment(parts[0]) - function := sanitizeCopilotMCPSegment(parts[1]) - if server != "" && function != "" { - return server, function, true - } - } - } - } - - // Legacy suffix format from earlier tool adapters: "server-function (mcp)". 
- if strings.HasSuffix(normalized, " (mcp)") { - body := strings.TrimSpace(strings.TrimSuffix(normalized, " (mcp)")) - body = strings.TrimPrefix(body, "user-") - if body == "" { - return "", "", false - } - if idx := findCopilotTelemetryServerFunctionSplit(body); idx > 0 { - server := sanitizeCopilotMCPSegment(body[:idx]) - function := sanitizeCopilotMCPSegment(body[idx+1:]) - if server != "" && function != "" { - return server, function, true - } - } - return "other", sanitizeCopilotMCPSegment(body), true - } - - return "", "", false -} - -func normalizeCopilotCursorStyleMCPName(name string) string { - if strings.HasPrefix(name, "mcp-") { - rest := name[4:] - parts := strings.SplitN(rest, "-user-", 2) - if len(parts) == 2 { - server := parts[0] - afterUser := parts[1] - serverDash := server + "-" - if strings.HasPrefix(afterUser, serverDash) { - return "mcp__" + server + "__" + afterUser[len(serverDash):] - } - if idx := strings.LastIndex(afterUser, "-"); idx > 0 { - return "mcp__" + server + "__" + afterUser[idx+1:] - } - return "mcp__" + server + "__" + afterUser - } - if idx := strings.Index(rest, "-"); idx > 0 { - return "mcp__" + rest[:idx] + "__" + rest[idx+1:] - } - return "mcp__" + rest + "__" - } - - if strings.HasPrefix(name, "mcp_") { - rest := name[4:] - if idx := strings.Index(rest, "_"); idx > 0 { - return "mcp__" + rest[:idx] + "__" + rest[idx+1:] - } - return "mcp__" + rest + "__" - } - - return name -} - -func findCopilotTelemetryServerFunctionSplit(s string) int { - best := -1 - for i := 0; i < len(s); i++ { - if s[i] != '-' { - continue - } - rest := s[i+1:] - if strings.Contains(rest, "_") { - best = i - } - } - return best -} - -func sanitizeCopilotMCPSegment(raw string) string { - raw = strings.ToLower(strings.TrimSpace(raw)) - if raw == "" { - return "" - } - - var b strings.Builder - lastUnderscore := false - for _, r := range raw { - switch { - case r >= 'a' && r <= 'z': - b.WriteRune(r) - lastUnderscore = false - case r >= '0' && r <= 
'9': - b.WriteRune(r) - lastUnderscore = false - case r == '_' || r == '-': - b.WriteRune(r) - lastUnderscore = false - default: - if !lastUnderscore { - b.WriteByte('_') - lastUnderscore = true - } - } - } - - return strings.Trim(b.String(), "_") -} - -func copilotTelemetryToolStatus(success *bool, statusRaw, errorCode, errorMessage string) shared.TelemetryStatus { - if success != nil { - if *success { - return shared.TelemetryStatusOK - } - if copilotTelemetryLooksAborted(errorCode, errorMessage, statusRaw) { - return shared.TelemetryStatusAborted - } - return shared.TelemetryStatusError - } - - switch strings.ToLower(strings.TrimSpace(statusRaw)) { - case "ok", "success", "succeeded", "completed", "complete": - return shared.TelemetryStatusOK - case "aborted", "cancelled", "canceled", "denied": - return shared.TelemetryStatusAborted - case "error", "failed", "failure": - return shared.TelemetryStatusError - } - - if errorCode != "" || errorMessage != "" { - if copilotTelemetryLooksAborted(errorCode, errorMessage, statusRaw) { - return shared.TelemetryStatusAborted - } - return shared.TelemetryStatusError - } - return shared.TelemetryStatusUnknown -} - -func copilotTelemetryLooksAborted(parts ...string) bool { - for _, part := range parts { - lower := strings.ToLower(strings.TrimSpace(part)) - if lower == "" { - continue - } - if strings.Contains(lower, "denied") || - strings.Contains(lower, "cancel") || - strings.Contains(lower, "abort") || - strings.Contains(lower, "rejected") || - strings.Contains(lower, "user initiated") { - return true - } - } - return false -} - -func summarizeCopilotTelemetryResult(raw json.RawMessage) map[string]any { - if len(strings.TrimSpace(string(raw))) == 0 { - return nil - } - decoded := decodeCopilotTelemetryJSONAny(raw) - if decoded == nil { - return nil - } - - payload := make(map[string]any) - - if paths := shared.ExtractFilePathsFromPayload(decoded); len(paths) > 0 { - payload["result_file"] = paths[0] - } - - switch value := 
decoded.(type) { - case map[string]any: - if content := anyToString(value["content"]); content != "" { - payload["result_chars"] = len(content) - if added, removed := countCopilotTelemetryUnifiedDiff(content); added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - if detailed := anyToString(value["detailedContent"]); detailed != "" { - payload["result_detailed_chars"] = len(detailed) - if _, hasLines := payload["lines_added"]; !hasLines { - if added, removed := countCopilotTelemetryUnifiedDiff(detailed); added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - } - if msg := anyToString(value["message"]); msg != "" { - payload["result_message"] = truncate(msg, 240) - } - case string: - if value != "" { - payload["result_chars"] = len(value) - if added, removed := countCopilotTelemetryUnifiedDiff(value); added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - } - - if len(payload) == 0 { - return nil - } - return payload -} - -func countCopilotTelemetryUnifiedDiff(raw string) (int, int) { - raw = strings.TrimSpace(raw) - if raw == "" { - return 0, 0 - } - if !strings.Contains(raw, "diff --git") && !strings.Contains(raw, "\n@@") { - return 0, 0 - } - - added := 0 - removed := 0 - for _, line := range strings.Split(raw, "\n") { - switch { - case strings.HasPrefix(line, "+++"), strings.HasPrefix(line, "---"), strings.HasPrefix(line, "@@"): - continue - case strings.HasPrefix(line, "+"): - added++ - case strings.HasPrefix(line, "-"): - removed++ - } - } - return added, removed -} - -func summarizeCopilotTelemetryError(raw json.RawMessage) (string, string) { - if len(strings.TrimSpace(string(raw))) == 0 { - return "", "" - } - decoded := decodeCopilotTelemetryJSONAny(raw) - if decoded == nil { - return "", "" - } - - switch value := decoded.(type) { - case map[string]any: - return 
strings.TrimSpace(anyToString(value["code"])), strings.TrimSpace(anyToString(value["message"])) - case string: - return "", strings.TrimSpace(value) - default: - return "", strings.TrimSpace(anyToString(decoded)) - } -} - -func copilotTelemetryBasePayload(path string, line int, client, repo, cwd, event string) map[string]any { - payload := map[string]any{ - "source_file": path, - "line": line, - "event": event, - "client": client, - "upstream_provider": "github", - } - if strings.TrimSpace(repo) != "" { - payload["repository"] = strings.TrimSpace(repo) - } - if strings.TrimSpace(cwd) != "" { - payload["cwd"] = strings.TrimSpace(cwd) - } - return payload -} - -func copyCopilotTelemetryPayload(in map[string]any) map[string]any { - if len(in) == 0 { - return nil - } - out := make(map[string]any, len(in)) - for key, value := range in { - out[key] = value - } - return out -} - -func decodeCopilotTelemetryJSONAny(raw any) any { - switch value := raw.(type) { - case nil: - return nil - case map[string]any: - return value - case []any: - return value - case json.RawMessage: - var out any - if json.Unmarshal(value, &out) == nil { - return out - } - return strings.TrimSpace(string(value)) - case []byte: - var out any - if json.Unmarshal(value, &out) == nil { - return out - } - return strings.TrimSpace(string(value)) - case string: - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return nil - } - var out any - if json.Unmarshal([]byte(trimmed), &out) == nil { - return out - } - return trimmed - default: - return value - } -} - -func extractCopilotTelemetryCommand(input any) string { - var command string - var walk func(value any) - walk = func(value any) { - if command != "" || value == nil { - return - } - switch v := value.(type) { - case map[string]any: - for key, child := range v { - k := strings.ToLower(strings.TrimSpace(key)) - if k == "command" || k == "cmd" || k == "script" || k == "shell_command" { - if s, ok := child.(string); ok { - command = 
strings.TrimSpace(s) - return - } - } - } - for _, child := range v { - walk(child) - if command != "" { - return - } - } - case []any: - for _, child := range v { - walk(child) - if command != "" { - return - } - } - } - } - walk(input) - return command -} - -func estimateCopilotTelemetryLineDelta(input any) (int, int) { - if input == nil { - return 0, 0 - } - encoded, err := json.Marshal(map[string]any{"arguments": input}) - if err != nil { - return 0, 0 - } - return estimateCopilotToolLineDelta(encoded) -} - -func copilotUpstreamProviderForModel(model string) string { - model = strings.ToLower(strings.TrimSpace(model)) - if model == "" || model == "unknown" { - return "github" - } - switch { - case strings.Contains(model, "claude"): - return "anthropic" - case strings.Contains(model, "gpt"), strings.HasPrefix(model, "o1"), strings.HasPrefix(model, "o3"), strings.HasPrefix(model, "o4"): - return "openai" - case strings.Contains(model, "gemini"): - return "google" - case strings.Contains(model, "qwen"): - return "alibaba_cloud" - case strings.Contains(model, "deepseek"): - return "deepseek" - case strings.Contains(model, "llama"): - return "meta" - case strings.Contains(model, "mistral"): - return "mistral" - default: - return "github" - } -} - -func anyToString(v any) string { - switch value := v.(type) { - case string: - return value - case fmt.Stringer: - return value.String() - default: - if value == nil { - return "" - } - return fmt.Sprintf("%v", value) - } -} - -func truncate(input string, max int) string { - input = strings.TrimSpace(input) - if max <= 0 || len(input) <= max { - return input - } - return input[:max] -} - -func parseCopilotTelemetrySessionStore(ctx context.Context, dbPath string, skipSessions map[string]bool) ([]shared.TelemetryEvent, error) { - if strings.TrimSpace(dbPath) == "" { - return nil, nil - } - if _, err := os.Stat(dbPath); err != nil { - return nil, nil - } - - db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", 
dbPath)) - if err != nil { - return nil, err - } - defer db.Close() - - if !copilotTelemetryTableExists(ctx, db, "sessions") || !copilotTelemetryTableExists(ctx, db, "turns") { - return nil, nil - } - - query := ` - SELECT - s.id, - COALESCE(s.cwd, ''), - COALESCE(s.repository, ''), - COALESCE(t.turn_index, 0), - COALESCE(t.user_message, ''), - COALESCE(t.assistant_response, ''), - COALESCE(t.timestamp, '') - FROM sessions s - JOIN turns t ON t.session_id = s.id - ORDER BY s.id ASC, t.turn_index ASC - ` - - rows, err := db.QueryContext(ctx, query) - if err != nil { - return nil, err - } - defer rows.Close() - - var out []shared.TelemetryEvent - for rows.Next() { - if ctx.Err() != nil { - return out, ctx.Err() - } - - var ( - sessionID string - cwd string - repo string - turnIndex int - userMsg string - reply string - tsRaw string - ) - if err := rows.Scan(&sessionID, &cwd, &repo, &turnIndex, &userMsg, &reply, &tsRaw); err != nil { - continue - } - sessionID = strings.TrimSpace(sessionID) - if sessionID == "" || skipSessions[sessionID] { - continue - } - - workspaceID := shared.SanitizeWorkspace(cwd) - clientLabel := normalizeCopilotClient(repo, cwd) - occurredAt := time.Now().UTC() - if parsed := shared.FlexParseTime(tsRaw); !parsed.IsZero() { - occurredAt = parsed - } - - messageID := fmt.Sprintf("%s:turn:%d", sessionID, turnIndex) - model := "unknown" - - payload := map[string]any{ - "source_file": dbPath, - "event": "session_store.turn", - "client": clientLabel, - "upstream_provider": "github", - "session_store_fallback": true, - "user_chars": len(strings.TrimSpace(userMsg)), - "assistant_chars": len(strings.TrimSpace(reply)), - "turn_index": turnIndex, - } - if strings.TrimSpace(repo) != "" { - payload["repository"] = strings.TrimSpace(repo) - } - if strings.TrimSpace(cwd) != "" { - payload["cwd"] = strings.TrimSpace(cwd) - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelSQLite, - 
OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: messageID, - MessageID: messageID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: model, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - Status: shared.TelemetryStatusOK, - Payload: payload, - }) - } - if err := rows.Err(); err != nil { - return out, err - } - - // Add session_files fallback for language/code-stats even when JSONL tool - // execution events are unavailable. - if copilotTelemetryTableExists(ctx, db, "session_files") { - fileRows, err := db.QueryContext(ctx, ` - SELECT - COALESCE(sf.session_id, ''), - COALESCE(sf.file_path, ''), - COALESCE(sf.tool_name, ''), - COALESCE(sf.turn_index, 0), - COALESCE(sf.first_seen_at, ''), - COALESCE(s.cwd, ''), - COALESCE(s.repository, '') - FROM session_files sf - LEFT JOIN sessions s ON s.id = sf.session_id - ORDER BY sf.session_id ASC, sf.turn_index ASC, sf.id ASC - `) - if err == nil { - defer fileRows.Close() - for fileRows.Next() { - if ctx.Err() != nil { - return out, ctx.Err() - } - var ( - sessionID string - filePath string - toolRaw string - turnIndex int - tsRaw string - cwd string - repo string - ) - if err := fileRows.Scan(&sessionID, &filePath, &toolRaw, &turnIndex, &tsRaw, &cwd, &repo); err != nil { - continue - } - sessionID = strings.TrimSpace(sessionID) - filePath = strings.TrimSpace(filePath) - if sessionID == "" || filePath == "" || skipSessions[sessionID] { - continue - } - - workspaceID := shared.SanitizeWorkspace(cwd) - clientLabel := normalizeCopilotClient(repo, cwd) - occurredAt := time.Now().UTC() - if parsed := shared.FlexParseTime(tsRaw); !parsed.IsZero() { - occurredAt = parsed - } - - toolName, meta := normalizeCopilotTelemetryToolName(toolRaw) - if toolName == "" || toolName == "unknown" { - toolName = "workspace_file_changed" - } - - toolCallID := fmt.Sprintf("store:%s:%d:%s", sessionID, 
turnIndex, sanitizeMetricName(filePath)) - messageID := fmt.Sprintf("%s:turn:%d", sessionID, turnIndex) - payload := map[string]any{ - "source_file": dbPath, - "event": "session_store.file", - "client": clientLabel, - "upstream_provider": "github", - "session_store_fallback": true, - "file": filePath, - "turn_index": turnIndex, - "tool_name_raw": strings.TrimSpace(toolRaw), - } - for key, value := range meta { - payload[key] = value - } - if lang := inferCopilotLanguageFromPath(filePath); lang != "" { - payload["language"] = lang - } - if strings.TrimSpace(repo) != "" { - payload["repository"] = strings.TrimSpace(repo) - } - if strings.TrimSpace(cwd) != "" { - payload["cwd"] = strings.TrimSpace(cwd) - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySchemaVersion, - Channel: shared.TelemetryChannelSQLite, - OccurredAt: occurredAt, - AccountID: "copilot", - WorkspaceID: workspaceID, - SessionID: sessionID, - TurnID: messageID, - MessageID: messageID, - ToolCallID: toolCallID, - ProviderID: "copilot", - AgentName: "copilot", - EventType: shared.TelemetryEventTypeToolUsage, - ModelRaw: "unknown", - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - ToolName: toolName, - Status: shared.TelemetryStatusOK, - Payload: payload, - }) - } - } - } - - return out, nil -} - -// logTokenDelta represents a token count observation from CompactionProcessor logs. -type logTokenDelta struct { - Timestamp time.Time - Used int64 - Limit int64 -} - -// compactionRe matches CompactionProcessor utilization log lines. 
-// Example: 2026-02-21T19:45:41.056Z [INFO] CompactionProcessor: Utilization 16.0% (20465/128000 tokens) -var compactionRe = regexp.MustCompile( - `^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z)\s+\[INFO\]\s+CompactionProcessor:\s+Utilization\s+[\d.]+%\s+\((\d+)/(\d+)\s+tokens\)`, -) - -// parseCopilotLogTokenDeltas parses CompactionProcessor log entries and returns -// estimated token deltas (positive differences between consecutive entries). -func parseCopilotLogTokenDeltas(logsDir string) []logTokenDelta { - if logsDir == "" { - return nil - } - entries, err := os.ReadDir(logsDir) - if err != nil { - return nil - } - - var observations []logTokenDelta - for _, entry := range entries { - if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".log") { - continue - } - f, err := os.Open(filepath.Join(logsDir, entry.Name())) - if err != nil { - continue - } - scanner := bufio.NewScanner(f) - for scanner.Scan() { - m := compactionRe.FindStringSubmatch(scanner.Text()) - if m == nil { - continue - } - ts, err := time.Parse(time.RFC3339Nano, m[1]) - if err != nil { - continue - } - used, _ := strconv.ParseInt(m[2], 10, 64) - limit, _ := strconv.ParseInt(m[3], 10, 64) - observations = append(observations, logTokenDelta{Timestamp: ts, Used: used, Limit: limit}) - } - f.Close() - } - - if len(observations) < 2 { - return nil - } - - sort.Slice(observations, func(i, j int) bool { - return observations[i].Timestamp.Before(observations[j].Timestamp) - }) - - // Compute positive deltas — each delta represents approximate tokens consumed - // between consecutive CompactionProcessor observations. 
- var deltas []logTokenDelta - for i := 1; i < len(observations); i++ { - diff := observations[i].Used - observations[i-1].Used - if diff > 0 { - deltas = append(deltas, logTokenDelta{ - Timestamp: observations[i].Timestamp, - Used: diff, - Limit: observations[i].Limit, - }) - } - } - return deltas -} - -// enrichSyntheticTokenEstimates attaches estimated InputTokens to synthetic -// message_usage events by matching event timestamps to log token deltas. -func enrichSyntheticTokenEstimates(events []shared.TelemetryEvent, deltas []logTokenDelta) { - if len(deltas) == 0 { - return - } - for i := range events { - ev := &events[i] - if ev.EventType != shared.TelemetryEventTypeMessageUsage || ev.InputTokens != nil { - continue - } - if len(ev.Payload) == 0 { - continue - } - if syn, _ := ev.Payload["synthetic"].(bool); !syn { - continue - } - // Find the closest delta within 30 seconds of the event. - var bestDelta *logTokenDelta - bestGap := 30 * time.Second - for j := range deltas { - gap := ev.OccurredAt.Sub(deltas[j].Timestamp) - if gap < 0 { - gap = -gap - } - if gap < bestGap { - bestGap = gap - bestDelta = &deltas[j] - } - } - if bestDelta != nil { - ev.InputTokens = core.Int64Ptr(bestDelta.Used) - ev.Payload["estimated_tokens"] = true - } - } -} - -func defaultCopilotLogsPath() string { - home, err := os.UserHomeDir() - if err != nil || strings.TrimSpace(home) == "" { - return "" - } - return filepath.Join(home, defaultCopilotLogsDir) -} - -func copilotTelemetryTableExists(ctx context.Context, db *sql.DB, table string) bool { - var exists int - err := db.QueryRowContext(ctx, - `SELECT 1 FROM sqlite_master WHERE type='table' AND name=? 
LIMIT 1`, - strings.TrimSpace(table), - ).Scan(&exists) - return err == nil && exists == 1 -} diff --git a/internal/providers/copilot/telemetry_logs.go b/internal/providers/copilot/telemetry_logs.go new file mode 100644 index 0000000..c4bd420 --- /dev/null +++ b/internal/providers/copilot/telemetry_logs.go @@ -0,0 +1,120 @@ +package copilot + +import ( + "bufio" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +type logTokenDelta struct { + Timestamp time.Time + Used int64 + Limit int64 +} + +var compactionRe = regexp.MustCompile( + `^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z)\s+\[INFO\]\s+CompactionProcessor:\s+Utilization\s+[\d.]+%\s+\((\d+)/(\d+)\s+tokens\)`, +) + +func parseCopilotLogTokenDeltas(logsDir string) []logTokenDelta { + if logsDir == "" { + return nil + } + entries, err := os.ReadDir(logsDir) + if err != nil { + return nil + } + + var observations []logTokenDelta + for _, entry := range entries { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".log") { + continue + } + f, err := os.Open(filepath.Join(logsDir, entry.Name())) + if err != nil { + continue + } + scanner := bufio.NewScanner(f) + for scanner.Scan() { + m := compactionRe.FindStringSubmatch(scanner.Text()) + if m == nil { + continue + } + ts, err := time.Parse(time.RFC3339Nano, m[1]) + if err != nil { + continue + } + used, _ := strconv.ParseInt(m[2], 10, 64) + limit, _ := strconv.ParseInt(m[3], 10, 64) + observations = append(observations, logTokenDelta{Timestamp: ts, Used: used, Limit: limit}) + } + _ = f.Close() + } + if len(observations) < 2 { + return nil + } + + sort.Slice(observations, func(i, j int) bool { + return observations[i].Timestamp.Before(observations[j].Timestamp) + }) + + deltas := make([]logTokenDelta, 0, len(observations)-1) + for i := 1; i < len(observations); i++ { + diff := observations[i].Used - 
observations[i-1].Used + if diff > 0 { + deltas = append(deltas, logTokenDelta{ + Timestamp: observations[i].Timestamp, + Used: diff, + Limit: observations[i].Limit, + }) + } + } + return deltas +} + +func enrichSyntheticTokenEstimates(events []shared.TelemetryEvent, deltas []logTokenDelta) { + if len(deltas) == 0 { + return + } + for i := range events { + ev := &events[i] + if ev.EventType != shared.TelemetryEventTypeMessageUsage || ev.InputTokens != nil || len(ev.Payload) == 0 { + continue + } + if syn, _ := ev.Payload["synthetic"].(bool); !syn { + continue + } + var bestDelta *logTokenDelta + bestGap := 30 * time.Second + for j := range deltas { + gap := ev.OccurredAt.Sub(deltas[j].Timestamp) + if gap < 0 { + gap = -gap + } + if gap < bestGap { + bestGap = gap + bestDelta = &deltas[j] + } + } + if bestDelta != nil { + ev.InputTokens = core.Int64Ptr(bestDelta.Used) + ev.Payload["estimated_tokens"] = true + } + } +} + +func defaultCopilotLogsPath() string { + home, err := os.UserHomeDir() + if err != nil || strings.TrimSpace(home) == "" { + return "" + } + return filepath.Join(home, defaultCopilotLogsDir) +} diff --git a/internal/providers/copilot/telemetry_session_file.go b/internal/providers/copilot/telemetry_session_file.go new file mode 100644 index 0000000..9a4a1bd --- /dev/null +++ b/internal/providers/copilot/telemetry_session_file.go @@ -0,0 +1,1148 @@ +package copilot + +import ( + "encoding/json" + "fmt" + "os" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +type copilotTelemetrySessionState struct { + path string + sessionID string + currentModel string + workspaceID string + repo string + cwd string + clientLabel string + turnIndex int + assistantUsageSeen bool + toolContexts map[string]copilotTelemetryToolContext +} + +// parseCopilotTelemetrySessionFile parses a single session's events.jsonl and +// produces telemetry events from 
assistant.usage and assistant.message entries. +func parseCopilotTelemetrySessionFile(path, sessionID string) ([]shared.TelemetryEvent, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + state := copilotTelemetrySessionState{ + path: path, + sessionID: sessionID, + clientLabel: "cli", + toolContexts: make(map[string]copilotTelemetryToolContext), + } + + lines := strings.Split(string(data), "\n") + out := make([]shared.TelemetryEvent, 0, len(lines)) + for lineNum, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + var evt sessionEvent + if json.Unmarshal([]byte(line), &evt) != nil { + continue + } + occurredAt := time.Now().UTC() + if ts := shared.FlexParseTime(evt.Timestamp); !ts.IsZero() { + occurredAt = ts + } + appendSessionEvents(&out, &state, lineNum+1, evt, occurredAt) + } + + return out, nil +} + +func appendSessionEvents(out *[]shared.TelemetryEvent, state *copilotTelemetrySessionState, lineNum int, evt sessionEvent, occurredAt time.Time) { + switch evt.Type { + case "session.start": + state.applyStart(evt.Data) + case "session.context_changed": + state.applyContextChanged(evt.Data) + case "session.model_change": + state.applyModelChange(evt.Data) + case "session.info": + state.applySessionInfo(evt.Data) + case "assistant.message": + appendAssistantMessageEvents(out, state, lineNum, evt, occurredAt) + case "tool.execution_start": + appendToolExecutionStartEvent(out, state, lineNum, evt.Data, occurredAt) + case "tool.execution_complete": + appendToolExecutionCompleteEvent(out, state, lineNum, evt.Data, occurredAt) + case "session.workspace_file_changed": + appendWorkspaceFileChangedEvent(out, state, lineNum, evt.Data, occurredAt) + case "assistant.turn_start": + return + case "assistant.turn_end": + appendSyntheticTurnEndEvent(out, state, lineNum, evt.ID, occurredAt) + case "assistant.usage": + appendAssistantUsageEvent(out, state, lineNum, evt.ID, evt.Data, occurredAt) + case 
"session.shutdown": + appendSessionShutdownEvents(out, state, lineNum, evt.ID, evt.Data, occurredAt) + } +} + +func (s *copilotTelemetrySessionState) applyStart(raw json.RawMessage) { + var start sessionStartData + if json.Unmarshal(raw, &start) != nil { + return + } + s.applyContext(start.Context.Repository, start.Context.CWD) + if s.currentModel == "" && start.SelectedModel != "" { + s.currentModel = start.SelectedModel + } +} + +func (s *copilotTelemetrySessionState) applyContextChanged(raw json.RawMessage) { + var changed copilotTelemetrySessionContextChangedData + if json.Unmarshal(raw, &changed) != nil { + return + } + s.applyContext(changed.Repository, changed.CWD) +} + +func (s *copilotTelemetrySessionState) applyContext(repository, cwd string) { + if repository != "" { + s.repo = repository + } + if cwd != "" { + s.cwd = cwd + s.workspaceID = shared.SanitizeWorkspace(cwd) + } + s.clientLabel = normalizeCopilotClient(s.repo, s.cwd) +} + +func (s *copilotTelemetrySessionState) applyModelChange(raw json.RawMessage) { + var mc modelChangeData + if json.Unmarshal(raw, &mc) == nil && mc.NewModel != "" { + s.currentModel = mc.NewModel + } +} + +func (s *copilotTelemetrySessionState) applySessionInfo(raw json.RawMessage) { + var info sessionInfoData + if json.Unmarshal(raw, &info) == nil && info.InfoType == "model" { + if model := extractModelFromInfoMsg(info.Message); model != "" { + s.currentModel = model + } + } +} + +func appendAssistantMessageEvents(out *[]shared.TelemetryEvent, state *copilotTelemetrySessionState, lineNum int, evt sessionEvent, occurredAt time.Time) { + var msg copilotTelemetryAssistantMessageData + if json.Unmarshal(evt.Data, &msg) != nil { + return + } + + var toolRequests []json.RawMessage + if json.Unmarshal(msg.ToolRequests, &toolRequests) != nil || len(toolRequests) == 0 { + return + } + + messageID := copilotTelemetryMessageID(state.sessionID, lineNum, msg.MessageID, evt.ID) + turnID := core.FirstNonEmpty(messageID, 
fmt.Sprintf("%s:line:%d", state.sessionID, lineNum)) + + for reqIdx, rawReq := range toolRequests { + req, ok := parseCopilotTelemetryToolRequest(rawReq) + if !ok { + continue + } + appendAssistantToolRequestEvent(out, state, lineNum, occurredAt, messageID, turnID, reqIdx, rawReq, req) + } +} + +func appendAssistantToolRequestEvent( + out *[]shared.TelemetryEvent, + state *copilotTelemetrySessionState, + lineNum int, + occurredAt time.Time, + messageID, turnID string, + reqIdx int, + rawReq json.RawMessage, + req copilotTelemetryToolRequest, +) { + explicitCallID := strings.TrimSpace(req.ToolCallID) != "" + toolCallID := strings.TrimSpace(req.ToolCallID) + if toolCallID == "" { + toolCallID = fmt.Sprintf("%s:%d:tool:%d", state.sessionID, lineNum, reqIdx+1) + } + + toolName, toolMeta := normalizeCopilotTelemetryToolName(req.RawName) + if toolName == "" { + toolName = "unknown" + } + payload := copilotTelemetryBasePayload(state.path, lineNum, state.clientLabel, state.repo, state.cwd, "assistant.message.tool_request") + for key, value := range toolMeta { + payload[key] = value + } + payload["tool_call_id"] = toolCallID + + applyTelemetryToolInputPayload(payload, req.Input) + applyTelemetryFallbackPayload(payload, rawReq) + + model := currentOrUnknownModel(state.currentModel) + if upstream := copilotUpstreamProviderForModel(model); upstream != "" { + payload["upstream_provider"] = upstream + } + + *out = append(*out, shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: state.workspaceID, + SessionID: state.sessionID, + TurnID: turnID, + MessageID: messageID, + ToolCallID: toolCallID, + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeToolUsage, + ModelRaw: model, + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(1), + }, + ToolName: toolName, + Status: shared.TelemetryStatusUnknown, + Payload: payload, + }) + + 
if explicitCallID { + state.toolContexts[toolCallID] = copilotTelemetryToolContext{ + MessageID: messageID, + TurnID: turnID, + Model: model, + ToolName: toolName, + Payload: copyCopilotTelemetryPayload(payload), + } + } +} + +func applyTelemetryToolInputPayload(payload map[string]any, input any) { + if input == nil { + return + } + payload["tool_input"] = input + if cmd := extractCopilotTelemetryCommand(input); cmd != "" { + payload["command"] = cmd + } + if paths := shared.ExtractFilePathsFromPayload(input); len(paths) > 0 { + payload["file"] = paths[0] + if lang := inferCopilotLanguageFromPath(paths[0]); lang != "" { + payload["language"] = lang + } + } + if added, removed := estimateCopilotTelemetryLineDelta(input); added > 0 || removed > 0 { + payload["lines_added"] = added + payload["lines_removed"] = removed + } +} + +func applyTelemetryFallbackPayload(payload map[string]any, rawReq json.RawMessage) { + if _, ok := payload["command"]; !ok { + if cmd := extractCopilotToolCommand(rawReq); cmd != "" { + payload["command"] = cmd + } + } + if _, ok := payload["file"]; !ok { + if paths := extractCopilotToolPaths(rawReq); len(paths) > 0 { + payload["file"] = paths[0] + if lang := inferCopilotLanguageFromPath(paths[0]); lang != "" { + payload["language"] = lang + } + } + } + if _, ok := payload["lines_added"]; !ok { + added, removed := estimateCopilotToolLineDelta(rawReq) + if added > 0 || removed > 0 { + payload["lines_added"] = added + payload["lines_removed"] = removed + } + } +} + +func appendToolExecutionStartEvent(out *[]shared.TelemetryEvent, state *copilotTelemetrySessionState, lineNum int, raw json.RawMessage, occurredAt time.Time) { + var start copilotTelemetryToolExecutionStartData + if json.Unmarshal(raw, &start) != nil { + return + } + + explicitCallID := strings.TrimSpace(start.ToolCallID) != "" + toolCallID := strings.TrimSpace(start.ToolCallID) + if toolCallID == "" { + toolCallID = fmt.Sprintf("%s:%d:tool_start", state.sessionID, lineNum) + } + + 
ctx := state.toolContexts[toolCallID] + payload := copyCopilotTelemetryPayload(ctx.Payload) + if len(payload) == 0 { + payload = copilotTelemetryBasePayload(state.path, lineNum, state.clientLabel, state.repo, state.cwd, "tool.execution_start") + } else { + payload["event"] = "tool.execution_start" + payload["line"] = lineNum + } + payload["tool_call_id"] = toolCallID + + toolName := strings.TrimSpace(ctx.ToolName) + if start.ToolName != "" { + normalized, meta := normalizeCopilotTelemetryToolName(start.ToolName) + toolName = normalized + for key, value := range meta { + payload[key] = value + } + } + if toolName == "" { + toolName = "unknown" + } + + if args := decodeCopilotTelemetryJSONAny(start.Arguments); args != nil { + applyTelemetryToolInputPayload(payload, args) + } + + model := currentOrUnknownModel(core.FirstNonEmpty(strings.TrimSpace(ctx.Model), strings.TrimSpace(state.currentModel))) + if upstream := copilotUpstreamProviderForModel(model); upstream != "" { + payload["upstream_provider"] = upstream + } + + messageID := core.FirstNonEmpty(ctx.MessageID, fmt.Sprintf("%s:%d", state.sessionID, lineNum)) + turnID := core.FirstNonEmpty(ctx.TurnID, messageID) + appendToolExecutionEvent(out, state, occurredAt, messageID, turnID, toolCallID, model, toolName, shared.TelemetryStatusUnknown, payload) + + if explicitCallID { + state.toolContexts[toolCallID] = copilotTelemetryToolContext{ + MessageID: messageID, + TurnID: turnID, + Model: model, + ToolName: toolName, + Payload: copyCopilotTelemetryPayload(payload), + } + } +} + +func appendToolExecutionCompleteEvent(out *[]shared.TelemetryEvent, state *copilotTelemetrySessionState, lineNum int, raw json.RawMessage, occurredAt time.Time) { + var complete copilotTelemetryToolExecutionCompleteData + if json.Unmarshal(raw, &complete) != nil { + return + } + + toolCallID := strings.TrimSpace(complete.ToolCallID) + explicitCallID := toolCallID != "" + if toolCallID == "" { + toolCallID = fmt.Sprintf("%s:%d:tool_complete", 
state.sessionID, lineNum) + } + + ctx := state.toolContexts[toolCallID] + payload := copyCopilotTelemetryPayload(ctx.Payload) + if len(payload) == 0 { + payload = copilotTelemetryBasePayload(state.path, lineNum, state.clientLabel, state.repo, state.cwd, "tool.execution_complete") + } else { + payload["event"] = "tool.execution_complete" + payload["line"] = lineNum + } + payload["tool_call_id"] = toolCallID + + toolName := strings.TrimSpace(ctx.ToolName) + if complete.ToolName != "" { + normalized, meta := normalizeCopilotTelemetryToolName(complete.ToolName) + toolName = normalized + for key, value := range meta { + payload[key] = value + } + } + if toolName == "" { + toolName = "unknown" + } + if complete.Success != nil { + payload["success"] = *complete.Success + } + if strings.TrimSpace(complete.Status) != "" { + payload["status_raw"] = strings.TrimSpace(complete.Status) + } + for key, value := range summarizeCopilotTelemetryResult(complete.Result) { + if _, exists := payload[key]; !exists { + payload[key] = value + } + } + errorCode, errorMessage := summarizeCopilotTelemetryError(complete.Error) + if errorCode != "" { + payload["error_code"] = errorCode + } + if errorMessage != "" { + payload["error_message"] = truncate(errorMessage, 240) + } + + model := currentOrUnknownModel(core.FirstNonEmpty(strings.TrimSpace(ctx.Model), strings.TrimSpace(state.currentModel))) + if upstream := copilotUpstreamProviderForModel(model); upstream != "" { + payload["upstream_provider"] = upstream + } + + messageID := core.FirstNonEmpty(ctx.MessageID, fmt.Sprintf("%s:%d", state.sessionID, lineNum)) + turnID := core.FirstNonEmpty(ctx.TurnID, messageID) + status := copilotTelemetryToolStatus(complete.Success, complete.Status, errorCode, errorMessage) + appendToolExecutionEvent(out, state, occurredAt, messageID, turnID, toolCallID, model, toolName, status, payload) + + if explicitCallID { + state.toolContexts[toolCallID] = copilotTelemetryToolContext{ + MessageID: messageID, + TurnID: 
turnID, + Model: model, + ToolName: toolName, + Payload: copyCopilotTelemetryPayload(payload), + } + } +} + +func appendToolExecutionEvent( + out *[]shared.TelemetryEvent, + state *copilotTelemetrySessionState, + occurredAt time.Time, + messageID, turnID, toolCallID, model, toolName string, + status shared.TelemetryStatus, + payload map[string]any, +) { + *out = append(*out, shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: state.workspaceID, + SessionID: state.sessionID, + TurnID: turnID, + MessageID: messageID, + ToolCallID: toolCallID, + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeToolUsage, + ModelRaw: model, + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(1), + }, + ToolName: toolName, + Status: status, + Payload: payload, + }) +} + +func appendWorkspaceFileChangedEvent(out *[]shared.TelemetryEvent, state *copilotTelemetrySessionState, lineNum int, raw json.RawMessage, occurredAt time.Time) { + var changed copilotTelemetryWorkspaceFileChangedData + if json.Unmarshal(raw, &changed) != nil { + return + } + filePath := strings.TrimSpace(changed.Path) + if filePath == "" { + return + } + + op := sanitizeMetricName(changed.Operation) + if op == "" || op == "unknown" { + op = "change" + } + + payload := copilotTelemetryBasePayload(state.path, lineNum, state.clientLabel, state.repo, state.cwd, "session.workspace_file_changed") + payload["file"] = filePath + payload["operation"] = strings.TrimSpace(changed.Operation) + if lang := inferCopilotLanguageFromPath(filePath); lang != "" { + payload["language"] = lang + } + + model := currentOrUnknownModel(state.currentModel) + if upstream := copilotUpstreamProviderForModel(model); upstream != "" { + payload["upstream_provider"] = upstream + } + + *out = append(*out, shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: 
shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: state.workspaceID, + SessionID: state.sessionID, + TurnID: fmt.Sprintf("%s:file:%d", state.sessionID, lineNum), + MessageID: fmt.Sprintf("%s:%d", state.sessionID, lineNum), + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeToolUsage, + ModelRaw: model, + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(0), + }, + ToolName: "workspace_file_" + op, + Status: shared.TelemetryStatusOK, + Payload: payload, + }) +} + +func appendSyntheticTurnEndEvent(out *[]shared.TelemetryEvent, state *copilotTelemetrySessionState, lineNum int, evtID string, occurredAt time.Time) { + state.turnIndex++ + if state.assistantUsageSeen || state.currentModel == "" { + return + } + + turnID := core.FirstNonEmpty(strings.TrimSpace(evtID), fmt.Sprintf("%s:synth:%d", state.sessionID, state.turnIndex)) + messageID := fmt.Sprintf("%s:%d", state.sessionID, lineNum) + payload := copilotTelemetryBasePayload(state.path, lineNum, state.clientLabel, state.repo, state.cwd, "assistant.turn_end") + payload["synthetic"] = true + payload["upstream_provider"] = copilotUpstreamProviderForModel(state.currentModel) + *out = append(*out, shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: state.workspaceID, + SessionID: state.sessionID, + TurnID: turnID, + MessageID: messageID, + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeMessageUsage, + ModelRaw: state.currentModel, + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(1), + }, + Status: shared.TelemetryStatusOK, + Payload: payload, + }) +} + +func appendAssistantUsageEvent(out *[]shared.TelemetryEvent, state *copilotTelemetrySessionState, lineNum int, evtID string, raw json.RawMessage, occurredAt time.Time) { + var usage assistantUsageData + if json.Unmarshal(raw, 
&usage) != nil { + return + } + state.assistantUsageSeen = true + + model := core.FirstNonEmpty(usage.Model, state.currentModel) + if model == "" { + return + } + state.turnIndex++ + + turnID := core.FirstNonEmpty(strings.TrimSpace(evtID), fmt.Sprintf("%s:usage:%d", state.sessionID, state.turnIndex)) + messageID := fmt.Sprintf("%s:%d", state.sessionID, lineNum) + totalTokens := int64(usage.InputTokens + usage.OutputTokens) + payload := copilotTelemetryBasePayload(state.path, lineNum, state.clientLabel, state.repo, state.cwd, "assistant.usage") + payload["source_file"] = state.path + payload["line"] = lineNum + payload["client"] = state.clientLabel + payload["upstream_provider"] = copilotUpstreamProviderForModel(model) + if usage.Duration > 0 { + payload["duration_ms"] = usage.Duration + } + if len(usage.QuotaSnapshots) > 0 { + payload["quota_snapshot_count"] = len(usage.QuotaSnapshots) + } + + event := shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: state.workspaceID, + SessionID: state.sessionID, + TurnID: turnID, + MessageID: messageID, + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeMessageUsage, + ModelRaw: model, + TokenUsage: core.TokenUsage{ + InputTokens: core.Int64Ptr(int64(usage.InputTokens)), + OutputTokens: core.Int64Ptr(int64(usage.OutputTokens)), + TotalTokens: core.Int64Ptr(totalTokens), + Requests: core.Int64Ptr(1), + }, + Status: shared.TelemetryStatusOK, + Payload: payload, + } + if usage.CacheReadTokens > 0 { + event.CacheReadTokens = core.Int64Ptr(int64(usage.CacheReadTokens)) + } + if usage.CacheWriteTokens > 0 { + event.CacheWriteTokens = core.Int64Ptr(int64(usage.CacheWriteTokens)) + } + if usage.Cost > 0 { + event.CostUSD = core.Float64Ptr(usage.Cost) + } + *out = append(*out, event) +} + +func appendSessionShutdownEvents(out *[]shared.TelemetryEvent, state 
*copilotTelemetrySessionState, lineNum int, evtID string, raw json.RawMessage, occurredAt time.Time) { + var shutdown sessionShutdownData + if json.Unmarshal(raw, &shutdown) != nil { + return + } + + shutdownTurnID := core.FirstNonEmpty(strings.TrimSpace(evtID), fmt.Sprintf("%s:shutdown", state.sessionID)) + shutdownMessageID := fmt.Sprintf("%s:shutdown:%d", state.sessionID, lineNum) + shutdownPayload := copilotTelemetryBasePayload(state.path, lineNum, state.clientLabel, state.repo, state.cwd, "session.shutdown") + shutdownPayload["shutdown_type"] = strings.TrimSpace(shutdown.ShutdownType) + shutdownPayload["total_premium_requests"] = shutdown.TotalPremiumRequests + shutdownPayload["total_api_duration_ms"] = shutdown.TotalAPIDurationMs + shutdownPayload["session_start_time"] = strings.TrimSpace(shutdown.SessionStartTime) + shutdownPayload["lines_added"] = shutdown.CodeChanges.LinesAdded + shutdownPayload["lines_removed"] = shutdown.CodeChanges.LinesRemoved + shutdownPayload["files_modified"] = shutdown.CodeChanges.FilesModified + shutdownPayload["model_metrics_count"] = len(shutdown.ModelMetrics) + if model := strings.TrimSpace(state.currentModel); model != "" { + shutdownPayload["upstream_provider"] = copilotUpstreamProviderForModel(model) + } + + *out = append(*out, shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: state.workspaceID, + SessionID: state.sessionID, + TurnID: shutdownTurnID, + MessageID: shutdownMessageID, + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeTurnCompleted, + ModelRaw: core.FirstNonEmpty(strings.TrimSpace(state.currentModel), "unknown"), + Status: shared.TelemetryStatusOK, + Payload: shutdownPayload, + }) + + if state.assistantUsageSeen { + return + } + + models := make([]string, 0, len(shutdown.ModelMetrics)) + for model := range shutdown.ModelMetrics { + models = append(models, 
model) + } + sort.Strings(models) + + for idx, model := range models { + appendShutdownModelMetricEvent(out, state, lineNum, occurredAt, shutdown, model, idx) + } +} + +func appendShutdownModelMetricEvent(out *[]shared.TelemetryEvent, state *copilotTelemetrySessionState, lineNum int, occurredAt time.Time, shutdown sessionShutdownData, model string, idx int) { + modelMetric := shutdown.ModelMetrics[model] + model = strings.TrimSpace(model) + if model == "" { + model = core.FirstNonEmpty(strings.TrimSpace(state.currentModel), "unknown") + } + + inputTokens := int64(modelMetric.Usage.InputTokens) + outputTokens := int64(modelMetric.Usage.OutputTokens) + cacheReadTokens := int64(modelMetric.Usage.CacheReadTokens) + cacheWriteTokens := int64(modelMetric.Usage.CacheWriteTokens) + totalTokens := inputTokens + outputTokens + requests := int64(modelMetric.Requests.Count) + cost := modelMetric.Requests.Cost + if totalTokens <= 0 && requests <= 0 && cost <= 0 { + return + } + + messageID := fmt.Sprintf("%s:shutdown:%s", state.sessionID, sanitizeMetricName(model)) + if idx > 0 { + messageID = fmt.Sprintf("%s:%d", messageID, idx+1) + } + payload := copilotTelemetryBasePayload(state.path, lineNum, state.clientLabel, state.repo, state.cwd, "session.shutdown.model_metric") + payload["model_metrics_source"] = "session.shutdown" + payload["upstream_provider"] = copilotUpstreamProviderForModel(model) + if idx == 0 { + payload["lines_added"] = shutdown.CodeChanges.LinesAdded + payload["lines_removed"] = shutdown.CodeChanges.LinesRemoved + payload["files_modified"] = shutdown.CodeChanges.FilesModified + } + + event := shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: state.workspaceID, + SessionID: state.sessionID, + TurnID: messageID, + MessageID: messageID, + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeMessageUsage, + 
ModelRaw: model, + TokenUsage: core.TokenUsage{ + InputTokens: core.Int64Ptr(inputTokens), + OutputTokens: core.Int64Ptr(outputTokens), + TotalTokens: core.Int64Ptr(totalTokens), + }, + Status: shared.TelemetryStatusOK, + Payload: payload, + } + if requests > 0 { + event.Requests = core.Int64Ptr(requests) + } + if cacheReadTokens > 0 { + event.CacheReadTokens = core.Int64Ptr(cacheReadTokens) + } + if cacheWriteTokens > 0 { + event.CacheWriteTokens = core.Int64Ptr(cacheWriteTokens) + } + if cost > 0 { + event.CostUSD = core.Float64Ptr(cost) + } + *out = append(*out, event) +} + +func currentOrUnknownModel(model string) string { + model = strings.TrimSpace(model) + if model == "" { + return "unknown" + } + return model +} + +func copilotTelemetryMessageID(sessionID string, lineNum int, messageID, fallbackID string) string { + messageID = strings.TrimSpace(messageID) + if messageID != "" { + if strings.Contains(messageID, ":") { + return messageID + } + return fmt.Sprintf("%s:%s", sessionID, messageID) + } + + fallbackID = strings.TrimSpace(fallbackID) + if fallbackID != "" { + return fmt.Sprintf("%s:%s", sessionID, fallbackID) + } + return fmt.Sprintf("%s:%d", sessionID, lineNum) +} + +func parseCopilotTelemetryToolRequest(raw json.RawMessage) (copilotTelemetryToolRequest, bool) { + var reqMap map[string]any + if json.Unmarshal(raw, &reqMap) != nil { + return copilotTelemetryToolRequest{}, false + } + + out := copilotTelemetryToolRequest{ + ToolCallID: strings.TrimSpace(anyToString(reqMap["toolCallId"])), + RawName: core.FirstNonEmpty(anyToString(reqMap["name"]), anyToString(reqMap["toolName"]), anyToString(reqMap["tool"])), + } + if out.RawName == "" { + out.RawName = extractCopilotToolName(raw) + } + for _, key := range []string{"arguments", "args", "input"} { + if value, ok := reqMap[key]; ok && out.Input == nil { + out.Input = decodeCopilotTelemetryJSONAny(value) + } + } + return out, true +} + +func normalizeCopilotTelemetryToolName(raw string) (string, 
map[string]any) { + meta := map[string]any{} + name := strings.TrimSpace(raw) + if name == "" { + return "unknown", meta + } + meta["tool_name_raw"] = name + if server, function, ok := parseCopilotTelemetryMCPTool(name); ok { + meta["tool_type"] = "mcp" + meta["mcp_server"] = server + meta["mcp_function"] = function + return "mcp__" + server + "__" + function, meta + } + return sanitizeMetricName(name), meta +} + +func parseCopilotTelemetryMCPTool(raw string) (string, string, bool) { + normalized := strings.ToLower(strings.TrimSpace(raw)) + if normalized == "" { + return "", "", false + } + for _, marker := range []string{"_mcp_server_", "-mcp-server-"} { + if parts := strings.SplitN(normalized, marker, 2); len(parts) == 2 { + server := sanitizeCopilotMCPSegment(parts[0]) + function := sanitizeCopilotMCPSegment(parts[1]) + if server != "" && function != "" { + return server, function, true + } + } + } + if strings.HasPrefix(normalized, "mcp__") { + parts := strings.SplitN(strings.TrimPrefix(normalized, "mcp__"), "__", 2) + if len(parts) == 2 { + server := sanitizeCopilotMCPSegment(parts[0]) + function := sanitizeCopilotMCPSegment(parts[1]) + if server != "" && function != "" { + return server, function, true + } + } + } + if strings.HasPrefix(normalized, "mcp-") || strings.HasPrefix(normalized, "mcp_") { + canonical := normalizeCopilotCursorStyleMCPName(normalized) + if strings.HasPrefix(canonical, "mcp__") { + parts := strings.SplitN(strings.TrimPrefix(canonical, "mcp__"), "__", 2) + if len(parts) == 2 { + server := sanitizeCopilotMCPSegment(parts[0]) + function := sanitizeCopilotMCPSegment(parts[1]) + if server != "" && function != "" { + return server, function, true + } + } + } + } + if strings.HasSuffix(normalized, " (mcp)") { + body := strings.TrimSpace(strings.TrimSuffix(normalized, " (mcp)")) + body = strings.TrimPrefix(body, "user-") + if body == "" { + return "", "", false + } + if idx := findCopilotTelemetryServerFunctionSplit(body); idx > 0 { + server 
:= sanitizeCopilotMCPSegment(body[:idx]) + function := sanitizeCopilotMCPSegment(body[idx+1:]) + if server != "" && function != "" { + return server, function, true + } + } + return "other", sanitizeCopilotMCPSegment(body), true + } + return "", "", false +} + +func normalizeCopilotCursorStyleMCPName(name string) string { + if strings.HasPrefix(name, "mcp-") { + rest := name[4:] + parts := strings.SplitN(rest, "-user-", 2) + if len(parts) == 2 { + server := parts[0] + afterUser := parts[1] + serverDash := server + "-" + if strings.HasPrefix(afterUser, serverDash) { + return "mcp__" + server + "__" + afterUser[len(serverDash):] + } + if idx := strings.LastIndex(afterUser, "-"); idx > 0 { + return "mcp__" + server + "__" + afterUser[idx+1:] + } + return "mcp__" + server + "__" + afterUser + } + if idx := strings.Index(rest, "-"); idx > 0 { + return "mcp__" + rest[:idx] + "__" + rest[idx+1:] + } + return "mcp__" + rest + "__" + } + if strings.HasPrefix(name, "mcp_") { + rest := name[4:] + if idx := strings.Index(rest, "_"); idx > 0 { + return "mcp__" + rest[:idx] + "__" + rest[idx+1:] + } + return "mcp__" + rest + "__" + } + return name +} + +func findCopilotTelemetryServerFunctionSplit(s string) int { + best := -1 + for i := 0; i < len(s); i++ { + if s[i] == '-' && strings.Contains(s[i+1:], "_") { + best = i + } + } + return best +} + +func sanitizeCopilotMCPSegment(raw string) string { + raw = strings.ToLower(strings.TrimSpace(raw)) + if raw == "" { + return "" + } + var b strings.Builder + lastUnderscore := false + for _, r := range raw { + switch { + case r >= 'a' && r <= 'z', r >= '0' && r <= '9', r == '_', r == '-': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore { + b.WriteByte('_') + lastUnderscore = true + } + } + } + return strings.Trim(b.String(), "_") +} + +func copilotTelemetryToolStatus(success *bool, statusRaw, errorCode, errorMessage string) shared.TelemetryStatus { + if success != nil { + if *success { + return 
shared.TelemetryStatusOK + } + if copilotTelemetryLooksAborted(errorCode, errorMessage, statusRaw) { + return shared.TelemetryStatusAborted + } + return shared.TelemetryStatusError + } + switch strings.ToLower(strings.TrimSpace(statusRaw)) { + case "ok", "success", "succeeded", "completed", "complete": + return shared.TelemetryStatusOK + case "aborted", "cancelled", "canceled", "denied": + return shared.TelemetryStatusAborted + case "error", "failed", "failure": + return shared.TelemetryStatusError + } + if errorCode != "" || errorMessage != "" { + if copilotTelemetryLooksAborted(errorCode, errorMessage, statusRaw) { + return shared.TelemetryStatusAborted + } + return shared.TelemetryStatusError + } + return shared.TelemetryStatusUnknown +} + +func copilotTelemetryLooksAborted(parts ...string) bool { + for _, part := range parts { + lower := strings.ToLower(strings.TrimSpace(part)) + if lower == "" { + continue + } + if strings.Contains(lower, "denied") || strings.Contains(lower, "cancel") || strings.Contains(lower, "abort") || strings.Contains(lower, "rejected") || strings.Contains(lower, "user initiated") { + return true + } + } + return false +} + +func summarizeCopilotTelemetryResult(raw json.RawMessage) map[string]any { + if len(strings.TrimSpace(string(raw))) == 0 { + return nil + } + decoded := decodeCopilotTelemetryJSONAny(raw) + if decoded == nil { + return nil + } + payload := map[string]any{} + if paths := shared.ExtractFilePathsFromPayload(decoded); len(paths) > 0 { + payload["result_file"] = paths[0] + } + switch value := decoded.(type) { + case map[string]any: + if content := anyToString(value["content"]); content != "" { + payload["result_chars"] = len(content) + if added, removed := countCopilotTelemetryUnifiedDiff(content); added > 0 || removed > 0 { + payload["lines_added"] = added + payload["lines_removed"] = removed + } + } + if detailed := anyToString(value["detailedContent"]); detailed != "" { + payload["result_detailed_chars"] = len(detailed) 
+ if _, ok := payload["lines_added"]; !ok { + if added, removed := countCopilotTelemetryUnifiedDiff(detailed); added > 0 || removed > 0 { + payload["lines_added"] = added + payload["lines_removed"] = removed + } + } + } + if msg := anyToString(value["message"]); msg != "" { + payload["result_message"] = truncate(msg, 240) + } + case string: + if value != "" { + payload["result_chars"] = len(value) + if added, removed := countCopilotTelemetryUnifiedDiff(value); added > 0 || removed > 0 { + payload["lines_added"] = added + payload["lines_removed"] = removed + } + } + } + if len(payload) == 0 { + return nil + } + return payload +} + +func countCopilotTelemetryUnifiedDiff(raw string) (int, int) { + raw = strings.TrimSpace(raw) + if raw == "" || (!strings.Contains(raw, "diff --git") && !strings.Contains(raw, "\n@@")) { + return 0, 0 + } + added, removed := 0, 0 + for _, line := range strings.Split(raw, "\n") { + switch { + case strings.HasPrefix(line, "+++"), strings.HasPrefix(line, "---"), strings.HasPrefix(line, "@@"): + case strings.HasPrefix(line, "+"): + added++ + case strings.HasPrefix(line, "-"): + removed++ + } + } + return added, removed +} + +func summarizeCopilotTelemetryError(raw json.RawMessage) (string, string) { + if len(strings.TrimSpace(string(raw))) == 0 { + return "", "" + } + decoded := decodeCopilotTelemetryJSONAny(raw) + if decoded == nil { + return "", "" + } + switch value := decoded.(type) { + case map[string]any: + return strings.TrimSpace(anyToString(value["code"])), strings.TrimSpace(anyToString(value["message"])) + case string: + return "", strings.TrimSpace(value) + default: + return "", strings.TrimSpace(anyToString(decoded)) + } +} + +func copilotTelemetryBasePayload(path string, line int, client, repo, cwd, event string) map[string]any { + payload := map[string]any{ + "source_file": path, + "line": line, + "event": event, + "client": client, + "upstream_provider": "github", + } + if strings.TrimSpace(repo) != "" { + payload["repository"] 
= strings.TrimSpace(repo) + } + if strings.TrimSpace(cwd) != "" { + payload["cwd"] = strings.TrimSpace(cwd) + } + return payload +} + +func copyCopilotTelemetryPayload(in map[string]any) map[string]any { + if len(in) == 0 { + return nil + } + out := make(map[string]any, len(in)) + for key, value := range in { + out[key] = value + } + return out +} + +func decodeCopilotTelemetryJSONAny(raw any) any { + switch value := raw.(type) { + case nil: + return nil + case map[string]any, []any: + return value + case json.RawMessage: + var out any + if json.Unmarshal(value, &out) == nil { + return out + } + return strings.TrimSpace(string(value)) + case []byte: + var out any + if json.Unmarshal(value, &out) == nil { + return out + } + return strings.TrimSpace(string(value)) + case string: + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return nil + } + var out any + if json.Unmarshal([]byte(trimmed), &out) == nil { + return out + } + return trimmed + default: + return value + } +} + +func extractCopilotTelemetryCommand(input any) string { + var command string + var walk func(any) + walk = func(value any) { + if command != "" || value == nil { + return + } + switch v := value.(type) { + case map[string]any: + for key, child := range v { + k := strings.ToLower(strings.TrimSpace(key)) + if (k == "command" || k == "cmd" || k == "script" || k == "shell_command") && child != nil { + if s, ok := child.(string); ok { + command = strings.TrimSpace(s) + return + } + } + } + for _, child := range v { + walk(child) + } + case []any: + for _, child := range v { + walk(child) + } + } + } + walk(input) + return command +} + +func estimateCopilotTelemetryLineDelta(input any) (int, int) { + if input == nil { + return 0, 0 + } + encoded, err := json.Marshal(map[string]any{"arguments": input}) + if err != nil { + return 0, 0 + } + return estimateCopilotToolLineDelta(encoded) +} + +func copilotUpstreamProviderForModel(model string) string { + model = 
strings.ToLower(strings.TrimSpace(model)) + if model == "" || model == "unknown" { + return "github" + } + switch { + case strings.Contains(model, "claude"): + return "anthropic" + case strings.Contains(model, "gpt"), strings.HasPrefix(model, "o1"), strings.HasPrefix(model, "o3"), strings.HasPrefix(model, "o4"): + return "openai" + case strings.Contains(model, "gemini"): + return "google" + case strings.Contains(model, "qwen"): + return "alibaba_cloud" + case strings.Contains(model, "deepseek"): + return "deepseek" + case strings.Contains(model, "llama"): + return "meta" + case strings.Contains(model, "mistral"): + return "mistral" + default: + return "github" + } +} + +func anyToString(v any) string { + switch value := v.(type) { + case string: + return value + case fmt.Stringer: + return value.String() + default: + if value == nil { + return "" + } + return fmt.Sprintf("%v", value) + } +} + +func truncate(input string, max int) string { + input = strings.TrimSpace(input) + if max <= 0 || len(input) <= max { + return input + } + return input[:max] +} diff --git a/internal/providers/copilot/telemetry_session_store.go b/internal/providers/copilot/telemetry_session_store.go new file mode 100644 index 0000000..ae2b61d --- /dev/null +++ b/internal/providers/copilot/telemetry_session_store.go @@ -0,0 +1,223 @@ +package copilot + +import ( + "context" + "database/sql" + "fmt" + "os" + "strings" + "time" + + _ "github.com/mattn/go-sqlite3" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +func parseCopilotTelemetrySessionStore(ctx context.Context, dbPath string, skipSessions map[string]bool) ([]shared.TelemetryEvent, error) { + if strings.TrimSpace(dbPath) == "" { + return nil, nil + } + if _, err := os.Stat(dbPath); err != nil { + return nil, nil + } + + db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?mode=ro", dbPath)) + if err != nil { + return nil, err + } + defer db.Close() + + if 
!copilotTelemetryTableExists(ctx, db, "sessions") || !copilotTelemetryTableExists(ctx, db, "turns") { + return nil, nil + } + + out, err := appendSessionStoreTurnEvents(ctx, db, dbPath, skipSessions) + if err != nil { + return out, err + } + if !copilotTelemetryTableExists(ctx, db, "session_files") { + return out, nil + } + return appendSessionStoreFileEvents(ctx, db, dbPath, skipSessions, out) +} + +func appendSessionStoreTurnEvents(ctx context.Context, db *sql.DB, dbPath string, skipSessions map[string]bool) ([]shared.TelemetryEvent, error) { + rows, err := db.QueryContext(ctx, ` + SELECT + s.id, + COALESCE(s.cwd, ''), + COALESCE(s.repository, ''), + COALESCE(t.turn_index, 0), + COALESCE(t.user_message, ''), + COALESCE(t.assistant_response, ''), + COALESCE(t.timestamp, '') + FROM sessions s + JOIN turns t ON t.session_id = s.id + ORDER BY s.id ASC, t.turn_index ASC + `) + if err != nil { + return nil, err + } + defer rows.Close() + + var out []shared.TelemetryEvent + for rows.Next() { + if ctx.Err() != nil { + return out, ctx.Err() + } + var sessionID, cwd, repo, userMsg, reply, tsRaw string + var turnIndex int + if err := rows.Scan(&sessionID, &cwd, &repo, &turnIndex, &userMsg, &reply, &tsRaw); err != nil { + continue + } + sessionID = strings.TrimSpace(sessionID) + if sessionID == "" || skipSessions[sessionID] { + continue + } + out = append(out, buildSessionStoreTurnEvent(dbPath, sessionID, cwd, repo, userMsg, reply, tsRaw, turnIndex)) + } + return out, rows.Err() +} + +func buildSessionStoreTurnEvent(dbPath, sessionID, cwd, repo, userMsg, reply, tsRaw string, turnIndex int) shared.TelemetryEvent { + occurredAt := time.Now().UTC() + if parsed := shared.FlexParseTime(tsRaw); !parsed.IsZero() { + occurredAt = parsed + } + messageID := fmt.Sprintf("%s:turn:%d", sessionID, turnIndex) + payload := map[string]any{ + "source_file": dbPath, + "event": "session_store.turn", + "client": normalizeCopilotClient(repo, cwd), + "upstream_provider": "github", + 
"session_store_fallback": true, + "user_chars": len(strings.TrimSpace(userMsg)), + "assistant_chars": len(strings.TrimSpace(reply)), + "turn_index": turnIndex, + } + if strings.TrimSpace(repo) != "" { + payload["repository"] = strings.TrimSpace(repo) + } + if strings.TrimSpace(cwd) != "" { + payload["cwd"] = strings.TrimSpace(cwd) + } + return shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: shared.TelemetryChannelSQLite, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: shared.SanitizeWorkspace(cwd), + SessionID: sessionID, + TurnID: messageID, + MessageID: messageID, + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeMessageUsage, + ModelRaw: "unknown", + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(1), + }, + Status: shared.TelemetryStatusOK, + Payload: payload, + } +} + +func appendSessionStoreFileEvents(ctx context.Context, db *sql.DB, dbPath string, skipSessions map[string]bool, out []shared.TelemetryEvent) ([]shared.TelemetryEvent, error) { + rows, err := db.QueryContext(ctx, ` + SELECT + COALESCE(sf.session_id, ''), + COALESCE(sf.file_path, ''), + COALESCE(sf.tool_name, ''), + COALESCE(sf.turn_index, 0), + COALESCE(sf.first_seen_at, ''), + COALESCE(s.cwd, ''), + COALESCE(s.repository, '') + FROM session_files sf + LEFT JOIN sessions s ON s.id = sf.session_id + ORDER BY sf.session_id ASC, sf.turn_index ASC, sf.id ASC + `) + if err != nil { + return out, nil + } + defer rows.Close() + + for rows.Next() { + if ctx.Err() != nil { + return out, ctx.Err() + } + var sessionID, filePath, toolRaw, tsRaw, cwd, repo string + var turnIndex int + if err := rows.Scan(&sessionID, &filePath, &toolRaw, &turnIndex, &tsRaw, &cwd, &repo); err != nil { + continue + } + sessionID = strings.TrimSpace(sessionID) + filePath = strings.TrimSpace(filePath) + if sessionID == "" || filePath == "" || skipSessions[sessionID] { + continue + } + out = append(out, buildSessionStoreFileEvent(dbPath, 
sessionID, filePath, toolRaw, tsRaw, cwd, repo, turnIndex)) + } + return out, nil +} + +func buildSessionStoreFileEvent(dbPath, sessionID, filePath, toolRaw, tsRaw, cwd, repo string, turnIndex int) shared.TelemetryEvent { + occurredAt := time.Now().UTC() + if parsed := shared.FlexParseTime(tsRaw); !parsed.IsZero() { + occurredAt = parsed + } + toolName, meta := normalizeCopilotTelemetryToolName(toolRaw) + if toolName == "" || toolName == "unknown" { + toolName = "workspace_file_changed" + } + messageID := fmt.Sprintf("%s:turn:%d", sessionID, turnIndex) + payload := map[string]any{ + "source_file": dbPath, + "event": "session_store.file", + "client": normalizeCopilotClient(repo, cwd), + "upstream_provider": "github", + "session_store_fallback": true, + "file": filePath, + "turn_index": turnIndex, + "tool_name_raw": strings.TrimSpace(toolRaw), + } + for key, value := range meta { + payload[key] = value + } + if lang := inferCopilotLanguageFromPath(filePath); lang != "" { + payload["language"] = lang + } + if strings.TrimSpace(repo) != "" { + payload["repository"] = strings.TrimSpace(repo) + } + if strings.TrimSpace(cwd) != "" { + payload["cwd"] = strings.TrimSpace(cwd) + } + return shared.TelemetryEvent{ + SchemaVersion: telemetrySchemaVersion, + Channel: shared.TelemetryChannelSQLite, + OccurredAt: occurredAt, + AccountID: "copilot", + WorkspaceID: shared.SanitizeWorkspace(cwd), + SessionID: sessionID, + TurnID: messageID, + MessageID: messageID, + ToolCallID: fmt.Sprintf("store:%s:%d:%s", sessionID, turnIndex, sanitizeMetricName(filePath)), + ProviderID: "copilot", + AgentName: "copilot", + EventType: shared.TelemetryEventTypeToolUsage, + ModelRaw: "unknown", + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(1), + }, + ToolName: toolName, + Status: shared.TelemetryStatusOK, + Payload: payload, + } +} + +func copilotTelemetryTableExists(ctx context.Context, db *sql.DB, table string) bool { + var exists int + err := db.QueryRowContext(ctx, `SELECT 1 FROM 
sqlite_master WHERE type='table' AND name=? LIMIT 1`, strings.TrimSpace(table)).Scan(&exists) + return err == nil && exists == 1 +} diff --git a/internal/providers/ollama/local_paths.go b/internal/providers/ollama/local_paths.go new file mode 100644 index 0000000..7c252df --- /dev/null +++ b/internal/providers/ollama/local_paths.go @@ -0,0 +1,94 @@ +package ollama + +import ( + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func resolveDesktopDBPath(acct core.AccountConfig) string { + if acct.ExtraData != nil { + for _, key := range []string{"db_path", "app_db"} { + if v := strings.TrimSpace(acct.ExtraData[key]); v != "" { + return v + } + } + } + + home, err := os.UserHomeDir() + if err != nil { + return "" + } + + switch runtime.GOOS { + case "darwin": + return filepath.Join(home, "Library", "Application Support", "Ollama", "db.sqlite") + case "linux": + candidates := []string{ + filepath.Join(home, ".local", "share", "Ollama", "db.sqlite"), + filepath.Join(home, ".config", "Ollama", "db.sqlite"), + } + for _, c := range candidates { + if fileExists(c) { + return c + } + } + return candidates[0] + case "windows": + appData := os.Getenv("APPDATA") + if appData != "" { + return filepath.Join(appData, "Ollama", "db.sqlite") + } + return filepath.Join(home, "AppData", "Roaming", "Ollama", "db.sqlite") + default: + return filepath.Join(home, ".ollama", "db.sqlite") + } +} + +func resolveServerConfigPath(acct core.AccountConfig) string { + if acct.ExtraData != nil { + if v := strings.TrimSpace(acct.ExtraData["server_config"]); v != "" { + return v + } + if configDir := strings.TrimSpace(acct.ExtraData["config_dir"]); configDir != "" { + return filepath.Join(configDir, "server.json") + } + } + + home, err := os.UserHomeDir() + if err != nil { + return "" + } + return filepath.Join(home, ".ollama", "server.json") +} + +func resolveServerLogFiles(acct core.AccountConfig) []string { + logDir := "" + 
if acct.ExtraData != nil { + logDir = strings.TrimSpace(acct.ExtraData["logs_dir"]) + if logDir == "" { + if configDir := strings.TrimSpace(acct.ExtraData["config_dir"]); configDir != "" { + logDir = filepath.Join(configDir, "logs") + } + } + } + if logDir == "" { + home, err := os.UserHomeDir() + if err != nil { + return nil + } + logDir = filepath.Join(home, ".ollama", "logs") + } + + pattern := filepath.Join(logDir, "server*.log") + files, err := filepath.Glob(pattern) + if err != nil { + return nil + } + sort.Strings(files) + return files +} diff --git a/internal/providers/ollama/ollama.go b/internal/providers/ollama/ollama.go index d6b9f1f..ae147ae 100644 --- a/internal/providers/ollama/ollama.go +++ b/internal/providers/ollama/ollama.go @@ -1,20 +1,15 @@ package ollama import ( - "bufio" - "bytes" "context" "database/sql" "encoding/json" "errors" "fmt" - "io" "net/http" "net/url" "os" - "path/filepath" "regexp" - "runtime" "sort" "strconv" "strings" @@ -1164,89 +1159,6 @@ func resolveCloudBaseURL(acct core.AccountConfig) string { return normalize(defaultCloudBaseURL) } -func resolveDesktopDBPath(acct core.AccountConfig) string { - if acct.ExtraData != nil { - for _, key := range []string{"db_path", "app_db"} { - if v := strings.TrimSpace(acct.ExtraData[key]); v != "" { - return v - } - } - } - - home, err := os.UserHomeDir() - if err != nil { - return "" - } - - switch runtime.GOOS { - case "darwin": - return filepath.Join(home, "Library", "Application Support", "Ollama", "db.sqlite") - case "linux": - candidates := []string{ - filepath.Join(home, ".local", "share", "Ollama", "db.sqlite"), - filepath.Join(home, ".config", "Ollama", "db.sqlite"), - } - for _, c := range candidates { - if fileExists(c) { - return c - } - } - return candidates[0] - case "windows": - appData := os.Getenv("APPDATA") - if appData != "" { - return filepath.Join(appData, "Ollama", "db.sqlite") - } - return filepath.Join(home, "AppData", "Roaming", "Ollama", "db.sqlite") - default: - 
return filepath.Join(home, ".ollama", "db.sqlite") - } -} - -func resolveServerConfigPath(acct core.AccountConfig) string { - if acct.ExtraData != nil { - if v := strings.TrimSpace(acct.ExtraData["server_config"]); v != "" { - return v - } - if configDir := strings.TrimSpace(acct.ExtraData["config_dir"]); configDir != "" { - return filepath.Join(configDir, "server.json") - } - } - - home, err := os.UserHomeDir() - if err != nil { - return "" - } - return filepath.Join(home, ".ollama", "server.json") -} - -func resolveServerLogFiles(acct core.AccountConfig) []string { - logDir := "" - if acct.ExtraData != nil { - logDir = strings.TrimSpace(acct.ExtraData["logs_dir"]) - if logDir == "" { - if configDir := strings.TrimSpace(acct.ExtraData["config_dir"]); configDir != "" { - logDir = filepath.Join(configDir, "logs") - } - } - } - if logDir == "" { - home, err := os.UserHomeDir() - if err != nil { - return nil - } - logDir = filepath.Join(home, ".ollama", "logs") - } - - pattern := filepath.Join(logDir, "server*.log") - files, err := filepath.Glob(pattern) - if err != nil { - return nil - } - sort.Strings(files) - return files -} - func queryCount(ctx context.Context, db *sql.DB, query string) (int64, error) { var count int64 if err := db.QueryRowContext(ctx, query).Scan(&count); err != nil { @@ -2055,506 +1967,6 @@ func populateDailySeriesFromDB(ctx context.Context, db *sql.DB, snap *core.Usage return nil } -func parseLogFile(path string, onEvent func(ginLogEvent)) error { - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - const maxLogLine = 1024 * 1024 - buf := make([]byte, 0, 64*1024) - scanner.Buffer(buf, maxLogLine) - - for scanner.Scan() { - line := scanner.Text() - event, ok := parseGINLogLine(line) - if !ok { - continue - } - onEvent(event) - } - if err := scanner.Err(); err != nil { - return err - } - return nil -} - -func parseGINLogLine(line string) (ginLogEvent, bool) { - line = 
strings.TrimSpace(line) - if !strings.HasPrefix(line, "[GIN]") { - return ginLogEvent{}, false - } - - parts := strings.Split(line, "|") - if len(parts) < 5 { - return ginLogEvent{}, false - } - - left := strings.TrimSpace(strings.TrimPrefix(parts[0], "[GIN]")) - leftParts := strings.Split(left, " - ") - if len(leftParts) != 2 { - return ginLogEvent{}, false - } - - timestamp, err := time.ParseInLocation("2006/01/02 15:04:05", strings.TrimSpace(leftParts[0])+" "+strings.TrimSpace(leftParts[1]), time.Local) - if err != nil { - return ginLogEvent{}, false - } - - status, err := strconv.Atoi(strings.TrimSpace(parts[1])) - if err != nil { - return ginLogEvent{}, false - } - - durationText := strings.TrimSpace(parts[2]) - durationText = strings.ReplaceAll(durationText, "µ", "u") - duration, err := time.ParseDuration(durationText) - if err != nil { - return ginLogEvent{}, false - } - - methodPath := strings.TrimSpace(parts[4]) - methodPathParts := strings.Fields(methodPath) - if len(methodPathParts) < 2 { - return ginLogEvent{}, false - } - - method := strings.TrimSpace(methodPathParts[0]) - path := strings.Trim(strings.TrimSpace(methodPathParts[1]), `"`) - if method == "" || path == "" { - return ginLogEvent{}, false - } - - return ginLogEvent{ - Timestamp: timestamp, - Status: status, - Duration: duration, - Method: method, - Path: path, - }, true -} - -func isInferencePath(path string) bool { - switch path { - case "/api/chat", "/api/generate", "/api/embed", "/api/embeddings", - "/v1/chat/completions", "/v1/completions", "/v1/responses", "/v1/embeddings", "/v1/messages": - return true - default: - return false - } -} - -func doJSONRequest(ctx context.Context, method, url, apiKey string, out any, client *http.Client) (int, http.Header, error) { - req, err := http.NewRequestWithContext(ctx, method, url, nil) - if err != nil { - return 0, nil, err - } - if apiKey != "" { - req.Header.Set("Authorization", "Bearer "+apiKey) - } - req.Header.Set("Accept", 
"application/json") - - resp, err := client.Do(req) - if err != nil { - return 0, nil, err - } - defer resp.Body.Close() - - if out == nil { - io.Copy(io.Discard, resp.Body) - return resp.StatusCode, resp.Header, nil - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return resp.StatusCode, resp.Header, err - } - if len(body) == 0 { - return resp.StatusCode, resp.Header, nil - } - if err := json.Unmarshal(body, out); err != nil { - return resp.StatusCode, resp.Header, err - } - return resp.StatusCode, resp.Header, nil -} - -func doJSONPostRequest(ctx context.Context, url string, body any, out any, client *http.Client) (int, error) { - payload, err := json.Marshal(body) - if err != nil { - return 0, err - } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload)) - if err != nil { - return 0, err - } - req.Header.Set("Accept", "application/json") - req.Header.Set("Content-Type", "application/json") - - resp, err := client.Do(req) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if out == nil { - io.Copy(io.Discard, resp.Body) - return resp.StatusCode, nil - } - - respBody, err := io.ReadAll(resp.Body) - if err != nil { - return resp.StatusCode, err - } - if len(respBody) == 0 { - return resp.StatusCode, nil - } - if err := json.Unmarshal(respBody, out); err != nil { - return resp.StatusCode, err - } - return resp.StatusCode, nil -} - -func sanitizeMetricPart(input string) string { - s := strings.ToLower(strings.TrimSpace(input)) - s = nonAlnumRe.ReplaceAllString(s, "_") - s = strings.Trim(s, "_") - if s == "" { - return "unknown" - } - return s -} - -func normalizeModelName(input string) string { - s := strings.TrimSpace(strings.ToLower(input)) - if s == "" { - return "" - } - s = strings.Trim(strings.TrimPrefix(s, "models/"), "/") - if strings.HasPrefix(s, "ollama.com/") { - s = strings.TrimPrefix(s, "ollama.com/") - } - if i := strings.LastIndex(s, "/"); i >= 0 { - s = s[i+1:] - } - s = 
strings.TrimSpace(strings.TrimSuffix(s, ":latest")) - return s -} - -func cloudEndpointURL(base, path string) string { - base = strings.TrimRight(strings.TrimSpace(base), "/") - if base == "" { - base = defaultCloudBaseURL - } - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return base + path -} - -func resolveCloudSessionCookie(acct core.AccountConfig) string { - if acct.ExtraData != nil { - for _, key := range []string{"cloud_session_cookie", "session_cookie", "cookie"} { - if v := strings.TrimSpace(acct.ExtraData[key]); v != "" { - return v - } - } - } - if v := strings.TrimSpace(os.Getenv("OLLAMA_SESSION_COOKIE")); v != "" { - return v - } - return "" -} - -func fetchCloudUsageFromSettingsPage(ctx context.Context, cloudBaseURL, apiKey string, acct core.AccountConfig, snap *core.UsageSnapshot, client *http.Client) (bool, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, cloudEndpointURL(cloudBaseURL, "/settings"), nil) - if err != nil { - return false, fmt.Errorf("ollama: creating settings request: %w", err) - } - req.Header.Set("Accept", "text/html,application/xhtml+xml") - if apiKey != "" { - req.Header.Set("Authorization", "Bearer "+apiKey) - } - if cookie := resolveCloudSessionCookie(acct); cookie != "" { - req.Header.Set("Cookie", cookie) - } - - resp, err := client.Do(req) - if err != nil { - return false, fmt.Errorf("ollama: cloud settings request failed: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden { - return false, nil - } - if resp.StatusCode != http.StatusOK { - return false, fmt.Errorf("ollama: cloud settings endpoint returned HTTP %d", resp.StatusCode) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return false, fmt.Errorf("ollama: reading cloud settings response: %w", err) - } - - pcts := make(map[string]float64) - for _, m := range settingsUsageRe.FindAllStringSubmatch(string(body), -1) { - if len(m) < 3 { - 
continue - } - label := strings.ToLower(strings.TrimSpace(m[1])) - v, convErr := strconv.ParseFloat(strings.TrimSpace(m[2]), 64) - if convErr != nil { - continue - } - pcts[label] = v - } - - resets := make(map[string]time.Time) - for _, m := range settingsResetRe.FindAllStringSubmatch(string(body), -1) { - if len(m) < 3 { - continue - } - label := strings.ToLower(strings.TrimSpace(m[1])) - t, ok := parseAnyTime(strings.TrimSpace(m[2])) - if !ok { - continue - } - resets[label] = t - } - - found := false - if v, ok := pcts["session usage"]; ok { - snap.Metrics["usage_five_hour"] = core.Metric{ - Used: core.Float64Ptr(v), - Unit: "%", - Window: "5h", - } - if t, ok := resets["session usage"]; ok { - snap.Resets["usage_five_hour"] = t - snap.SetAttribute("block_end", t.Format(time.RFC3339)) - snap.SetAttribute("block_start", t.Add(-5*time.Hour).Format(time.RFC3339)) - } - found = true - } - if v, ok := pcts["weekly usage"]; ok { - weekly := core.Metric{ - Used: core.Float64Ptr(v), - Unit: "%", - Window: "1w", - } - snap.Metrics["usage_weekly"] = weekly - // Backward-compatible alias for existing widgets/config. 
- snap.Metrics["usage_one_day"] = core.Metric{ - Used: core.Float64Ptr(v), - Unit: "%", - Window: "1d", - } - if t, ok := resets["weekly usage"]; ok { - snap.Resets["usage_weekly"] = t - snap.Resets["usage_one_day"] = t - } - found = true - } - - return found, nil -} - -func setValueMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { - snap.Metrics[key] = core.Metric{ - Used: core.Float64Ptr(value), - Remaining: core.Float64Ptr(value), - Unit: unit, - Window: window, - } -} - -func fileExists(path string) bool { - info, err := os.Stat(path) - return err == nil && !info.IsDir() -} - -func summarizeModels(models []tagModel, limit int) string { - if len(models) == 0 || limit <= 0 { - return "" - } - out := make([]string, 0, limit) - for i := 0; i < len(models) && i < limit; i++ { - name := normalizeModelName(models[i].Name) - if name == "" { - name = normalizeModelName(models[i].Model) - } - if name == "" { - continue - } - out = append(out, name) - } - return strings.Join(out, ", ") -} - -func normalizeHeaderKey(k string) string { - return strings.ReplaceAll(strings.ToLower(strings.TrimSpace(k)), "-", "_") -} - -func isCloudModel(model tagModel) bool { - name := strings.ToLower(strings.TrimSpace(model.Name)) - mdl := strings.ToLower(strings.TrimSpace(model.Model)) - if strings.HasSuffix(name, ":cloud") || strings.HasSuffix(mdl, ":cloud") { - return true - } - if strings.TrimSpace(model.RemoteHost) != "" || strings.TrimSpace(model.RemoteModel) != "" { - return true - } - return false -} - -func anyValueCaseInsensitive(m map[string]any, keys ...string) (any, bool) { - if len(m) == 0 { - return nil, false - } - want := make(map[string]struct{}, len(keys)) - for _, key := range keys { - norm := normalizeLookupKey(key) - if norm == "" { - continue - } - want[norm] = struct{}{} - } - for k, v := range m { - if _, ok := want[normalizeLookupKey(k)]; ok { - return v, true - } - } - return nil, false -} - -func anyStringCaseInsensitive(m 
map[string]any, keys ...string) string { - v, ok := anyValueCaseInsensitive(m, keys...) - if !ok { - return "" - } - switch val := v.(type) { - case string: - return strings.TrimSpace(val) - case fmt.Stringer: - return strings.TrimSpace(val.String()) - default: - return "" - } -} - -func anyMapCaseInsensitive(m map[string]any, keys ...string) map[string]any { - v, ok := anyValueCaseInsensitive(m, keys...) - if !ok { - return nil - } - out, _ := v.(map[string]any) - return out -} - -func anyBoolCaseInsensitive(m map[string]any, keys ...string) (bool, bool) { - v, ok := anyValueCaseInsensitive(m, keys...) - if !ok { - return false, false - } - switch val := v.(type) { - case bool: - return val, true - case string: - b, err := strconv.ParseBool(strings.TrimSpace(val)) - if err == nil { - return b, true - } - } - return false, false -} - -func anyFloatCaseInsensitive(m map[string]any, keys ...string) (float64, bool) { - v, ok := anyValueCaseInsensitive(m, keys...) - if !ok { - return 0, false - } - return anyFloat(v) -} - -func anyFloat(v any) (float64, bool) { - switch val := v.(type) { - case float64: - return val, true - case float32: - return float64(val), true - case int: - return float64(val), true - case int64: - return float64(val), true - case int32: - return float64(val), true - case uint: - return float64(val), true - case uint64: - return float64(val), true - case uint32: - return float64(val), true - case json.Number: - f, err := val.Float64() - if err == nil { - return f, true - } - case string: - s := strings.TrimSpace(strings.TrimSuffix(val, "%")) - f, err := strconv.ParseFloat(s, 64) - if err == nil { - return f, true - } - } - return 0, false -} - -func anyNullStringCaseInsensitive(m map[string]any, keys ...string) string { - raw := anyMapCaseInsensitive(m, keys...) 
- if len(raw) == 0 { - return "" - } - valid, ok := anyBoolCaseInsensitive(raw, "valid") - if ok && !valid { - return "" - } - return anyStringCaseInsensitive(raw, "string", "value") -} - -func anyNullTimeCaseInsensitive(m map[string]any, keys ...string) (time.Time, bool) { - raw := anyMapCaseInsensitive(m, keys...) - if len(raw) == 0 { - return time.Time{}, false - } - valid, ok := anyBoolCaseInsensitive(raw, "valid") - if ok && !valid { - return time.Time{}, false - } - timeRaw := anyStringCaseInsensitive(raw, "time", "value") - if timeRaw == "" { - return time.Time{}, false - } - return parseAnyTime(timeRaw) -} - -func normalizeLookupKey(s string) string { - s = strings.TrimSpace(strings.ToLower(s)) - s = strings.ReplaceAll(s, "_", "") - s = strings.ReplaceAll(s, "-", "") - s = strings.ReplaceAll(s, ".", "") - return s -} - -func parseAnyTime(raw string) (time.Time, bool) { - t, err := shared.ParseTimestampString(raw) - if err != nil { - return time.Time{}, false - } - return t, true -} - type versionResponse struct { Version string `json:"version"` } diff --git a/internal/providers/ollama/request_helpers.go b/internal/providers/ollama/request_helpers.go new file mode 100644 index 0000000..2bcba31 --- /dev/null +++ b/internal/providers/ollama/request_helpers.go @@ -0,0 +1,389 @@ +package ollama + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +func doJSONRequest(ctx context.Context, method, url, apiKey string, out any, client *http.Client) (int, http.Header, error) { + req, err := http.NewRequestWithContext(ctx, method, url, nil) + if err != nil { + return 0, nil, err + } + if apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + req.Header.Set("Accept", "application/json") + + resp, err := client.Do(req) + if err != nil { + return 0, nil, err + 
} + defer resp.Body.Close() + + if out == nil { + _, _ = io.Copy(io.Discard, resp.Body) + return resp.StatusCode, resp.Header, nil + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return resp.StatusCode, resp.Header, err + } + if len(body) == 0 { + return resp.StatusCode, resp.Header, nil + } + if err := json.Unmarshal(body, out); err != nil { + return resp.StatusCode, resp.Header, err + } + return resp.StatusCode, resp.Header, nil +} + +func doJSONPostRequest(ctx context.Context, url string, body any, out any, client *http.Client) (int, error) { + payload, err := json.Marshal(body) + if err != nil { + return 0, err + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload)) + if err != nil { + return 0, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if out == nil { + _, _ = io.Copy(io.Discard, resp.Body) + return resp.StatusCode, nil + } + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return resp.StatusCode, err + } + if len(respBody) == 0 { + return resp.StatusCode, nil + } + if err := json.Unmarshal(respBody, out); err != nil { + return resp.StatusCode, err + } + return resp.StatusCode, nil +} + +func sanitizeMetricPart(input string) string { + s := strings.ToLower(strings.TrimSpace(input)) + s = nonAlnumRe.ReplaceAllString(s, "_") + s = strings.Trim(s, "_") + if s == "" { + return "unknown" + } + return s +} + +func normalizeModelName(input string) string { + s := strings.TrimSpace(strings.ToLower(input)) + if s == "" { + return "" + } + s = strings.Trim(strings.TrimPrefix(s, "models/"), "/") + if strings.HasPrefix(s, "ollama.com/") { + s = strings.TrimPrefix(s, "ollama.com/") + } + if i := strings.LastIndex(s, "/"); i >= 0 { + s = s[i+1:] + } + return strings.TrimSpace(strings.TrimSuffix(s, ":latest")) +} + +func cloudEndpointURL(base, 
path string) string { + base = strings.TrimRight(strings.TrimSpace(base), "/") + if base == "" { + base = defaultCloudBaseURL + } + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return base + path +} + +func resolveCloudSessionCookie(acct core.AccountConfig) string { + if acct.ExtraData != nil { + for _, key := range []string{"cloud_session_cookie", "session_cookie", "cookie"} { + if v := strings.TrimSpace(acct.ExtraData[key]); v != "" { + return v + } + } + } + return strings.TrimSpace(os.Getenv("OLLAMA_SESSION_COOKIE")) +} + +func fetchCloudUsageFromSettingsPage(ctx context.Context, cloudBaseURL, apiKey string, acct core.AccountConfig, snap *core.UsageSnapshot, client *http.Client) (bool, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, cloudEndpointURL(cloudBaseURL, "/settings"), nil) + if err != nil { + return false, fmt.Errorf("ollama: creating settings request: %w", err) + } + req.Header.Set("Accept", "text/html,application/xhtml+xml") + if apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + if cookie := resolveCloudSessionCookie(acct); cookie != "" { + req.Header.Set("Cookie", cookie) + } + + resp, err := client.Do(req) + if err != nil { + return false, fmt.Errorf("ollama: cloud settings request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden { + return false, nil + } + if resp.StatusCode != http.StatusOK { + return false, fmt.Errorf("ollama: cloud settings endpoint returned HTTP %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return false, fmt.Errorf("ollama: reading cloud settings response: %w", err) + } + + pcts := make(map[string]float64) + for _, match := range settingsUsageRe.FindAllStringSubmatch(string(body), -1) { + if len(match) < 3 { + continue + } + value, convErr := strconv.ParseFloat(strings.TrimSpace(match[2]), 64) + if convErr == nil { + 
pcts[strings.ToLower(strings.TrimSpace(match[1]))] = value + } + } + resets := make(map[string]time.Time) + for _, match := range settingsResetRe.FindAllStringSubmatch(string(body), -1) { + if len(match) < 3 { + continue + } + if t, ok := parseAnyTime(strings.TrimSpace(match[2])); ok { + resets[strings.ToLower(strings.TrimSpace(match[1]))] = t + } + } + + found := false + if value, ok := pcts["session usage"]; ok { + snap.Metrics["usage_five_hour"] = core.Metric{Used: core.Float64Ptr(value), Unit: "%", Window: "5h"} + if t, ok := resets["session usage"]; ok { + snap.Resets["usage_five_hour"] = t + snap.SetAttribute("block_end", t.Format(time.RFC3339)) + snap.SetAttribute("block_start", t.Add(-5*time.Hour).Format(time.RFC3339)) + } + found = true + } + if value, ok := pcts["weekly usage"]; ok { + snap.Metrics["usage_weekly"] = core.Metric{Used: core.Float64Ptr(value), Unit: "%", Window: "1w"} + snap.Metrics["usage_one_day"] = core.Metric{Used: core.Float64Ptr(value), Unit: "%", Window: "1d"} + if t, ok := resets["weekly usage"]; ok { + snap.Resets["usage_weekly"] = t + snap.Resets["usage_one_day"] = t + } + found = true + } + return found, nil +} + +func setValueMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { + snap.Metrics[key] = core.Metric{ + Used: core.Float64Ptr(value), + Remaining: core.Float64Ptr(value), + Unit: unit, + Window: window, + } +} + +func fileExists(path string) bool { + info, err := os.Stat(path) + return err == nil && !info.IsDir() +} + +func summarizeModels(models []tagModel, limit int) string { + if len(models) == 0 || limit <= 0 { + return "" + } + out := make([]string, 0, limit) + for i := 0; i < len(models) && i < limit; i++ { + name := normalizeModelName(models[i].Name) + if name == "" { + name = normalizeModelName(models[i].Model) + } + if name != "" { + out = append(out, name) + } + } + return strings.Join(out, ", ") +} + +func normalizeHeaderKey(k string) string { + return 
strings.ReplaceAll(strings.ToLower(strings.TrimSpace(k)), "-", "_") +} + +func isCloudModel(model tagModel) bool { + name := strings.ToLower(strings.TrimSpace(model.Name)) + mdl := strings.ToLower(strings.TrimSpace(model.Model)) + if strings.HasSuffix(name, ":cloud") || strings.HasSuffix(mdl, ":cloud") { + return true + } + return strings.TrimSpace(model.RemoteHost) != "" || strings.TrimSpace(model.RemoteModel) != "" +} + +func anyValueCaseInsensitive(m map[string]any, keys ...string) (any, bool) { + if len(m) == 0 { + return nil, false + } + want := make(map[string]struct{}, len(keys)) + for _, key := range keys { + if norm := normalizeLookupKey(key); norm != "" { + want[norm] = struct{}{} + } + } + for key, value := range m { + if _, ok := want[normalizeLookupKey(key)]; ok { + return value, true + } + } + return nil, false +} + +func anyStringCaseInsensitive(m map[string]any, keys ...string) string { + value, ok := anyValueCaseInsensitive(m, keys...) + if !ok { + return "" + } + switch val := value.(type) { + case string: + return strings.TrimSpace(val) + case fmt.Stringer: + return strings.TrimSpace(val.String()) + default: + return "" + } +} + +func anyMapCaseInsensitive(m map[string]any, keys ...string) map[string]any { + value, ok := anyValueCaseInsensitive(m, keys...) + if !ok { + return nil + } + out, _ := value.(map[string]any) + return out +} + +func anyBoolCaseInsensitive(m map[string]any, keys ...string) (bool, bool) { + value, ok := anyValueCaseInsensitive(m, keys...) + if !ok { + return false, false + } + switch val := value.(type) { + case bool: + return val, true + case string: + b, err := strconv.ParseBool(strings.TrimSpace(val)) + return b, err == nil + default: + return false, false + } +} + +func anyFloatCaseInsensitive(m map[string]any, keys ...string) (float64, bool) { + value, ok := anyValueCaseInsensitive(m, keys...) 
+ if !ok { + return 0, false + } + return anyFloat(value) +} + +func anyFloat(v any) (float64, bool) { + switch val := v.(type) { + case float64: + return val, true + case float32: + return float64(val), true + case int: + return float64(val), true + case int64: + return float64(val), true + case int32: + return float64(val), true + case uint: + return float64(val), true + case uint64: + return float64(val), true + case uint32: + return float64(val), true + case json.Number: + f, err := val.Float64() + return f, err == nil + case string: + s := strings.TrimSpace(strings.TrimSuffix(val, "%")) + f, err := strconv.ParseFloat(s, 64) + return f, err == nil + default: + return 0, false + } +} + +func anyNullStringCaseInsensitive(m map[string]any, keys ...string) string { + raw := anyMapCaseInsensitive(m, keys...) + if len(raw) == 0 { + return "" + } + if valid, ok := anyBoolCaseInsensitive(raw, "valid"); ok && !valid { + return "" + } + return anyStringCaseInsensitive(raw, "string", "value") +} + +func anyNullTimeCaseInsensitive(m map[string]any, keys ...string) (time.Time, bool) { + raw := anyMapCaseInsensitive(m, keys...) 
+ if len(raw) == 0 { + return time.Time{}, false + } + if valid, ok := anyBoolCaseInsensitive(raw, "valid"); ok && !valid { + return time.Time{}, false + } + timeRaw := anyStringCaseInsensitive(raw, "time", "value") + if timeRaw == "" { + return time.Time{}, false + } + return parseAnyTime(timeRaw) +} + +func normalizeLookupKey(s string) string { + s = strings.TrimSpace(strings.ToLower(s)) + s = strings.ReplaceAll(s, "_", "") + s = strings.ReplaceAll(s, "-", "") + s = strings.ReplaceAll(s, ".", "") + return s +} + +func parseAnyTime(raw string) (time.Time, bool) { + t, err := shared.ParseTimestampString(raw) + if err != nil { + return time.Time{}, false + } + return t, true +} diff --git a/internal/providers/ollama/server_log_parse.go b/internal/providers/ollama/server_log_parse.go new file mode 100644 index 0000000..ae33abd --- /dev/null +++ b/internal/providers/ollama/server_log_parse.go @@ -0,0 +1,100 @@ +package ollama + +import ( + "bufio" + "os" + "strconv" + "strings" + "time" +) + +func parseLogFile(path string, onEvent func(ginLogEvent)) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + const maxLogLine = 1024 * 1024 + buf := make([]byte, 0, 64*1024) + scanner.Buffer(buf, maxLogLine) + + for scanner.Scan() { + line := scanner.Text() + event, ok := parseGINLogLine(line) + if !ok { + continue + } + onEvent(event) + } + if err := scanner.Err(); err != nil { + return err + } + return nil +} + +func parseGINLogLine(line string) (ginLogEvent, bool) { + line = strings.TrimSpace(line) + if !strings.HasPrefix(line, "[GIN]") { + return ginLogEvent{}, false + } + + parts := strings.Split(line, "|") + if len(parts) < 5 { + return ginLogEvent{}, false + } + + left := strings.TrimSpace(strings.TrimPrefix(parts[0], "[GIN]")) + leftParts := strings.Split(left, " - ") + if len(leftParts) != 2 { + return ginLogEvent{}, false + } + + timestamp, err := time.ParseInLocation("2006/01/02 15:04:05", 
strings.TrimSpace(leftParts[0])+" "+strings.TrimSpace(leftParts[1]), time.Local) + if err != nil { + return ginLogEvent{}, false + } + + status, err := strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return ginLogEvent{}, false + } + + durationText := strings.TrimSpace(parts[2]) + durationText = strings.ReplaceAll(durationText, "µ", "u") + duration, err := time.ParseDuration(durationText) + if err != nil { + return ginLogEvent{}, false + } + + methodPath := strings.TrimSpace(parts[4]) + methodPathParts := strings.Fields(methodPath) + if len(methodPathParts) < 2 { + return ginLogEvent{}, false + } + + method := strings.TrimSpace(methodPathParts[0]) + path := strings.Trim(strings.TrimSpace(methodPathParts[1]), `"`) + if method == "" || path == "" { + return ginLogEvent{}, false + } + + return ginLogEvent{ + Timestamp: timestamp, + Status: status, + Duration: duration, + Method: method, + Path: path, + }, true +} + +func isInferencePath(path string) bool { + switch path { + case "/api/chat", "/api/generate", "/api/embed", "/api/embeddings", + "/v1/chat/completions", "/v1/completions", "/v1/responses", "/v1/embeddings", "/v1/messages": + return true + default: + return false + } +} diff --git a/internal/providers/opencode/telemetry.go b/internal/providers/opencode/telemetry.go index 4a8b872..8958d4d 100644 --- a/internal/providers/opencode/telemetry.go +++ b/internal/providers/opencode/telemetry.go @@ -1,17 +1,11 @@ package opencode import ( - "bufio" "context" - "database/sql" "encoding/json" - "fmt" "os" "path/filepath" "strings" - "time" - - _ "github.com/mattn/go-sqlite3" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/shared" @@ -181,1475 +175,3 @@ func (p *Provider) ParseHookPayload(raw []byte, opts shared.TelemetryCollectOpti } return events, nil } - -// ParseTelemetryEventFile parses OpenCode event jsonl/ndjson files. 
-func ParseTelemetryEventFile(path string) ([]shared.TelemetryEvent, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var out []shared.TelemetryEvent - scanner := bufio.NewScanner(f) - scanner.Buffer(make([]byte, 0, 512*1024), 8*1024*1024) - lineNumber := 0 - - for scanner.Scan() { - lineNumber++ - var ev eventEnvelope - if err := json.Unmarshal(scanner.Bytes(), &ev); err != nil { - continue - } - - typ := strings.TrimSpace(ev.Type) - if typ == "" { - typ = strings.TrimSpace(ev.Event) - } - switch typ { - case "message.updated": - var props messageUpdatedProps - if err := json.Unmarshal(ev.Properties, &props); err != nil { - continue - } - info := props.Info - if strings.ToLower(strings.TrimSpace(info.Role)) != "assistant" { - continue - } - - messageID := strings.TrimSpace(info.ID) - if messageID == "" { - messageID = fmt.Sprintf("%s:%d", path, lineNumber) - } - total := info.Tokens.Input + info.Tokens.Output + info.Tokens.Reasoning + info.Tokens.Cache.Read + info.Tokens.Cache.Write - occurred := shared.UnixAuto(info.Time.Created) - if info.Time.Completed > 0 { - occurred = shared.UnixAuto(info.Time.Completed) - } - - providerID := strings.TrimSpace(info.ProviderID) - if providerID == "" { - providerID = "opencode" - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetryEventSchema, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurred, - AccountID: "", - WorkspaceID: shared.SanitizeWorkspace(info.Path.CWD), - SessionID: strings.TrimSpace(info.SessionID), - TurnID: strings.TrimSpace(info.ParentID), - MessageID: messageID, - ProviderID: providerID, - AgentName: "opencode", - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: strings.TrimSpace(info.ModelID), - TokenUsage: core.TokenUsage{ - InputTokens: core.Int64Ptr(info.Tokens.Input), - OutputTokens: core.Int64Ptr(info.Tokens.Output), - ReasoningTokens: core.Int64Ptr(info.Tokens.Reasoning), - CacheReadTokens: 
core.Int64Ptr(info.Tokens.Cache.Read), - CacheWriteTokens: core.Int64Ptr(info.Tokens.Cache.Write), - TotalTokens: core.Int64Ptr(total), - CostUSD: core.Float64Ptr(info.Cost), - }, - Status: shared.TelemetryStatusOK, - Payload: map[string]any{ - "file": path, - "line": lineNumber, - }, - }) - - case "tool.execute.after": - if len(ev.Payload) == 0 { - continue - } - var tool toolPayload - if err := json.Unmarshal(ev.Payload, &tool); err != nil { - continue - } - toolID := strings.TrimSpace(tool.ToolCallID) - if toolID == "" { - toolID = fmt.Sprintf("%s:%d", path, lineNumber) - } - - name := strings.TrimSpace(tool.ToolName) - if name == "" { - name = strings.TrimSpace(tool.Name) - } - if name == "" { - name = "unknown" - } - occurred := time.Now().UTC() - if tool.Timestamp > 0 { - occurred = shared.UnixAuto(tool.Timestamp) - } - - // Extract tool's target file path from raw payload for language inference. - toolFilePath := "" - var rawPayloadMap map[string]any - if json.Unmarshal(ev.Payload, &rawPayloadMap) == nil { - if paths := shared.ExtractFilePathsFromPayload(rawPayloadMap); len(paths) > 0 { - toolFilePath = paths[0] - } - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetryEventSchema, - Channel: shared.TelemetryChannelJSONL, - OccurredAt: occurred, - AccountID: "", - SessionID: strings.TrimSpace(tool.SessionID), - MessageID: strings.TrimSpace(tool.MessageID), - ToolCallID: toolID, - ProviderID: "opencode", - AgentName: "opencode", - EventType: shared.TelemetryEventTypeToolUsage, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - ToolName: strings.ToLower(name), - Status: shared.TelemetryStatusOK, - Payload: map[string]any{ - "source_file": path, - "line": lineNumber, - "file": toolFilePath, - }, - }) - } - } - if err := scanner.Err(); err != nil { - return out, err - } - return out, nil -} - -// CollectTelemetryFromSQLite parses OpenCode SQLite data (message + part tables). 
-func CollectTelemetryFromSQLite(ctx context.Context, dbPath string) ([]shared.TelemetryEvent, error) { - if strings.TrimSpace(dbPath) == "" { - return nil, nil - } - if _, err := os.Stat(dbPath); err != nil { - return nil, nil - } - - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return nil, err - } - defer db.Close() - - if !sqliteTableExists(ctx, db, "message") { - return nil, nil - } - - partSummaryByMessage := make(map[string]partSummary) - hasPartTable := sqliteTableExists(ctx, db, "part") - if hasPartTable { - partSummaryByMessage, _ = collectPartSummary(ctx, db) - } - - var out []shared.TelemetryEvent - seenMessages := map[string]bool{} - - if hasPartTable { - stepRows, err := db.QueryContext(ctx, ` - SELECT p.id, p.message_id, p.session_id, p.time_created, p.time_updated, p.data, COALESCE(m.data, '{}'), COALESCE(s.directory, '') - FROM part p - LEFT JOIN message m ON m.id = p.message_id - LEFT JOIN session s ON s.id = p.session_id - WHERE COALESCE(json_extract(p.data, '$.type'), '') = 'step-finish' - ORDER BY p.time_updated ASC - `) - if err == nil { - for stepRows.Next() { - if ctx.Err() != nil { - _ = stepRows.Close() - return out, ctx.Err() - } - - var ( - partID string - messageIDDB string - sessionIDDB string - timeCreated int64 - timeUpdated int64 - partJSON string - messageJSON string - sessionDir string - ) - if err := stepRows.Scan(&partID, &messageIDDB, &sessionIDDB, &timeCreated, &timeUpdated, &partJSON, &messageJSON, &sessionDir); err != nil { - continue - } - - partPayload := decodeJSONMap([]byte(partJSON)) - messagePayload := decodeJSONMap([]byte(messageJSON)) - - u := extractUsage(partPayload) - if !hasUsage(u) { - continue - } - - messageID := core.FirstNonEmpty(strings.TrimSpace(messageIDDB), shared.FirstPathString(messagePayload, []string{"id"}), shared.FirstPathString(messagePayload, []string{"messageID"})) - if messageID == "" || seenMessages[messageID] { - continue - } - - sessionID := 
core.FirstNonEmpty(strings.TrimSpace(sessionIDDB), shared.FirstPathString(messagePayload, []string{"sessionID"})) - turnID := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"parentID"}), shared.FirstPathString(messagePayload, []string{"turnID"})) - providerID := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"providerID"}), shared.FirstPathString(messagePayload, []string{"model", "providerID"}), "opencode") - modelRaw := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"modelID"}), shared.FirstPathString(messagePayload, []string{"model", "modelID"})) - upstreamProvider := extractUpstreamProviderFromMaps(partPayload, messagePayload) - - occurredAt := shared.UnixAuto(timeUpdated) - if timeCreated > 0 { - occurredAt = shared.UnixAuto(timeCreated) - } - - eventStatus := mapMessageStatus(shared.FirstPathString(partPayload, []string{"reason"})) - - contextSummary := map[string]any{} - if summary, ok := partSummaryByMessage[messageID]; ok { - partsByType := make(map[string]any, len(summary.PartsByType)) - for partType, count := range summary.PartsByType { - partsByType[partType] = count - } - contextSummary = map[string]any{ - "parts_total": summary.PartsTotal, - "parts_by_type": partsByType, - } - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySQLiteSchema, - Channel: shared.TelemetryChannelSQLite, - OccurredAt: occurredAt, - AccountID: "", - WorkspaceID: shared.SanitizeWorkspace(core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"path", "cwd"}), shared.FirstPathString(messagePayload, []string{"path", "root"}), strings.TrimSpace(sessionDir))), - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ProviderID: providerID, - AgentName: core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"agent"}), "opencode"), - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: modelRaw, - TokenUsage: core.TokenUsage{ - InputTokens: u.InputTokens, - 
OutputTokens: u.OutputTokens, - ReasoningTokens: u.ReasoningTokens, - CacheReadTokens: u.CacheReadTokens, - CacheWriteTokens: u.CacheWriteTokens, - TotalTokens: u.TotalTokens, - CostUSD: u.CostUSD, - Requests: core.Int64Ptr(1), - }, - Status: eventStatus, - Payload: map[string]any{ - "source": map[string]any{ - "db_path": dbPath, - "table": "part", - "type": "step-finish", - }, - "db": map[string]any{ - "part_id": strings.TrimSpace(partID), - "message_id": strings.TrimSpace(messageIDDB), - "session_id": strings.TrimSpace(sessionIDDB), - "time_created": timeCreated, - "time_updated": timeUpdated, - }, - "message": map[string]any{ - "provider_id": providerID, - "model_id": modelRaw, - "mode": shared.FirstPathString(messagePayload, []string{"mode"}), - "finish": shared.FirstPathString(messagePayload, []string{"finish"}), - }, - "step": map[string]any{ - "type": shared.FirstPathString(partPayload, []string{"type"}), - "reason": shared.FirstPathString(partPayload, []string{"reason"}), - }, - "upstream_provider": upstreamProvider, - "context": contextSummary, - }, - }) - seenMessages[messageID] = true - } - _ = stepRows.Close() - } - } - - messageRows, err := db.QueryContext(ctx, ` - SELECT m.id, m.session_id, m.time_created, m.time_updated, m.data, COALESCE(s.directory, '') - FROM message m - LEFT JOIN session s ON s.id = m.session_id - ORDER BY m.time_updated ASC - `) - if err == nil { - for messageRows.Next() { - if ctx.Err() != nil { - _ = messageRows.Close() - return out, ctx.Err() - } - - var ( - messageIDRaw string - sessionIDRaw string - timeCreated int64 - timeUpdated int64 - messageJSON string - sessionDir string - ) - if err := messageRows.Scan(&messageIDRaw, &sessionIDRaw, &timeCreated, &timeUpdated, &messageJSON, &sessionDir); err != nil { - continue - } - payload := decodeJSONMap([]byte(messageJSON)) - if strings.ToLower(shared.FirstPathString(payload, []string{"role"})) != "assistant" { - continue - } - - u := extractUsage(payload) - completedAt := 
ptrInt64FromFloat(shared.FirstPathNumber(payload, []string{"time", "completed"})) - createdAt := ptrInt64FromFloat(shared.FirstPathNumber(payload, []string{"time", "created"})) - if !hasUsage(u) && completedAt <= 0 { - continue - } - - messageID := core.FirstNonEmpty(strings.TrimSpace(messageIDRaw), shared.FirstPathString(payload, []string{"id"}), shared.FirstPathString(payload, []string{"messageID"})) - if messageID == "" || seenMessages[messageID] { - continue - } - - if !hasUsage(u) { - continue - } - - providerID := core.FirstNonEmpty(shared.FirstPathString(payload, []string{"providerID"}), shared.FirstPathString(payload, []string{"model", "providerID"}), "opencode") - modelRaw := core.FirstNonEmpty(shared.FirstPathString(payload, []string{"modelID"}), shared.FirstPathString(payload, []string{"model", "modelID"})) - upstreamProvider := extractUpstreamProviderFromMaps(payload) - sessionID := core.FirstNonEmpty(strings.TrimSpace(sessionIDRaw), shared.FirstPathString(payload, []string{"sessionID"})) - turnID := core.FirstNonEmpty(shared.FirstPathString(payload, []string{"parentID"}), shared.FirstPathString(payload, []string{"turnID"})) - - occurredAt := shared.UnixAuto(timeUpdated) - switch { - case completedAt > 0: - occurredAt = shared.UnixAuto(completedAt) - case createdAt > 0: - occurredAt = shared.UnixAuto(createdAt) - case timeCreated > 0: - occurredAt = shared.UnixAuto(timeCreated) - } - - eventStatus := shared.TelemetryStatusOK - finish := strings.ToLower(shared.FirstPathString(payload, []string{"finish"})) - if strings.Contains(finish, "error") || strings.Contains(finish, "fail") { - eventStatus = shared.TelemetryStatusError - } - if strings.Contains(finish, "abort") || strings.Contains(finish, "cancel") { - eventStatus = shared.TelemetryStatusAborted - } - - contextSummary := map[string]any{} - if summary, ok := partSummaryByMessage[messageID]; ok { - partsByType := make(map[string]any, len(summary.PartsByType)) - for partType, count := range 
summary.PartsByType { - partsByType[partType] = count - } - contextSummary = map[string]any{ - "parts_total": summary.PartsTotal, - "parts_by_type": partsByType, - } - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySQLiteSchema, - Channel: shared.TelemetryChannelSQLite, - OccurredAt: occurredAt, - AccountID: "", - WorkspaceID: shared.SanitizeWorkspace(core.FirstNonEmpty(shared.FirstPathString(payload, []string{"path", "cwd"}), shared.FirstPathString(payload, []string{"path", "root"}), strings.TrimSpace(sessionDir))), - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ProviderID: providerID, - AgentName: core.FirstNonEmpty(shared.FirstPathString(payload, []string{"agent"}), "opencode"), - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: modelRaw, - TokenUsage: core.TokenUsage{ - InputTokens: u.InputTokens, - OutputTokens: u.OutputTokens, - ReasoningTokens: u.ReasoningTokens, - CacheReadTokens: u.CacheReadTokens, - CacheWriteTokens: u.CacheWriteTokens, - TotalTokens: u.TotalTokens, - CostUSD: u.CostUSD, - Requests: core.Int64Ptr(1), - }, - Status: eventStatus, - Payload: map[string]any{ - "source": map[string]any{ - "db_path": dbPath, - "table": "message", - }, - "db": map[string]any{ - "message_id": strings.TrimSpace(messageIDRaw), - "session_id": strings.TrimSpace(sessionIDRaw), - "time_created": timeCreated, - "time_updated": timeUpdated, - }, - "message": map[string]any{ - "provider_id": providerID, - "model_id": modelRaw, - "role": shared.FirstPathString(payload, []string{"role"}), - "mode": shared.FirstPathString(payload, []string{"mode"}), - "finish": shared.FirstPathString(payload, []string{"finish"}), - "error_name": shared.FirstPathString(payload, []string{"error", "name"}), - }, - "upstream_provider": upstreamProvider, - "context": contextSummary, - }, - }) - seenMessages[messageID] = true - } - _ = messageRows.Close() - } - - if !hasPartTable { - return out, nil - } - - seenTools := map[string]bool{} 
- toolRows, err := db.QueryContext(ctx, ` - SELECT p.id, p.message_id, p.session_id, p.time_created, p.time_updated, p.data, COALESCE(m.data, '{}'), COALESCE(s.directory, '') - FROM part p - LEFT JOIN message m ON m.id = p.message_id - LEFT JOIN session s ON s.id = p.session_id - WHERE COALESCE(json_extract(p.data, '$.type'), '') = 'tool' - ORDER BY p.time_updated ASC - `) - if err != nil { - return out, nil - } - defer toolRows.Close() - - for toolRows.Next() { - if ctx.Err() != nil { - return out, ctx.Err() - } - var ( - partID string - messageIDDB string - sessionIDDB string - timeCreated int64 - timeUpdated int64 - partJSON string - messageJSON string - sessionDir string - ) - if err := toolRows.Scan(&partID, &messageIDDB, &sessionIDDB, &timeCreated, &timeUpdated, &partJSON, &messageJSON, &sessionDir); err != nil { - continue - } - - partPayload := decodeJSONMap([]byte(partJSON)) - messagePayload := decodeJSONMap([]byte(messageJSON)) - - toolCallID := core.FirstNonEmpty(shared.FirstPathString(partPayload, []string{"callID"}), shared.FirstPathString(partPayload, []string{"call_id"}), strings.TrimSpace(partID)) - if toolCallID == "" || seenTools[toolCallID] { - continue - } - - statusRaw := strings.ToLower(shared.FirstPathString(partPayload, []string{"state", "status"})) - status, include := mapToolStatus(statusRaw) - if !include { - continue - } - seenTools[toolCallID] = true - - toolName := strings.ToLower(core.FirstNonEmpty(shared.FirstPathString(partPayload, []string{"tool"}), shared.FirstPathString(partPayload, []string{"name"}), "unknown")) - sessionID := core.FirstNonEmpty(strings.TrimSpace(sessionIDDB), shared.FirstPathString(partPayload, []string{"sessionID"}), shared.FirstPathString(messagePayload, []string{"sessionID"})) - messageID := core.FirstNonEmpty(strings.TrimSpace(messageIDDB), shared.FirstPathString(partPayload, []string{"messageID"}), shared.FirstPathString(messagePayload, []string{"id"})) - providerID := 
core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"providerID"}), shared.FirstPathString(messagePayload, []string{"model", "providerID"}), "opencode") - modelRaw := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"modelID"}), shared.FirstPathString(messagePayload, []string{"model", "modelID"})) - upstreamProvider := extractUpstreamProviderFromMaps(partPayload, messagePayload) - - occurredAt := shared.UnixAuto(timeUpdated) - if ts := ptrInt64FromFloat(shared.FirstPathNumber(partPayload, - []string{"state", "time", "end"}, - []string{"state", "time", "start"}, - []string{"time", "end"}, - []string{"time", "start"}, - )); ts > 0 { - occurredAt = shared.UnixAuto(ts) - } else if timeCreated > 0 { - occurredAt = shared.UnixAuto(timeCreated) - } - - // Extract tool's target file path from part payload for language inference. - toolFilePath := "" - if stateInput, ok := partPayload["state"].(map[string]any); ok { - if paths := shared.ExtractFilePathsFromPayload(stateInput); len(paths) > 0 { - toolFilePath = paths[0] - } - } - if toolFilePath == "" { - if paths := shared.ExtractFilePathsFromPayload(partPayload); len(paths) > 0 { - toolFilePath = paths[0] - } - } - - out = append(out, shared.TelemetryEvent{ - SchemaVersion: telemetrySQLiteSchema, - Channel: shared.TelemetryChannelSQLite, - OccurredAt: occurredAt, - AccountID: "", - WorkspaceID: shared.SanitizeWorkspace(core.FirstNonEmpty( - shared.FirstPathString(messagePayload, []string{"path", "cwd"}), - shared.FirstPathString(messagePayload, []string{"path", "root"}), - strings.TrimSpace(sessionDir), - )), - SessionID: sessionID, - MessageID: messageID, - ToolCallID: toolCallID, - ProviderID: providerID, - AgentName: core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"agent"}), "opencode"), - EventType: shared.TelemetryEventTypeToolUsage, - ModelRaw: modelRaw, - ToolName: toolName, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - Status: status, - 
Payload: map[string]any{ - "source": map[string]any{ - "db_path": dbPath, - "table": "part", - }, - "db": map[string]any{ - "part_id": strings.TrimSpace(partID), - "message_id": strings.TrimSpace(messageIDDB), - "session_id": strings.TrimSpace(sessionIDDB), - "time_created": timeCreated, - "time_updated": timeUpdated, - }, - "message": map[string]any{ - "provider_id": providerID, - "model_id": modelRaw, - "mode": shared.FirstPathString(messagePayload, []string{"mode"}), - }, - "upstream_provider": upstreamProvider, - "status": statusRaw, - "file": toolFilePath, - }, - }) - } - - return out, nil -} - -// ParseTelemetryHookPayload parses OpenCode plugin hook payloads. -func ParseTelemetryHookPayload(raw []byte) ([]shared.TelemetryEvent, error) { - trimmed := strings.TrimSpace(string(raw)) - if trimmed == "" { - return nil, nil - } - - var root map[string]json.RawMessage - if err := json.Unmarshal([]byte(trimmed), &root); err != nil { - return nil, fmt.Errorf("decode hook payload: %w", err) - } - rootPayload := decodeRawMessageMap(root) - - if eventRaw, ok := root["event"]; ok && len(eventRaw) > 0 { - return parseEventJSON(eventRaw, decodeJSONMap(eventRaw), true) - } - if hookRaw, ok := root["hook"]; ok { - var hook string - if err := json.Unmarshal(hookRaw, &hook); err != nil { - return nil, fmt.Errorf("decode hook name: %w", err) - } - switch strings.TrimSpace(hook) { - case "tool.execute.after": - return parseToolExecuteAfterHook(root, rootPayload) - case "chat.message": - return parseChatMessageHook(root, rootPayload) - default: - return []shared.TelemetryEvent{buildRawEnvelope(rootPayload, telemetryHookSchema, strings.TrimSpace(hook))}, nil - } - } - if _, ok := root["type"]; ok { - return parseEventJSON([]byte(trimmed), decodeJSONMap([]byte(trimmed)), true) - } - - return []shared.TelemetryEvent{buildRawEnvelope(rootPayload, telemetryHookSchema, "")}, nil -} - -func parseEventJSON(raw []byte, rawPayload map[string]any, includeUnknown bool) 
([]shared.TelemetryEvent, error) { - var ev eventEnvelope - if err := json.Unmarshal(raw, &ev); err != nil { - return nil, fmt.Errorf("decode opencode event: %w", err) - } - - typ := strings.TrimSpace(ev.Type) - if typ == "" { - typ = strings.TrimSpace(ev.Event) - } - switch typ { - case "message.updated": - var props messageUpdatedProps - if err := json.Unmarshal(ev.Properties, &props); err != nil { - return nil, fmt.Errorf("decode message.updated properties: %w", err) - } - info := props.Info - if strings.ToLower(strings.TrimSpace(info.Role)) != "assistant" { - if includeUnknown { - return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, typ)}, nil - } - return nil, nil - } - messageID := strings.TrimSpace(info.ID) - if messageID == "" { - if includeUnknown { - return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, typ)}, nil - } - return nil, nil - } - providerID := core.FirstNonEmpty(strings.TrimSpace(info.ProviderID), "opencode") - occurredAt := shared.UnixAuto(info.Time.Created) - if info.Time.Completed > 0 { - occurredAt = shared.UnixAuto(info.Time.Completed) - } - totalTokens := info.Tokens.Input + info.Tokens.Output + info.Tokens.Reasoning + info.Tokens.Cache.Read + info.Tokens.Cache.Write - - return []shared.TelemetryEvent{{ - SchemaVersion: telemetryEventSchema, - Channel: shared.TelemetryChannelHook, - OccurredAt: occurredAt, - AccountID: "", - WorkspaceID: shared.SanitizeWorkspace(info.Path.CWD), - SessionID: strings.TrimSpace(info.SessionID), - TurnID: strings.TrimSpace(info.ParentID), - MessageID: messageID, - ProviderID: providerID, - AgentName: "opencode", - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: strings.TrimSpace(info.ModelID), - TokenUsage: core.TokenUsage{ - InputTokens: core.Int64Ptr(info.Tokens.Input), - OutputTokens: core.Int64Ptr(info.Tokens.Output), - ReasoningTokens: core.Int64Ptr(info.Tokens.Reasoning), - CacheReadTokens: core.Int64Ptr(info.Tokens.Cache.Read), - 
CacheWriteTokens: core.Int64Ptr(info.Tokens.Cache.Write), - TotalTokens: core.Int64Ptr(totalTokens), - CostUSD: core.Float64Ptr(info.Cost), - }, - Status: shared.TelemetryStatusOK, - Payload: mergePayload(rawPayload, map[string]any{ - "event_type": "message.updated", - }), - }}, nil - - case "tool.execute.after": - if len(ev.Payload) == 0 { - if includeUnknown { - return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, typ)}, nil - } - return nil, nil - } - var payload toolPayload - if err := json.Unmarshal(ev.Payload, &payload); err != nil { - return nil, fmt.Errorf("decode tool.execute.after payload: %w", err) - } - toolCallID := strings.TrimSpace(payload.ToolCallID) - if toolCallID == "" { - if includeUnknown { - return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, typ)}, nil - } - return nil, nil - } - toolName := strings.ToLower(core.FirstNonEmpty(strings.TrimSpace(payload.ToolName), strings.TrimSpace(payload.Name), "unknown")) - - return []shared.TelemetryEvent{{ - SchemaVersion: telemetryEventSchema, - Channel: shared.TelemetryChannelHook, - OccurredAt: hookTimestampOrNow(payload.Timestamp), - AccountID: "", - SessionID: strings.TrimSpace(payload.SessionID), - MessageID: strings.TrimSpace(payload.MessageID), - ToolCallID: toolCallID, - ProviderID: "opencode", - AgentName: "opencode", - EventType: shared.TelemetryEventTypeToolUsage, - ToolName: toolName, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - Status: shared.TelemetryStatusOK, - Payload: mergePayload(rawPayload, map[string]any{ - "event_type": "tool.execute.after", - }), - }}, nil - } - - if includeUnknown { - return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, typ)}, nil - } - return nil, nil -} - -func parseToolExecuteAfterHook(root map[string]json.RawMessage, rawPayload map[string]any) ([]shared.TelemetryEvent, error) { - var input hookToolExecuteAfterInput - if rawInput, ok := root["input"]; ok 
{ - if err := json.Unmarshal(rawInput, &input); err != nil { - return nil, fmt.Errorf("decode tool.execute.after hook input: %w", err) - } - } - var output hookToolExecuteAfterOutput - if rawOutput, ok := root["output"]; ok { - _ = json.Unmarshal(rawOutput, &output) - } - - toolCallID := strings.TrimSpace(input.CallID) - if toolCallID == "" { - return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryHookSchema, "tool.execute.after")}, nil - } - toolName := strings.ToLower(core.FirstNonEmpty(strings.TrimSpace(input.Tool), "unknown")) - - return []shared.TelemetryEvent{{ - SchemaVersion: telemetryHookSchema, - Channel: shared.TelemetryChannelHook, - OccurredAt: parseHookTimestamp(root), - AccountID: "", - SessionID: strings.TrimSpace(input.SessionID), - ToolCallID: toolCallID, - ProviderID: "opencode", - AgentName: "opencode", - EventType: shared.TelemetryEventTypeToolUsage, - ToolName: toolName, - TokenUsage: core.TokenUsage{ - Requests: core.Int64Ptr(1), - }, - Status: shared.TelemetryStatusOK, - Payload: mergePayload(rawPayload, map[string]any{ - "hook": "tool.execute.after", - "title": strings.TrimSpace(output.Title), - }), - }}, nil -} - -func parseChatMessageHook(root map[string]json.RawMessage, rawPayload map[string]any) ([]shared.TelemetryEvent, error) { - var input hookChatMessageInput - if rawInput, ok := root["input"]; ok { - if err := json.Unmarshal(rawInput, &input); err != nil { - return nil, fmt.Errorf("decode chat.message hook input: %w", err) - } - } - var output hookChatMessageOutput - if rawOutput, ok := root["output"]; ok { - _ = json.Unmarshal(rawOutput, &output) - } - var outputMap map[string]any - if rawOutput, ok := root["output"]; ok { - _ = json.Unmarshal(rawOutput, &outputMap) - } - - sessionID := core.FirstNonEmpty(input.SessionID, output.Message.SessionID) - turnID := core.FirstNonEmpty(input.MessageID, output.Message.ID) - messageID := core.FirstNonEmpty(output.Message.ID, input.MessageID) - outputProviderID := 
shared.FirstPathString(outputMap, - []string{"message", "model", "providerID"}, - []string{"message", "model", "provider_id"}, - []string{"message", "info", "providerID"}, - []string{"message", "info", "provider_id"}, - []string{"message", "info", "model", "providerID"}, - []string{"message", "info", "model", "provider_id"}, - []string{"model", "providerID"}, - []string{"model", "provider_id"}, - []string{"providerID"}, - []string{"provider_id"}, - []string{"message", "providerID"}, - []string{"message", "provider_id"}, - ) - outputModelID := shared.FirstPathString(outputMap, - []string{"message", "model", "modelID"}, - []string{"message", "model", "model_id"}, - []string{"message", "info", "modelID"}, - []string{"message", "info", "model_id"}, - []string{"message", "info", "model", "modelID"}, - []string{"message", "info", "model", "model_id"}, - []string{"model", "modelID"}, - []string{"model", "model_id"}, - []string{"modelID"}, - []string{"model_id"}, - []string{"message", "modelID"}, - []string{"message", "model_id"}, - ) - u := extractUsage(outputMap) - providerID := core.FirstNonEmpty(outputProviderID, input.Model.ProviderID, "opencode") - modelRaw := strings.TrimSpace(outputModelID) - if !hasUsage(u) { - providerID = core.FirstNonEmpty(outputProviderID, input.Model.ProviderID, "opencode") - modelRaw = core.FirstNonEmpty(outputModelID, strings.TrimSpace(input.Model.ModelID)) - } - upstreamProvider := sanitizeUpstreamProviderCandidate(core.FirstNonEmpty( - shared.FirstPathString(outputMap, - []string{"upstream_provider"}, - []string{"upstreamProvider"}, - []string{"route", "provider_name"}, - []string{"route", "providerName"}, - []string{"route", "provider"}, - []string{"router", "provider_name"}, - []string{"router", "providerName"}, - []string{"router", "provider"}, - []string{"routing", "provider_name"}, - []string{"routing", "providerName"}, - []string{"routing", "provider"}, - []string{"endpoint", "provider_name"}, - []string{"endpoint", "providerName"}, 
- []string{"endpoint", "provider"}, - []string{"provider_name"}, - []string{"providerName"}, - []string{"provider"}, - []string{"message", "provider_name"}, - []string{"message", "providerName"}, - []string{"message", "provider"}, - []string{"message", "info", "provider_name"}, - []string{"message", "info", "providerName"}, - []string{"message", "info", "provider"}, - ), - )) - if upstreamProvider == "" { - modelProviderHint := sanitizeUpstreamProviderCandidate(core.FirstNonEmpty( - shared.FirstPathString(outputMap, - []string{"message", "model", "provider"}, - []string{"message", "model", "provider_name"}, - []string{"message", "model", "providerName"}, - []string{"model", "provider"}, - []string{"model", "provider_name"}, - []string{"model", "providerName"}, - ), - outputProviderID, - )) - if modelProviderHint != "" { - upstreamProvider = modelProviderHint - } - } - contextSummary := extractContextSummary(outputMap) - - if turnID == "" && sessionID == "" { - return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryHookSchema, "chat.message")}, nil - } - - normalized := map[string]any{ - "hook": "chat.message", - "agent": strings.TrimSpace(input.Agent), - "variant": strings.TrimSpace(input.Variant), - "parts_count": output.PartsCount, - "context": contextSummary, - } - if upstreamProvider != "" { - normalized["upstream_provider"] = upstreamProvider - } - - return []shared.TelemetryEvent{{ - SchemaVersion: telemetryHookSchema, - Channel: shared.TelemetryChannelHook, - OccurredAt: parseHookTimestamp(root), - AccountID: "", - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ProviderID: providerID, - AgentName: "opencode", - EventType: shared.TelemetryEventTypeMessageUsage, - ModelRaw: modelRaw, - TokenUsage: core.TokenUsage{ - InputTokens: u.InputTokens, - OutputTokens: u.OutputTokens, - ReasoningTokens: u.ReasoningTokens, - CacheReadTokens: u.CacheReadTokens, - CacheWriteTokens: u.CacheWriteTokens, - TotalTokens: u.TotalTokens, - 
CostUSD: u.CostUSD, - Requests: core.Int64Ptr(1), - }, - Status: shared.TelemetryStatusOK, - Payload: mergePayload(rawPayload, normalized), - }}, nil -} - -func sanitizeUpstreamProviderCandidate(value string) string { - name := strings.TrimSpace(value) - if name == "" { - return "" - } - clean := strings.ToLower(name) - switch clean { - case "openrouter", "openusage", "opencode", "unknown": - return "" - } - return clean -} - -func extractUpstreamProviderFromMaps(payloads ...map[string]any) string { - for _, payload := range payloads { - if len(payload) == 0 { - continue - } - candidate := sanitizeUpstreamProviderCandidate(core.FirstNonEmpty( - shared.FirstPathString(payload, - []string{"upstream_provider"}, - []string{"upstreamProvider"}, - []string{"route", "provider_name"}, - []string{"route", "providerName"}, - []string{"route", "provider"}, - []string{"router", "provider_name"}, - []string{"router", "providerName"}, - []string{"router", "provider"}, - []string{"routing", "provider_name"}, - []string{"routing", "providerName"}, - []string{"routing", "provider"}, - []string{"endpoint", "provider_name"}, - []string{"endpoint", "providerName"}, - []string{"endpoint", "provider"}, - []string{"provider_name"}, - []string{"providerName"}, - []string{"provider"}, - []string{"message", "provider_name"}, - []string{"message", "providerName"}, - []string{"message", "provider"}, - []string{"message", "info", "provider_name"}, - []string{"message", "info", "providerName"}, - []string{"message", "info", "provider"}, - ), - shared.FirstPathString(payload, - []string{"message", "model", "provider"}, - []string{"message", "model", "provider_name"}, - []string{"message", "model", "providerName"}, - []string{"model", "provider"}, - []string{"model", "provider_name"}, - []string{"model", "providerName"}, - []string{"model", "providerID"}, - ), - )) - if candidate != "" { - return candidate - } - - rawResponseBody := core.FirstNonEmpty( - shared.FirstPathString(payload, 
[]string{"error", "data", "responseBody"}), - shared.FirstPathString(payload, []string{"error", "responseBody"}), - ) - if rawResponseBody == "" { - continue - } - responseBodyPayload := decodeJSONMap([]byte(rawResponseBody)) - candidate = sanitizeUpstreamProviderCandidate(core.FirstNonEmpty( - shared.FirstPathString(responseBodyPayload, - []string{"error", "metadata", "provider_name"}, - []string{"error", "metadata", "providerName"}, - []string{"metadata", "provider_name"}, - []string{"metadata", "providerName"}, - []string{"metadata", "provider"}, - []string{"provider_name"}, - []string{"providerName"}, - []string{"provider"}, - ), - )) - if candidate != "" { - return candidate - } - } - return "" -} - -func buildRawEnvelope(rawPayload map[string]any, schemaVersion, detectedType string) shared.TelemetryEvent { - occurredAt := parseHookTimestampAny(rawPayload) - providerID := core.FirstNonEmpty( - shared.FirstPathString(rawPayload, - []string{"provider_id"}, - []string{"providerID"}, - []string{"input", "model", "providerID"}, - []string{"output", "message", "model", "providerID"}, - []string{"output", "model", "providerID"}, - []string{"model", "providerID"}, - []string{"event", "properties", "info", "providerID"}, - ), - "opencode", - ) - sessionID := shared.FirstPathString(rawPayload, - []string{"session_id"}, - []string{"sessionID"}, - []string{"input", "sessionID"}, - []string{"output", "message", "sessionID"}, - []string{"event", "properties", "info", "sessionID"}, - ) - turnID := shared.FirstPathString(rawPayload, - []string{"turn_id"}, - []string{"turnID"}, - []string{"input", "messageID"}, - []string{"output", "message", "id"}, - []string{"event", "properties", "info", "parentID"}, - ) - messageID := shared.FirstPathString(rawPayload, - []string{"message_id"}, - []string{"messageID"}, - []string{"input", "messageID"}, - []string{"output", "message", "id"}, - []string{"event", "properties", "info", "id"}, - ) - toolCallID := 
shared.FirstPathString(rawPayload, - []string{"tool_call_id"}, - []string{"toolCallID"}, - []string{"input", "callID"}, - []string{"event", "payload", "toolCallID"}, - ) - modelRaw := shared.FirstPathString(rawPayload, - []string{"model_id"}, - []string{"modelID"}, - []string{"input", "model", "modelID"}, - []string{"output", "message", "model", "modelID"}, - []string{"output", "model", "modelID"}, - []string{"model", "modelID"}, - []string{"event", "properties", "info", "modelID"}, - ) - workspace := shared.SanitizeWorkspace(shared.FirstPathString(rawPayload, - []string{"workspace_id"}, - []string{"workspaceID"}, - []string{"event", "properties", "info", "path", "cwd"}, - )) - eventName := core.FirstNonEmpty( - detectedType, - shared.FirstPathString(rawPayload, []string{"hook"}), - shared.FirstPathString(rawPayload, []string{"type"}), - shared.FirstPathString(rawPayload, []string{"event"}), - ) - - return shared.TelemetryEvent{ - SchemaVersion: schemaVersion, - Channel: shared.TelemetryChannelHook, - OccurredAt: occurredAt, - AccountID: "", - WorkspaceID: workspace, - SessionID: sessionID, - TurnID: turnID, - MessageID: messageID, - ToolCallID: toolCallID, - ProviderID: providerID, - AgentName: "opencode", - EventType: shared.TelemetryEventTypeRawEnvelope, - ModelRaw: modelRaw, - Status: shared.TelemetryStatusUnknown, - Payload: mergePayload(rawPayload, map[string]any{ - "captured_as": "raw_envelope", - "detected_event": eventName, - }), - } -} - -func collectPartSummary(ctx context.Context, db *sql.DB) (map[string]partSummary, error) { - rows, err := db.QueryContext(ctx, ` - SELECT message_id, COALESCE(NULLIF(TRIM(json_extract(data, '$.type')), ''), 'unknown') AS part_type, COUNT(*) - FROM part - GROUP BY message_id, part_type - `) - if err != nil { - return nil, err - } - defer rows.Close() - - out := make(map[string]partSummary) - for rows.Next() { - var ( - messageID string - partType string - count int64 - ) - if err := rows.Scan(&messageID, &partType, 
&count); err != nil { - continue - } - messageID = strings.TrimSpace(messageID) - if messageID == "" { - continue - } - partType = strings.TrimSpace(partType) - if partType == "" { - partType = "unknown" - } - s := out[messageID] - if s.PartsByType == nil { - s.PartsByType = map[string]int64{} - } - s.PartsTotal += count - s.PartsByType[partType] += count - out[messageID] = s - } - if err := rows.Err(); err != nil { - return out, err - } - return out, nil -} - -func sqliteTableExists(ctx context.Context, db *sql.DB, table string) bool { - var exists int - err := db.QueryRowContext(ctx, `SELECT 1 FROM sqlite_master WHERE type='table' AND name=? LIMIT 1`, strings.TrimSpace(table)).Scan(&exists) - return err == nil && exists == 1 -} - -func mapToolStatus(status string) (shared.TelemetryStatus, bool) { - status = strings.ToLower(strings.TrimSpace(status)) - switch status { - case "", "completed", "complete", "success", "succeeded": - return shared.TelemetryStatusOK, true - case "error", "failed", "failure": - return shared.TelemetryStatusError, true - case "aborted", "cancelled", "canceled", "terminated": - return shared.TelemetryStatusAborted, true - case "running", "pending", "queued", "in_progress", "in-progress": - return shared.TelemetryStatusUnknown, false - default: - return shared.TelemetryStatusUnknown, true - } -} - -func mapMessageStatus(reason string) shared.TelemetryStatus { - reason = strings.ToLower(strings.TrimSpace(reason)) - switch { - case strings.Contains(reason, "error"), strings.Contains(reason, "fail"): - return shared.TelemetryStatusError - case strings.Contains(reason, "abort"), strings.Contains(reason, "cancel"): - return shared.TelemetryStatusAborted - default: - return shared.TelemetryStatusOK - } -} - -func appendDedupTelemetryEvents( - out *[]shared.TelemetryEvent, - events []shared.TelemetryEvent, - seenMessage map[string]bool, - seenTools map[string]bool, - accountID string, -) { - for _, ev := range events { - ev.AccountID = 
core.FirstNonEmpty(accountID, ev.AccountID) - switch ev.EventType { - case shared.TelemetryEventTypeToolUsage: - key := core.FirstNonEmpty(strings.TrimSpace(ev.ToolCallID)) - if key == "" { - key = core.FirstNonEmpty(strings.TrimSpace(ev.SessionID), strings.TrimSpace(ev.MessageID)) + "|" + strings.ToLower(strings.TrimSpace(ev.ToolName)) - } - if key != "" { - if seenTools[key] { - continue - } - seenTools[key] = true - } - case shared.TelemetryEventTypeMessageUsage: - key := core.FirstNonEmpty(strings.TrimSpace(ev.MessageID)) - if key == "" { - key = core.FirstNonEmpty(strings.TrimSpace(ev.SessionID), strings.TrimSpace(ev.TurnID)) - } - if key != "" { - if seenMessage[key] { - continue - } - seenMessage[key] = true - } - } - *out = append(*out, ev) - } -} - -func hasUsage(u usage) bool { - for _, value := range []*int64{ - u.InputTokens, u.OutputTokens, u.ReasoningTokens, u.CacheReadTokens, u.CacheWriteTokens, u.TotalTokens, - } { - if value != nil && *value > 0 { - return true - } - } - return u.CostUSD != nil && *u.CostUSD > 0 -} - -func extractUsage(output map[string]any) usage { - if len(output) == 0 { - return usage{} - } - input := shared.FirstPathNumber(output, - []string{"usage", "input_tokens"}, []string{"usage", "inputTokens"}, []string{"usage", "input"}, - []string{"message", "usage", "input_tokens"}, []string{"message", "usage", "inputTokens"}, []string{"message", "usage", "input"}, - []string{"tokens", "input"}, []string{"input_tokens"}, []string{"inputTokens"}, - ) - outputTokens := shared.FirstPathNumber(output, - []string{"usage", "output_tokens"}, []string{"usage", "outputTokens"}, []string{"usage", "output"}, - []string{"message", "usage", "output_tokens"}, []string{"message", "usage", "outputTokens"}, []string{"message", "usage", "output"}, - []string{"tokens", "output"}, []string{"output_tokens"}, []string{"outputTokens"}, - ) - reasoning := shared.FirstPathNumber(output, - []string{"usage", "reasoning_tokens"}, []string{"usage", 
"reasoningTokens"}, []string{"usage", "reasoning"}, - []string{"message", "usage", "reasoning_tokens"}, []string{"message", "usage", "reasoningTokens"}, []string{"message", "usage", "reasoning"}, - []string{"tokens", "reasoning"}, []string{"reasoning_tokens"}, []string{"reasoningTokens"}, - ) - cacheRead := shared.FirstPathNumber(output, - []string{"usage", "cache_read_input_tokens"}, []string{"usage", "cacheReadInputTokens"}, []string{"usage", "cache_read_tokens"}, - []string{"usage", "cacheReadTokens"}, []string{"usage", "cache", "read"}, - []string{"message", "usage", "cache_read_input_tokens"}, []string{"message", "usage", "cacheReadInputTokens"}, []string{"message", "usage", "cache", "read"}, - []string{"tokens", "cache", "read"}, - ) - cacheWrite := shared.FirstPathNumber(output, - []string{"usage", "cache_creation_input_tokens"}, []string{"usage", "cacheCreationInputTokens"}, []string{"usage", "cache_write_tokens"}, - []string{"usage", "cacheWriteTokens"}, []string{"usage", "cache", "write"}, - []string{"message", "usage", "cache_creation_input_tokens"}, []string{"message", "usage", "cacheCreationInputTokens"}, []string{"message", "usage", "cache", "write"}, - []string{"tokens", "cache", "write"}, - ) - total := shared.FirstPathNumber(output, - []string{"usage", "total_tokens"}, []string{"usage", "totalTokens"}, []string{"usage", "total"}, - []string{"message", "usage", "total_tokens"}, []string{"message", "usage", "totalTokens"}, []string{"message", "usage", "total"}, - []string{"tokens", "total"}, []string{"total_tokens"}, []string{"totalTokens"}, - ) - cost := shared.FirstPathNumber(output, - []string{"usage", "cost_usd"}, []string{"usage", "costUSD"}, []string{"usage", "cost"}, - []string{"message", "usage", "cost_usd"}, []string{"message", "usage", "costUSD"}, []string{"message", "usage", "cost"}, - []string{"cost_usd"}, []string{"costUSD"}, []string{"cost"}, - ) - - result := usage{ - InputTokens: shared.NumberToInt64Ptr(input), - OutputTokens: 
shared.NumberToInt64Ptr(outputTokens), - ReasoningTokens: shared.NumberToInt64Ptr(reasoning), - CacheReadTokens: shared.NumberToInt64Ptr(cacheRead), - CacheWriteTokens: shared.NumberToInt64Ptr(cacheWrite), - TotalTokens: shared.NumberToInt64Ptr(total), - CostUSD: shared.NumberToFloat64Ptr(cost), - } - if result.TotalTokens == nil { - combined := int64(0) - hasAny := false - for _, ptr := range []*int64{result.InputTokens, result.OutputTokens, result.ReasoningTokens, result.CacheReadTokens, result.CacheWriteTokens} { - if ptr != nil { - combined += *ptr - hasAny = true - } - } - if hasAny { - result.TotalTokens = core.Int64Ptr(combined) - } - } - return result -} - -func extractContextSummary(output map[string]any) map[string]any { - if len(output) == 0 { - return map[string]any{} - } - partsTotal := shared.FirstPathNumber(output, []string{"context", "parts_total"}, []string{"context", "partsTotal"}, []string{"parts_count"}) - partsByType := map[string]any{} - if m, ok := shared.PathMap(output, "context", "parts_by_type"); ok { - for key, value := range m { - if count, ok := shared.NumberFromAny(value); ok { - partsByType[strings.TrimSpace(key)] = int64(count) - } - } - } - if len(partsByType) == 0 { - if arr, ok := shared.PathSlice(output, "parts"); ok { - typeCounts := make(map[string]int64) - for _, part := range arr { - partMap, ok := part.(map[string]any) - if !ok { - typeCounts["unknown"]++ - continue - } - partType := "unknown" - if rawType, ok := partMap["type"].(string); ok && strings.TrimSpace(rawType) != "" { - partType = strings.TrimSpace(rawType) - } - typeCounts[partType]++ - } - for key, value := range typeCounts { - partsByType[key] = value - } - if partsTotal == nil { - v := float64(len(arr)) - partsTotal = &v - } - } - } - return map[string]any{ - "parts_total": ptrInt64Value(shared.NumberToInt64Ptr(partsTotal)), - "parts_by_type": partsByType, - } -} - -func decodeRawMessageMap(root map[string]json.RawMessage) map[string]any { - out := 
make(map[string]any, len(root)) - for key, raw := range root { - if len(raw) == 0 { - out[key] = nil - continue - } - var decoded any - if err := json.Unmarshal(raw, &decoded); err != nil { - out[key] = string(raw) - continue - } - out[key] = decoded - } - return out -} - -func decodeJSONMap(raw []byte) map[string]any { - var out map[string]any - if err := json.Unmarshal(raw, &out); err == nil && len(out) > 0 { - return out - } - return map[string]any{"_raw_json": string(raw)} -} - -func mergePayload(rawPayload map[string]any, normalized map[string]any) map[string]any { - if len(rawPayload) == 0 && len(normalized) == 0 { - return map[string]any{} - } - out := make(map[string]any, 8) - if len(normalized) > 0 { - out["_normalized"] = normalized - for key, value := range normalized { - if _, exists := out[key]; !exists { - out[key] = value - } - } - } - rawSummary := summarizeRawPayload(rawPayload) - if len(rawSummary) > 0 { - out["_raw"] = rawSummary - for key, value := range rawSummary { - if _, exists := out[key]; !exists { - out[key] = value - } - } - } - return out -} - -func summarizeRawPayload(rawPayload map[string]any) map[string]any { - if len(rawPayload) == 0 { - return map[string]any{} - } - out := map[string]any{ - "raw_keys": len(rawPayload), - } - - if hook := shared.FirstPathString(rawPayload, []string{"hook"}); hook != "" { - out["hook"] = hook - } - if typ := shared.FirstPathString(rawPayload, []string{"type"}); typ != "" { - out["type"] = typ - } - - if value := core.FirstNonEmpty( - shared.FirstPathString(rawPayload, []string{"hook"}), - shared.FirstPathString(rawPayload, []string{"event"}), - shared.FirstPathString(rawPayload, []string{"type"}), - ); value != "" { - out["event"] = value - } - if value := core.FirstNonEmpty( - shared.FirstPathString(rawPayload, []string{"sessionID"}), - shared.FirstPathString(rawPayload, []string{"session_id"}), - shared.FirstPathString(rawPayload, []string{"input", "sessionID"}), - 
shared.FirstPathString(rawPayload, []string{"output", "message", "sessionID"}), - ); value != "" { - out["session_id"] = value - } - if value := core.FirstNonEmpty( - shared.FirstPathString(rawPayload, []string{"messageID"}), - shared.FirstPathString(rawPayload, []string{"message_id"}), - shared.FirstPathString(rawPayload, []string{"input", "messageID"}), - shared.FirstPathString(rawPayload, []string{"output", "message", "id"}), - ); value != "" { - out["message_id"] = value - } - if value := core.FirstNonEmpty( - shared.FirstPathString(rawPayload, []string{"toolCallID"}), - shared.FirstPathString(rawPayload, []string{"tool_call_id"}), - shared.FirstPathString(rawPayload, []string{"input", "callID"}), - ); value != "" { - out["tool_call_id"] = value - } - if value := core.FirstNonEmpty( - shared.FirstPathString(rawPayload, []string{"providerID"}), - shared.FirstPathString(rawPayload, []string{"provider_id"}), - shared.FirstPathString(rawPayload, []string{"input", "model", "providerID"}), - shared.FirstPathString(rawPayload, []string{"output", "message", "model", "providerID"}), - ); value != "" { - out["provider_id"] = value - } - if value := core.FirstNonEmpty( - shared.FirstPathString(rawPayload, []string{"modelID"}), - shared.FirstPathString(rawPayload, []string{"model_id"}), - shared.FirstPathString(rawPayload, []string{"input", "model", "modelID"}), - shared.FirstPathString(rawPayload, []string{"output", "message", "model", "modelID"}), - ); value != "" { - out["model_id"] = value - } - if ts := shared.FirstPathString(rawPayload, []string{"timestamp"}, []string{"time"}); ts != "" { - out["timestamp"] = ts - } - return out -} - -func ptrInt64Value(v *int64) any { - if v == nil { - return nil - } - return *v -} - -func parseHookTimestampAny(root map[string]any) time.Time { - if root == nil { - return time.Now().UTC() - } - if ts := shared.FirstPathNumber(root, - []string{"timestamp"}, - []string{"time"}, - []string{"event", "timestamp"}, - []string{"event", 
"properties", "info", "time", "completed"}, - []string{"event", "properties", "info", "time", "created"}, - ); ts != nil && *ts > 0 { - return hookTimestampOrNow(int64(*ts)) - } - if raw := shared.FirstPathString(root, []string{"timestamp"}, []string{"time"}, []string{"event", "timestamp"}); raw != "" { - if ts, ok := shared.ParseFlexibleTimestamp(raw); ok { - return shared.UnixAuto(ts) - } - } - return time.Now().UTC() -} - -func parseHookTimestamp(root map[string]json.RawMessage) time.Time { - if raw, ok := root["timestamp"]; ok { - var intVal int64 - if err := json.Unmarshal(raw, &intVal); err == nil && intVal > 0 { - return hookTimestampOrNow(intVal) - } - var strVal string - if err := json.Unmarshal(raw, &strVal); err == nil { - if ts, ok := shared.ParseFlexibleTimestamp(strVal); ok { - return shared.UnixAuto(ts) - } - } - } - return time.Now().UTC() -} - -func hookTimestampOrNow(ts int64) time.Time { - if ts <= 0 { - return time.Now().UTC() - } - return shared.UnixAuto(ts) -} - -func ptrInt64FromFloat(v *float64) int64 { - if v == nil { - return 0 - } - return int64(*v) -} diff --git a/internal/providers/opencode/telemetry_event_file.go b/internal/providers/opencode/telemetry_event_file.go new file mode 100644 index 0000000..efbf50a --- /dev/null +++ b/internal/providers/opencode/telemetry_event_file.go @@ -0,0 +1,173 @@ +package opencode + +import ( + "bufio" + "encoding/json" + "fmt" + "os" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +// ParseTelemetryEventFile parses OpenCode event jsonl/ndjson files. 
+func ParseTelemetryEventFile(path string) ([]shared.TelemetryEvent, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var out []shared.TelemetryEvent + scanner := bufio.NewScanner(f) + scanner.Buffer(make([]byte, 0, 512*1024), 8*1024*1024) + lineNumber := 0 + + for scanner.Scan() { + lineNumber++ + var ev eventEnvelope + if err := json.Unmarshal(scanner.Bytes(), &ev); err != nil { + continue + } + + switch eventType := telemetryEventType(ev); eventType { + case "message.updated": + props, ok := decodeMessageUpdatedProps(ev.Properties) + if !ok || strings.ToLower(strings.TrimSpace(props.Info.Role)) != "assistant" { + continue + } + out = append(out, buildJSONLMessageUsageEvent(path, lineNumber, props.Info)) + case "tool.execute.after": + tool, rawPayloadMap, ok := decodeToolPayload(ev.Payload) + if !ok { + continue + } + out = append(out, buildJSONLToolUsageEvent(path, lineNumber, tool, rawPayloadMap)) + } + } + if err := scanner.Err(); err != nil { + return out, err + } + return out, nil +} + +func telemetryEventType(ev eventEnvelope) string { + eventType := strings.TrimSpace(ev.Type) + if eventType == "" { + eventType = strings.TrimSpace(ev.Event) + } + return eventType +} + +func decodeMessageUpdatedProps(raw json.RawMessage) (messageUpdatedProps, bool) { + var props messageUpdatedProps + if err := json.Unmarshal(raw, &props); err != nil { + return messageUpdatedProps{}, false + } + return props, true +} + +func buildJSONLMessageUsageEvent(path string, lineNumber int, info assistantInfo) shared.TelemetryEvent { + messageID := strings.TrimSpace(info.ID) + if messageID == "" { + messageID = fmt.Sprintf("%s:%d", path, lineNumber) + } + + total := info.Tokens.Input + info.Tokens.Output + info.Tokens.Reasoning + info.Tokens.Cache.Read + info.Tokens.Cache.Write + occurredAt := shared.UnixAuto(info.Time.Created) + if info.Time.Completed > 0 { + occurredAt = shared.UnixAuto(info.Time.Completed) + } + + return 
shared.TelemetryEvent{ + SchemaVersion: telemetryEventSchema, + Channel: shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + WorkspaceID: shared.SanitizeWorkspace(info.Path.CWD), + SessionID: strings.TrimSpace(info.SessionID), + TurnID: strings.TrimSpace(info.ParentID), + MessageID: messageID, + ProviderID: core.FirstNonEmpty(strings.TrimSpace(info.ProviderID), "opencode"), + AgentName: "opencode", + EventType: shared.TelemetryEventTypeMessageUsage, + ModelRaw: strings.TrimSpace(info.ModelID), + TokenUsage: core.TokenUsage{ + InputTokens: core.Int64Ptr(info.Tokens.Input), + OutputTokens: core.Int64Ptr(info.Tokens.Output), + ReasoningTokens: core.Int64Ptr(info.Tokens.Reasoning), + CacheReadTokens: core.Int64Ptr(info.Tokens.Cache.Read), + CacheWriteTokens: core.Int64Ptr(info.Tokens.Cache.Write), + TotalTokens: core.Int64Ptr(total), + CostUSD: core.Float64Ptr(info.Cost), + }, + Status: shared.TelemetryStatusOK, + Payload: map[string]any{ + "file": path, + "line": lineNumber, + }, + } +} + +func decodeToolPayload(raw json.RawMessage) (toolPayload, map[string]any, bool) { + if len(raw) == 0 { + return toolPayload{}, nil, false + } + var tool toolPayload + if err := json.Unmarshal(raw, &tool); err != nil { + return toolPayload{}, nil, false + } + var rawPayloadMap map[string]any + if err := json.Unmarshal(raw, &rawPayloadMap); err != nil { + rawPayloadMap = nil + } + return tool, rawPayloadMap, true +} + +func buildJSONLToolUsageEvent(path string, lineNumber int, tool toolPayload, rawPayloadMap map[string]any) shared.TelemetryEvent { + toolCallID := strings.TrimSpace(tool.ToolCallID) + if toolCallID == "" { + toolCallID = fmt.Sprintf("%s:%d", path, lineNumber) + } + + toolName := strings.TrimSpace(tool.ToolName) + if toolName == "" { + toolName = strings.TrimSpace(tool.Name) + } + if toolName == "" { + toolName = "unknown" + } + + occurredAt := time.Now().UTC() + if tool.Timestamp > 0 { + occurredAt = shared.UnixAuto(tool.Timestamp) + } + + toolFilePath := "" + if 
paths := shared.ExtractFilePathsFromPayload(rawPayloadMap); len(paths) > 0 { + toolFilePath = paths[0] + } + + return shared.TelemetryEvent{ + SchemaVersion: telemetryEventSchema, + Channel: shared.TelemetryChannelJSONL, + OccurredAt: occurredAt, + SessionID: strings.TrimSpace(tool.SessionID), + MessageID: strings.TrimSpace(tool.MessageID), + ToolCallID: toolCallID, + ProviderID: "opencode", + AgentName: "opencode", + EventType: shared.TelemetryEventTypeToolUsage, + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(1), + }, + ToolName: strings.ToLower(toolName), + Status: shared.TelemetryStatusOK, + Payload: map[string]any{ + "source_file": path, + "line": lineNumber, + "file": toolFilePath, + }, + } +} diff --git a/internal/providers/opencode/telemetry_hooks.go b/internal/providers/opencode/telemetry_hooks.go new file mode 100644 index 0000000..aed3252 --- /dev/null +++ b/internal/providers/opencode/telemetry_hooks.go @@ -0,0 +1,757 @@ +package opencode + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +// ParseTelemetryHookPayload parses OpenCode plugin hook payloads. 
+func ParseTelemetryHookPayload(raw []byte) ([]shared.TelemetryEvent, error) { + trimmed := strings.TrimSpace(string(raw)) + if trimmed == "" { + return nil, nil + } + + var root map[string]json.RawMessage + if err := json.Unmarshal([]byte(trimmed), &root); err != nil { + return nil, fmt.Errorf("decode hook payload: %w", err) + } + rootPayload := decodeRawMessageMap(root) + + if eventRaw, ok := root["event"]; ok && len(eventRaw) > 0 { + return parseEventJSON(eventRaw, decodeJSONMap(eventRaw), true) + } + if hookRaw, ok := root["hook"]; ok { + var hook string + if err := json.Unmarshal(hookRaw, &hook); err != nil { + return nil, fmt.Errorf("decode hook name: %w", err) + } + switch strings.TrimSpace(hook) { + case "tool.execute.after": + return parseToolExecuteAfterHook(root, rootPayload) + case "chat.message": + return parseChatMessageHook(root, rootPayload) + default: + return []shared.TelemetryEvent{buildRawEnvelope(rootPayload, telemetryHookSchema, strings.TrimSpace(hook))}, nil + } + } + if _, ok := root["type"]; ok { + return parseEventJSON([]byte(trimmed), decodeJSONMap([]byte(trimmed)), true) + } + + return []shared.TelemetryEvent{buildRawEnvelope(rootPayload, telemetryHookSchema, "")}, nil +} + +func parseEventJSON(raw []byte, rawPayload map[string]any, includeUnknown bool) ([]shared.TelemetryEvent, error) { + var ev eventEnvelope + if err := json.Unmarshal(raw, &ev); err != nil { + return nil, fmt.Errorf("decode opencode event: %w", err) + } + + switch eventType := telemetryEventType(ev); eventType { + case "message.updated": + props, ok := decodeMessageUpdatedProps(ev.Properties) + if !ok { + return nil, fmt.Errorf("decode message.updated properties") + } + info := props.Info + if strings.ToLower(strings.TrimSpace(info.Role)) != "assistant" { + if includeUnknown { + return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, eventType)}, nil + } + return nil, nil + } + if strings.TrimSpace(info.ID) == "" { + if includeUnknown { + 
return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, eventType)}, nil + } + return nil, nil + } + event := buildJSONLMessageUsageEvent("", 0, info) + event.Channel = shared.TelemetryChannelHook + event.Payload = mergePayload(rawPayload, map[string]any{"event_type": "message.updated"}) + return []shared.TelemetryEvent{event}, nil + case "tool.execute.after": + payload, _, ok := decodeToolPayload(ev.Payload) + if !ok || strings.TrimSpace(payload.ToolCallID) == "" { + if includeUnknown { + return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, eventType)}, nil + } + return nil, nil + } + event := buildJSONLToolUsageEvent("", 0, payload, nil) + event.Channel = shared.TelemetryChannelHook + event.OccurredAt = hookTimestampOrNow(payload.Timestamp) + event.Payload = mergePayload(rawPayload, map[string]any{"event_type": "tool.execute.after"}) + return []shared.TelemetryEvent{event}, nil + } + + if includeUnknown { + return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryEventSchema, telemetryEventType(ev))}, nil + } + return nil, nil +} + +func parseToolExecuteAfterHook(root map[string]json.RawMessage, rawPayload map[string]any) ([]shared.TelemetryEvent, error) { + var input hookToolExecuteAfterInput + if rawInput, ok := root["input"]; ok { + if err := json.Unmarshal(rawInput, &input); err != nil { + return nil, fmt.Errorf("decode tool.execute.after hook input: %w", err) + } + } + var output hookToolExecuteAfterOutput + if rawOutput, ok := root["output"]; ok { + _ = json.Unmarshal(rawOutput, &output) + } + + toolCallID := strings.TrimSpace(input.CallID) + if toolCallID == "" { + return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryHookSchema, "tool.execute.after")}, nil + } + + return []shared.TelemetryEvent{{ + SchemaVersion: telemetryHookSchema, + Channel: shared.TelemetryChannelHook, + OccurredAt: parseHookTimestamp(root), + SessionID: strings.TrimSpace(input.SessionID), + ToolCallID: 
toolCallID, + ProviderID: "opencode", + AgentName: "opencode", + EventType: shared.TelemetryEventTypeToolUsage, + ToolName: strings.ToLower(core.FirstNonEmpty(strings.TrimSpace(input.Tool), "unknown")), + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(1), + }, + Status: shared.TelemetryStatusOK, + Payload: mergePayload(rawPayload, map[string]any{ + "hook": "tool.execute.after", + "title": strings.TrimSpace(output.Title), + }), + }}, nil +} + +func parseChatMessageHook(root map[string]json.RawMessage, rawPayload map[string]any) ([]shared.TelemetryEvent, error) { + var input hookChatMessageInput + if rawInput, ok := root["input"]; ok { + if err := json.Unmarshal(rawInput, &input); err != nil { + return nil, fmt.Errorf("decode chat.message hook input: %w", err) + } + } + var output hookChatMessageOutput + if rawOutput, ok := root["output"]; ok { + _ = json.Unmarshal(rawOutput, &output) + } + var outputMap map[string]any + if rawOutput, ok := root["output"]; ok { + _ = json.Unmarshal(rawOutput, &outputMap) + } + + sessionID := core.FirstNonEmpty(input.SessionID, output.Message.SessionID) + turnID := core.FirstNonEmpty(input.MessageID, output.Message.ID) + messageID := core.FirstNonEmpty(output.Message.ID, input.MessageID) + outputProviderID := shared.FirstPathString(outputMap, + []string{"message", "model", "providerID"}, + []string{"message", "model", "provider_id"}, + []string{"message", "info", "providerID"}, + []string{"message", "info", "provider_id"}, + []string{"message", "info", "model", "providerID"}, + []string{"message", "info", "model", "provider_id"}, + []string{"model", "providerID"}, + []string{"model", "provider_id"}, + []string{"providerID"}, + []string{"provider_id"}, + []string{"message", "providerID"}, + []string{"message", "provider_id"}, + ) + outputModelID := shared.FirstPathString(outputMap, + []string{"message", "model", "modelID"}, + []string{"message", "model", "model_id"}, + []string{"message", "info", "modelID"}, + 
[]string{"message", "info", "model_id"}, + []string{"message", "info", "model", "modelID"}, + []string{"message", "info", "model", "model_id"}, + []string{"model", "modelID"}, + []string{"model", "model_id"}, + []string{"modelID"}, + []string{"model_id"}, + []string{"message", "modelID"}, + []string{"message", "model_id"}, + ) + u := extractUsage(outputMap) + providerID := core.FirstNonEmpty(outputProviderID, input.Model.ProviderID, "opencode") + modelRaw := strings.TrimSpace(outputModelID) + if !hasUsage(u) { + modelRaw = core.FirstNonEmpty(outputModelID, strings.TrimSpace(input.Model.ModelID)) + } + upstreamProvider := extractHookUpstreamProvider(outputMap, outputProviderID) + contextSummary := extractContextSummary(outputMap) + + if turnID == "" && sessionID == "" { + return []shared.TelemetryEvent{buildRawEnvelope(rawPayload, telemetryHookSchema, "chat.message")}, nil + } + + normalized := map[string]any{ + "hook": "chat.message", + "agent": strings.TrimSpace(input.Agent), + "variant": strings.TrimSpace(input.Variant), + "parts_count": output.PartsCount, + "context": contextSummary, + } + if upstreamProvider != "" { + normalized["upstream_provider"] = upstreamProvider + } + + return []shared.TelemetryEvent{{ + SchemaVersion: telemetryHookSchema, + Channel: shared.TelemetryChannelHook, + OccurredAt: parseHookTimestamp(root), + SessionID: sessionID, + TurnID: turnID, + MessageID: messageID, + ProviderID: providerID, + AgentName: "opencode", + EventType: shared.TelemetryEventTypeMessageUsage, + ModelRaw: modelRaw, + TokenUsage: core.TokenUsage{ + InputTokens: u.InputTokens, + OutputTokens: u.OutputTokens, + ReasoningTokens: u.ReasoningTokens, + CacheReadTokens: u.CacheReadTokens, + CacheWriteTokens: u.CacheWriteTokens, + TotalTokens: u.TotalTokens, + CostUSD: u.CostUSD, + Requests: core.Int64Ptr(1), + }, + Status: shared.TelemetryStatusOK, + Payload: mergePayload(rawPayload, normalized), + }}, nil +} + +func extractHookUpstreamProvider(outputMap map[string]any, 
outputProviderID string) string {
	// First pass: explicit routing/upstream fields.
	if candidate := sanitizeUpstreamProviderCandidate(core.FirstNonEmpty(
		shared.FirstPathString(outputMap,
			[]string{"upstream_provider"},
			[]string{"upstreamProvider"},
			[]string{"route", "provider_name"},
			[]string{"route", "providerName"},
			[]string{"route", "provider"},
			[]string{"router", "provider_name"},
			[]string{"router", "providerName"},
			[]string{"router", "provider"},
			[]string{"routing", "provider_name"},
			[]string{"routing", "providerName"},
			[]string{"routing", "provider"},
			[]string{"endpoint", "provider_name"},
			[]string{"endpoint", "providerName"},
			[]string{"endpoint", "provider"},
			[]string{"provider_name"},
			[]string{"providerName"},
			[]string{"provider"},
			[]string{"message", "provider_name"},
			[]string{"message", "providerName"},
			[]string{"message", "provider"},
			[]string{"message", "info", "provider_name"},
			[]string{"message", "info", "providerName"},
			[]string{"message", "info", "provider"},
		),
	)); candidate != "" {
		return candidate
	}
	// Second pass: model-level provider fields, then the caller's hint.
	return sanitizeUpstreamProviderCandidate(core.FirstNonEmpty(
		shared.FirstPathString(outputMap,
			[]string{"message", "model", "provider"},
			[]string{"message", "model", "provider_name"},
			[]string{"message", "model", "providerName"},
			[]string{"model", "provider"},
			[]string{"model", "provider_name"},
			[]string{"model", "providerName"},
		),
		outputProviderID,
	))
}

// sanitizeUpstreamProviderCandidate lowercases a provider name and
// rejects aggregator/placeholder names that carry no routing signal.
func sanitizeUpstreamProviderCandidate(value string) string {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return ""
	}
	lowered := strings.ToLower(trimmed)
	switch lowered {
	case "openrouter", "openusage", "opencode", "unknown":
		return ""
	}
	return lowered
}

// extractUpstreamProviderFromMaps scans payload maps (and any embedded
// error responseBody JSON) for the first usable upstream provider name.
func extractUpstreamProviderFromMaps(payloads ...map[string]any) string {
	for _, payload := range payloads {
		if len(payload) == 0 {
			continue
		}
		if candidate := extractHookUpstreamProvider(payload, shared.FirstPathString(payload, []string{"model", "providerID"})); candidate != "" {
			return candidate
		}

		rawResponseBody := core.FirstNonEmpty(
			shared.FirstPathString(payload, []string{"error", "data", "responseBody"}),
			shared.FirstPathString(payload, []string{"error", "responseBody"}),
		)
		if rawResponseBody == "" {
			continue
		}
		// Some providers bury routing metadata inside an error body.
		responseBodyPayload := decodeJSONMap([]byte(rawResponseBody))
		candidate := sanitizeUpstreamProviderCandidate(core.FirstNonEmpty(
			shared.FirstPathString(responseBodyPayload,
				[]string{"error", "metadata", "provider_name"},
				[]string{"error", "metadata", "providerName"},
				[]string{"metadata", "provider_name"},
				[]string{"metadata", "providerName"},
				[]string{"metadata", "provider"},
				[]string{"provider_name"},
				[]string{"providerName"},
				[]string{"provider"},
			),
		))
		if candidate != "" {
			return candidate
		}
	}
	return ""
}

// buildRawEnvelope wraps an unrecognized payload as a raw-envelope event,
// best-effort extracting the common identifiers so it stays correlatable.
func buildRawEnvelope(rawPayload map[string]any, schemaVersion, detectedType string) shared.TelemetryEvent {
	return shared.TelemetryEvent{
		SchemaVersion: schemaVersion,
		Channel:       shared.TelemetryChannelHook,
		OccurredAt:    parseHookTimestampAny(rawPayload),
		WorkspaceID: shared.SanitizeWorkspace(shared.FirstPathString(rawPayload,
			[]string{"workspace_id"},
			[]string{"workspaceID"},
			[]string{"event", "properties", "info", "path", "cwd"},
		)),
		SessionID: shared.FirstPathString(rawPayload,
			[]string{"session_id"},
			[]string{"sessionID"},
			[]string{"input", "sessionID"},
			[]string{"output", "message", "sessionID"},
			[]string{"event", "properties", "info", "sessionID"},
		),
		TurnID: shared.FirstPathString(rawPayload,
			[]string{"turn_id"},
			[]string{"turnID"},
			[]string{"input", "messageID"},
			[]string{"output", "message", "id"},
			[]string{"event", "properties", "info", "parentID"},
		),
		MessageID: shared.FirstPathString(rawPayload,
			[]string{"message_id"},
			[]string{"messageID"},
			[]string{"input", "messageID"},
			[]string{"output", "message", "id"},
			[]string{"event", "properties", "info", "id"},
		),
		ToolCallID: shared.FirstPathString(rawPayload,
			[]string{"tool_call_id"},
			[]string{"toolCallID"},
			[]string{"input", "callID"},
			[]string{"event", "payload", "toolCallID"},
		),
		ProviderID: core.FirstNonEmpty(
			shared.FirstPathString(rawPayload,
				[]string{"provider_id"},
				[]string{"providerID"},
				[]string{"input", "model", "providerID"},
				[]string{"output", "message", "model", "providerID"},
				[]string{"output", "model", "providerID"},
				[]string{"model", "providerID"},
				[]string{"event", "properties", "info", "providerID"},
			),
			"opencode",
		),
		AgentName: "opencode",
		EventType: shared.TelemetryEventTypeRawEnvelope,
		ModelRaw: shared.FirstPathString(rawPayload,
			[]string{"model_id"},
			[]string{"modelID"},
			[]string{"input", "model", "modelID"},
			[]string{"output", "message", "model", "modelID"},
			[]string{"output", "model", "modelID"},
			[]string{"model", "modelID"},
			[]string{"event", "properties", "info", "modelID"},
		),
		Status: shared.TelemetryStatusUnknown,
		Payload: mergePayload(rawPayload, map[string]any{
			"captured_as": "raw_envelope",
			"detected_event": core.FirstNonEmpty(
				detectedType,
				shared.FirstPathString(rawPayload, []string{"hook"}),
				shared.FirstPathString(rawPayload, []string{"type"}),
				shared.FirstPathString(rawPayload, []string{"event"}),
			),
		}),
	}
}

// mapToolStatus maps a tool status string to a telemetry status. The
// bool is false for in-flight states that should not be recorded yet.
func mapToolStatus(status string) (shared.TelemetryStatus, bool) {
	switch strings.ToLower(strings.TrimSpace(status)) {
	case "", "completed", "complete", "success", "succeeded":
		return shared.TelemetryStatusOK, true
	case "error", "failed", "failure":
		return shared.TelemetryStatusError, true
	case "aborted", "cancelled", "canceled", "terminated":
		return shared.TelemetryStatusAborted, true
	case "running", "pending", "queued", "in_progress", "in-progress":
		return shared.TelemetryStatusUnknown, false
	default:
		return shared.TelemetryStatusUnknown, true
	}
}

func mapMessageStatus(reason string) shared.TelemetryStatus
{
	// Classify a finish reason by substring: error > abort > ok.
	reason = strings.ToLower(strings.TrimSpace(reason))
	switch {
	case strings.Contains(reason, "error"), strings.Contains(reason, "fail"):
		return shared.TelemetryStatusError
	case strings.Contains(reason, "abort"), strings.Contains(reason, "cancel"):
		return shared.TelemetryStatusAborted
	default:
		return shared.TelemetryStatusOK
	}
}

// appendDedupTelemetryEvents appends events to out, stamping the account
// ID and dropping duplicate tool/message events already seen by key.
func appendDedupTelemetryEvents(
	out *[]shared.TelemetryEvent,
	events []shared.TelemetryEvent,
	seenMessage map[string]bool,
	seenTools map[string]bool,
	accountID string,
) {
	for _, ev := range events {
		ev.AccountID = core.FirstNonEmpty(accountID, ev.AccountID)
		switch ev.EventType {
		case shared.TelemetryEventTypeToolUsage:
			key := core.FirstNonEmpty(strings.TrimSpace(ev.ToolCallID))
			if key == "" {
				// Fall back to session/message plus tool name.
				key = core.FirstNonEmpty(strings.TrimSpace(ev.SessionID), strings.TrimSpace(ev.MessageID)) + "|" + strings.ToLower(strings.TrimSpace(ev.ToolName))
			}
			if key != "" {
				if seenTools[key] {
					continue
				}
				seenTools[key] = true
			}
		case shared.TelemetryEventTypeMessageUsage:
			key := core.FirstNonEmpty(strings.TrimSpace(ev.MessageID))
			if key == "" {
				key = core.FirstNonEmpty(strings.TrimSpace(ev.SessionID), strings.TrimSpace(ev.TurnID))
			}
			if key != "" {
				if seenMessage[key] {
					continue
				}
				seenMessage[key] = true
			}
		}
		*out = append(*out, ev)
	}
}

// hasUsage reports whether any token counter or the cost is positive.
func hasUsage(u usage) bool {
	for _, value := range []*int64{
		u.InputTokens, u.OutputTokens, u.ReasoningTokens, u.CacheReadTokens, u.CacheWriteTokens, u.TotalTokens,
	} {
		if value != nil && *value > 0 {
			return true
		}
	}
	return u.CostUSD != nil && *u.CostUSD > 0
}

// extractUsage pulls token/cost numbers out of a schemaless output map,
// probing the known key spellings in priority order. When no explicit
// total is present, it is synthesized from the available components.
func extractUsage(output map[string]any) usage {
	if len(output) == 0 {
		return usage{}
	}
	input := shared.FirstPathNumber(output,
		[]string{"usage", "input_tokens"}, []string{"usage", "inputTokens"}, []string{"usage", "input"},
		[]string{"message", "usage", "input_tokens"}, []string{"message", "usage", "inputTokens"}, []string{"message", "usage", "input"},
		[]string{"tokens", "input"}, []string{"input_tokens"}, []string{"inputTokens"},
	)
	outputTokens := shared.FirstPathNumber(output,
		[]string{"usage", "output_tokens"}, []string{"usage", "outputTokens"}, []string{"usage", "output"},
		[]string{"message", "usage", "output_tokens"}, []string{"message", "usage", "outputTokens"}, []string{"message", "usage", "output"},
		[]string{"tokens", "output"}, []string{"output_tokens"}, []string{"outputTokens"},
	)
	reasoning := shared.FirstPathNumber(output,
		[]string{"usage", "reasoning_tokens"}, []string{"usage", "reasoningTokens"}, []string{"usage", "reasoning"},
		[]string{"message", "usage", "reasoning_tokens"}, []string{"message", "usage", "reasoningTokens"}, []string{"message", "usage", "reasoning"},
		[]string{"tokens", "reasoning"}, []string{"reasoning_tokens"}, []string{"reasoningTokens"},
	)
	cacheRead := shared.FirstPathNumber(output,
		[]string{"usage", "cache_read_input_tokens"}, []string{"usage", "cacheReadInputTokens"}, []string{"usage", "cache_read_tokens"},
		[]string{"usage", "cacheReadTokens"}, []string{"usage", "cache", "read"},
		[]string{"message", "usage", "cache_read_input_tokens"}, []string{"message", "usage", "cacheReadInputTokens"}, []string{"message", "usage", "cache", "read"},
		[]string{"tokens", "cache", "read"},
	)
	cacheWrite := shared.FirstPathNumber(output,
		[]string{"usage", "cache_creation_input_tokens"}, []string{"usage", "cacheCreationInputTokens"}, []string{"usage", "cache_write_tokens"},
		[]string{"usage", "cacheWriteTokens"}, []string{"usage", "cache", "write"},
		[]string{"message", "usage", "cache_creation_input_tokens"}, []string{"message", "usage", "cacheCreationInputTokens"}, []string{"message", "usage", "cache", "write"},
		[]string{"tokens", "cache", "write"},
	)
	total := shared.FirstPathNumber(output,
		[]string{"usage", "total_tokens"}, []string{"usage", "totalTokens"}, []string{"usage", "total"},
		[]string{"message", "usage", "total_tokens"}, []string{"message", "usage", "totalTokens"}, []string{"message", "usage", "total"},
		[]string{"tokens", "total"}, []string{"total_tokens"}, []string{"totalTokens"},
	)
	cost := shared.FirstPathNumber(output,
		[]string{"usage", "cost_usd"}, []string{"usage", "costUSD"}, []string{"usage", "cost"},
		[]string{"message", "usage", "cost_usd"}, []string{"message", "usage", "costUSD"}, []string{"message", "usage", "cost"},
		[]string{"cost_usd"}, []string{"costUSD"}, []string{"cost"},
	)

	result := usage{
		InputTokens:      shared.NumberToInt64Ptr(input),
		OutputTokens:     shared.NumberToInt64Ptr(outputTokens),
		ReasoningTokens:  shared.NumberToInt64Ptr(reasoning),
		CacheReadTokens:  shared.NumberToInt64Ptr(cacheRead),
		CacheWriteTokens: shared.NumberToInt64Ptr(cacheWrite),
		TotalTokens:      shared.NumberToInt64Ptr(total),
		CostUSD:          shared.NumberToFloat64Ptr(cost),
	}
	if result.TotalTokens == nil {
		var combined int64
		hasAny := false
		for _, ptr := range []*int64{result.InputTokens, result.OutputTokens, result.ReasoningTokens, result.CacheReadTokens, result.CacheWriteTokens} {
			if ptr != nil {
				combined += *ptr
				hasAny = true
			}
		}
		if hasAny {
			result.TotalTokens = core.Int64Ptr(combined)
		}
	}
	return result
}

// extractContextSummary summarizes context-window composition: a part
// total and a per-type part count, derived from explicit counters when
// present or by tallying the "parts" array otherwise.
func extractContextSummary(output map[string]any) map[string]any {
	if len(output) == 0 {
		return map[string]any{}
	}
	partsTotal := shared.FirstPathNumber(output, []string{"context", "parts_total"}, []string{"context", "partsTotal"}, []string{"parts_count"})
	partsByType := map[string]any{}
	if m, ok := shared.PathMap(output, "context", "parts_by_type"); ok {
		for key, value := range m {
			if count, ok := shared.NumberFromAny(value); ok {
				partsByType[strings.TrimSpace(key)] = int64(count)
			}
		}
	}
	if len(partsByType) == 0 {
		if arr, ok := shared.PathSlice(output, "parts"); ok {
			typeCounts := make(map[string]int64)
			for _, part := range arr {
				partMap, ok := part.(map[string]any)
				if !ok {
					typeCounts["unknown"]++
					continue
				}
				partType := "unknown"
				if rawType, ok := partMap["type"].(string); ok && strings.TrimSpace(rawType) != "" {
					partType = strings.TrimSpace(rawType)
				}
				typeCounts[partType]++
			}
			for key, value := range typeCounts {
				partsByType[key] = value
			}
			if partsTotal == nil {
				v := float64(len(arr))
				partsTotal = &v
			}
		}
	}
	return map[string]any{
		"parts_total":   ptrInt64Value(shared.NumberToInt64Ptr(partsTotal)),
		"parts_by_type": partsByType,
	}
}

// decodeRawMessageMap decodes every raw JSON value in root; values that
// fail to decode are kept as their raw string, empty values become nil.
func decodeRawMessageMap(root map[string]json.RawMessage) map[string]any {
	out := make(map[string]any, len(root))
	for key, raw := range root {
		if len(raw) == 0 {
			out[key] = nil
			continue
		}
		var decoded any
		if err := json.Unmarshal(raw, &decoded); err != nil {
			out[key] = string(raw)
			continue
		}
		out[key] = decoded
	}
	return out
}

// decodeJSONMap decodes raw into a map, or wraps the raw text under
// "_raw_json" when it is not a non-empty JSON object.
func decodeJSONMap(raw []byte) map[string]any {
	var out map[string]any
	if err := json.Unmarshal(raw, &out); err == nil && len(out) > 0 {
		return out
	}
	return map[string]any{"_raw_json": string(raw)}
}

// mergePayload combines a normalized view and a summarized raw view into
// one payload map. Normalized keys win; both full views are preserved
// under "_normalized" and "_raw".
func mergePayload(rawPayload map[string]any, normalized map[string]any) map[string]any {
	if len(rawPayload) == 0 && len(normalized) == 0 {
		return map[string]any{}
	}
	out := make(map[string]any, 8)
	if len(normalized) > 0 {
		out["_normalized"] = normalized
		for key, value := range normalized {
			if _, exists := out[key]; !exists {
				out[key] = value
			}
		}
	}
	rawSummary := summarizeRawPayload(rawPayload)
	if len(rawSummary) > 0 {
		out["_raw"] = rawSummary
		for key, value := range rawSummary {
			if _, exists := out[key]; !exists {
				out[key] = value
			}
		}
	}
	return out
}

// summarizeRawPayload extracts the identifying fields of a raw payload
// into a compact summary map.
func summarizeRawPayload(rawPayload map[string]any) map[string]any {
	if len(rawPayload) == 0 {
		return map[string]any{}
	}
	out := map[string]any{"raw_keys": len(rawPayload)}
	if hook := shared.FirstPathString(rawPayload, []string{"hook"}); hook != "" {
		out["hook"] = hook
	}
	if typ := shared.FirstPathString(rawPayload,
[]string{"type"}); typ != "" {
		out["type"] = typ
	}
	if value := core.FirstNonEmpty(
		shared.FirstPathString(rawPayload, []string{"hook"}),
		shared.FirstPathString(rawPayload, []string{"event"}),
		shared.FirstPathString(rawPayload, []string{"type"}),
	); value != "" {
		out["event"] = value
	}
	if value := core.FirstNonEmpty(
		shared.FirstPathString(rawPayload, []string{"sessionID"}),
		shared.FirstPathString(rawPayload, []string{"session_id"}),
		shared.FirstPathString(rawPayload, []string{"input", "sessionID"}),
		shared.FirstPathString(rawPayload, []string{"output", "message", "sessionID"}),
	); value != "" {
		out["session_id"] = value
	}
	if value := core.FirstNonEmpty(
		shared.FirstPathString(rawPayload, []string{"messageID"}),
		shared.FirstPathString(rawPayload, []string{"message_id"}),
		shared.FirstPathString(rawPayload, []string{"input", "messageID"}),
		shared.FirstPathString(rawPayload, []string{"output", "message", "id"}),
	); value != "" {
		out["message_id"] = value
	}
	if value := core.FirstNonEmpty(
		shared.FirstPathString(rawPayload, []string{"toolCallID"}),
		shared.FirstPathString(rawPayload, []string{"tool_call_id"}),
		shared.FirstPathString(rawPayload, []string{"input", "callID"}),
	); value != "" {
		out["tool_call_id"] = value
	}
	if value := core.FirstNonEmpty(
		shared.FirstPathString(rawPayload, []string{"providerID"}),
		shared.FirstPathString(rawPayload, []string{"provider_id"}),
		shared.FirstPathString(rawPayload, []string{"input", "model", "providerID"}),
		shared.FirstPathString(rawPayload, []string{"output", "message", "model", "providerID"}),
	); value != "" {
		out["provider_id"] = value
	}
	if value := core.FirstNonEmpty(
		shared.FirstPathString(rawPayload, []string{"modelID"}),
		shared.FirstPathString(rawPayload, []string{"model_id"}),
		shared.FirstPathString(rawPayload, []string{"input", "model", "modelID"}),
		shared.FirstPathString(rawPayload, []string{"output", "message", "model", "modelID"}),
	); value != "" {
		out["model_id"] = value
	}
	if ts := shared.FirstPathString(rawPayload, []string{"timestamp"}, []string{"time"}); ts != "" {
		out["timestamp"] = ts
	}
	return out
}

// ptrInt64Value dereferences an optional int64 for inclusion in a
// payload map; nil stays nil (encodes as JSON null).
func ptrInt64Value(v *int64) any {
	if v == nil {
		return nil
	}
	return *v
}

// parseHookTimestampAny resolves an event timestamp from a generic map,
// trying numeric fields first, then string fields, then falling back to
// the current time.
func parseHookTimestampAny(root map[string]any) time.Time {
	if root == nil {
		return time.Now().UTC()
	}
	if ts := shared.FirstPathNumber(root,
		[]string{"timestamp"},
		[]string{"time"},
		[]string{"event", "timestamp"},
		[]string{"event", "properties", "info", "time", "completed"},
		[]string{"event", "properties", "info", "time", "created"},
	); ts != nil && *ts > 0 {
		return hookTimestampOrNow(int64(*ts))
	}
	if raw := shared.FirstPathString(root, []string{"timestamp"}, []string{"time"}, []string{"event", "timestamp"}); raw != "" {
		if ts, ok := shared.ParseFlexibleTimestamp(raw); ok {
			return shared.UnixAuto(ts)
		}
	}
	return time.Now().UTC()
}

// parseHookTimestamp resolves the top-level "timestamp" field of a raw
// hook payload (integer or flexible string), defaulting to now.
func parseHookTimestamp(root map[string]json.RawMessage) time.Time {
	if raw, ok := root["timestamp"]; ok {
		var intVal int64
		if err := json.Unmarshal(raw, &intVal); err == nil && intVal > 0 {
			return hookTimestampOrNow(intVal)
		}
		var strVal string
		if err := json.Unmarshal(raw, &strVal); err == nil {
			if ts, ok := shared.ParseFlexibleTimestamp(strVal); ok {
				return shared.UnixAuto(ts)
			}
		}
	}
	return time.Now().UTC()
}

// hookTimestampOrNow converts a positive unix timestamp, or returns now.
func hookTimestampOrNow(ts int64) time.Time {
	if ts <= 0 {
		return time.Now().UTC()
	}
	return shared.UnixAuto(ts)
}

// ptrInt64FromFloat truncates an optional float to int64; nil yields 0.
func ptrInt64FromFloat(v *float64) int64 {
	if v == nil {
		return 0
	}
	return int64(*v)
}
diff --git a/internal/providers/opencode/telemetry_sqlite.go b/internal/providers/opencode/telemetry_sqlite.go new file mode 100644 index 0000000..4c1b62f --- /dev/null +++ b/internal/providers/opencode/telemetry_sqlite.go @@ -0,0 +1,600 @@ +package opencode + +import ( + "context" + "database/sql" + "os" + "strings" + + _ "github.com/mattn/go-sqlite3" +
	"github.com/janekbaraniewski/openusage/internal/core"
	"github.com/janekbaraniewski/openusage/internal/providers/shared"
)

// CollectTelemetryFromSQLite parses OpenCode SQLite data (message + part tables).
// It is deliberately best-effort: a blank path, a missing file, or a database
// without a "message" table all yield (nil, nil) so callers can probe
// candidate paths freely.
func CollectTelemetryFromSQLite(ctx context.Context, dbPath string) ([]shared.TelemetryEvent, error) {
	if strings.TrimSpace(dbPath) == "" {
		return nil, nil
	}
	if _, err := os.Stat(dbPath); err != nil {
		// Missing/unreadable path counts as "no data", not an error.
		return nil, nil
	}

	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		return nil, err
	}
	defer db.Close()

	if !sqliteTableExists(ctx, db, "message") {
		return nil, nil
	}

	// Per-message part counts enrich event payloads; a failure here is
	// intentionally non-fatal (error discarded).
	partSummaryByMessage := make(map[string]partSummary)
	hasPartTable := sqliteTableExists(ctx, db, "part")
	if hasPartTable {
		partSummaryByMessage, _ = collectPartSummary(ctx, db)
	}

	out, seenMessages, err := collectSQLiteMessageEvents(ctx, db, dbPath, partSummaryByMessage, hasPartTable)
	if err != nil {
		return out, err
	}
	if !hasPartTable {
		// Tool events live in the part table; without it there is nothing more.
		return out, nil
	}

	return collectSQLiteToolEvents(ctx, db, dbPath, partSummaryByMessage, seenMessages, out)
}

// collectSQLiteMessageEvents gathers at most one message-usage event per
// message ID. Step-finish parts (which carry authoritative usage) are read
// first; the message table is then scanned for anything not already covered.
// seenMessages records emitted IDs so the two passes never duplicate.
func collectSQLiteMessageEvents(
	ctx context.Context,
	db *sql.DB,
	dbPath string,
	partSummaryByMessage map[string]partSummary,
	hasPartTable bool,
) ([]shared.TelemetryEvent, map[string]bool, error) {
	var out []shared.TelemetryEvent
	seenMessages := map[string]bool{}

	if hasPartTable {
		if err := appendSQLiteStepFinishEvents(ctx, db, dbPath, partSummaryByMessage, &out, seenMessages); err != nil {
			return out, seenMessages, err
		}
	}
	if err := appendSQLiteMessageTableEvents(ctx, db, dbPath, partSummaryByMessage, &out, seenMessages); err != nil {
		return out, seenMessages, err
	}

	return out, seenMessages, nil
}

// appendSQLiteStepFinishEvents emits one message-usage event per step-finish
// part that carries usage, deduplicated by message ID via seenMessages. Rows
// are read in time_updated order so output stays chronological. The only
// error it surfaces is context cancellation.
//
// NOTE(review): a failing query returns nil (best-effort across schema
// versions), per-row Scan errors are skipped, and rows.Err() is never
// consulted — iteration errors are silently dropped; confirm this is intended.
func appendSQLiteStepFinishEvents(
	ctx context.Context,
	db *sql.DB,
	dbPath string,
	partSummaryByMessage map[string]partSummary,
	out *[]shared.TelemetryEvent,
	seenMessages map[string]bool,
) error {
	rows, err := db.QueryContext(ctx, `
		SELECT p.id, p.message_id, p.session_id, p.time_created, p.time_updated, p.data, COALESCE(m.data, '{}'), COALESCE(s.directory, '')
		FROM part p
		LEFT JOIN message m ON m.id = p.message_id
		LEFT JOIN session s ON s.id = p.session_id
		WHERE COALESCE(json_extract(p.data, '$.type'), '') = 'step-finish'
		ORDER BY p.time_updated ASC
	`)
	if err != nil {
		return nil
	}
	defer rows.Close()

	for rows.Next() {
		if ctx.Err() != nil {
			return ctx.Err()
		}

		var (
			partID      string
			messageIDDB string
			sessionIDDB string
			timeCreated int64
			timeUpdated int64
			partJSON    string
			messageJSON string
			sessionDir  string
		)
		if err := rows.Scan(&partID, &messageIDDB, &sessionIDDB, &timeCreated, &timeUpdated, &partJSON, &messageJSON, &sessionDir); err != nil {
			continue
		}

		partPayload := decodeJSONMap([]byte(partJSON))
		messagePayload := decodeJSONMap([]byte(messageJSON))
		u := extractUsage(partPayload)
		// Usage lives on the step-finish part itself; parts without it are noise.
		if !hasUsage(u) {
			continue
		}

		// Prefer the DB column, fall back to IDs embedded in the message JSON.
		messageID := core.FirstNonEmpty(strings.TrimSpace(messageIDDB), shared.FirstPathString(messagePayload, []string{"id"}), shared.FirstPathString(messagePayload, []string{"messageID"}))
		if messageID == "" || seenMessages[messageID] {
			continue
		}

		*out = append(*out, buildSQLiteStepFinishEvent(
			dbPath,
			partID,
			messageIDDB,
			sessionIDDB,
			timeCreated,
			timeUpdated,
			sessionDir,
			partPayload,
			messagePayload,
			partSummaryByMessage[messageID],
			u,
		))
		seenMessages[messageID] = true
	}
	return nil
}
// buildSQLiteStepFinishEvent assembles a message-usage telemetry event from a
// step-finish part row plus its parent message/session rows. Identity fields
// fall back from DB columns to values embedded in the message JSON; the
// provider defaults to "opencode" when none is recorded.
func buildSQLiteStepFinishEvent(
	dbPath, partID, messageIDDB, sessionIDDB string,
	timeCreated, timeUpdated int64,
	sessionDir string,
	partPayload, messagePayload map[string]any,
	summary partSummary,
	u usage,
) shared.TelemetryEvent {
	// Same fallback chain the caller used for deduplication.
	messageID := core.FirstNonEmpty(strings.TrimSpace(messageIDDB), shared.FirstPathString(messagePayload, []string{"id"}), shared.FirstPathString(messagePayload, []string{"messageID"}))
	sessionID := core.FirstNonEmpty(strings.TrimSpace(sessionIDDB), shared.FirstPathString(messagePayload, []string{"sessionID"}))
	turnID := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"parentID"}), shared.FirstPathString(messagePayload, []string{"turnID"}))
	providerID := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"providerID"}), shared.FirstPathString(messagePayload, []string{"model", "providerID"}), "opencode")
	modelRaw := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"modelID"}), shared.FirstPathString(messagePayload, []string{"model", "modelID"}))

	// Creation time wins over update time when present.
	// NOTE(review): buildSQLiteMessageTableEvent prefers completion time
	// instead — confirm the asymmetry is intentional.
	occurredAt := shared.UnixAuto(timeUpdated)
	if timeCreated > 0 {
		occurredAt = shared.UnixAuto(timeCreated)
	}

	return shared.TelemetryEvent{
		SchemaVersion: telemetrySQLiteSchema,
		Channel:       shared.TelemetryChannelSQLite,
		OccurredAt:    occurredAt,
		WorkspaceID:   shared.SanitizeWorkspace(core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"path", "cwd"}), shared.FirstPathString(messagePayload, []string{"path", "root"}), strings.TrimSpace(sessionDir))),
		SessionID:     sessionID,
		TurnID:        turnID,
		MessageID:     messageID,
		ProviderID:    providerID,
		AgentName:     core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"agent"}), "opencode"),
		EventType:     shared.TelemetryEventTypeMessageUsage,
		ModelRaw:      modelRaw,
		TokenUsage: core.TokenUsage{
			InputTokens:      u.InputTokens,
			OutputTokens:     u.OutputTokens,
			ReasoningTokens:  u.ReasoningTokens,
			CacheReadTokens:  u.CacheReadTokens,
			CacheWriteTokens: u.CacheWriteTokens,
			TotalTokens:      u.TotalTokens,
			CostUSD:          u.CostUSD,
			Requests:         core.Int64Ptr(1),
		},
		Status: mapMessageStatus(shared.FirstPathString(partPayload, []string{"reason"})),
		// Payload preserves enough provenance (db path/table/ids) to trace the
		// event back to its source row.
		Payload: map[string]any{
			"source": map[string]any{
				"db_path": dbPath,
				"table":   "part",
				"type":    "step-finish",
			},
			"db": map[string]any{
				"part_id":      strings.TrimSpace(partID),
				"message_id":   strings.TrimSpace(messageIDDB),
				"session_id":   strings.TrimSpace(sessionIDDB),
				"time_created": timeCreated,
				"time_updated": timeUpdated,
			},
			"message": map[string]any{
				"provider_id": providerID,
				"model_id":    modelRaw,
				"mode":        shared.FirstPathString(messagePayload, []string{"mode"}),
				"finish":      shared.FirstPathString(messagePayload, []string{"finish"}),
			},
			"step": map[string]any{
				"type":   shared.FirstPathString(partPayload, []string{"type"}),
				"reason": shared.FirstPathString(partPayload, []string{"reason"}),
			},
			"upstream_provider": extractUpstreamProviderFromMaps(partPayload, messagePayload),
			"context":           contextSummaryFromPartSummary(summary),
		},
	}
}
"time_created": timeCreated, + "time_updated": timeUpdated, + }, + "message": map[string]any{ + "provider_id": providerID, + "model_id": modelRaw, + "mode": shared.FirstPathString(messagePayload, []string{"mode"}), + "finish": shared.FirstPathString(messagePayload, []string{"finish"}), + }, + "step": map[string]any{ + "type": shared.FirstPathString(partPayload, []string{"type"}), + "reason": shared.FirstPathString(partPayload, []string{"reason"}), + }, + "upstream_provider": extractUpstreamProviderFromMaps(partPayload, messagePayload), + "context": contextSummaryFromPartSummary(summary), + }, + } +} + +func appendSQLiteMessageTableEvents( + ctx context.Context, + db *sql.DB, + dbPath string, + partSummaryByMessage map[string]partSummary, + out *[]shared.TelemetryEvent, + seenMessages map[string]bool, +) error { + rows, err := db.QueryContext(ctx, ` + SELECT m.id, m.session_id, m.time_created, m.time_updated, m.data, COALESCE(s.directory, '') + FROM message m + LEFT JOIN session s ON s.id = m.session_id + ORDER BY m.time_updated ASC + `) + if err != nil { + return nil + } + defer rows.Close() + + for rows.Next() { + if ctx.Err() != nil { + return ctx.Err() + } + + var ( + messageIDRaw string + sessionIDRaw string + timeCreated int64 + timeUpdated int64 + messageJSON string + sessionDir string + ) + if err := rows.Scan(&messageIDRaw, &sessionIDRaw, &timeCreated, &timeUpdated, &messageJSON, &sessionDir); err != nil { + continue + } + + payload := decodeJSONMap([]byte(messageJSON)) + if strings.ToLower(shared.FirstPathString(payload, []string{"role"})) != "assistant" { + continue + } + u := extractUsage(payload) + completedAt := ptrInt64FromFloat(shared.FirstPathNumber(payload, []string{"time", "completed"})) + createdAt := ptrInt64FromFloat(shared.FirstPathNumber(payload, []string{"time", "created"})) + if !hasUsage(u) && completedAt <= 0 { + continue + } + + messageID := core.FirstNonEmpty(strings.TrimSpace(messageIDRaw), shared.FirstPathString(payload, 
// buildSQLiteMessageTableEvent assembles a message-usage telemetry event from
// an assistant row of the message table. Identity fields fall back from DB
// columns to values embedded in the message JSON; the provider defaults to
// "opencode" when none is recorded.
func buildSQLiteMessageTableEvent(
	dbPath, messageIDRaw, sessionIDRaw string,
	timeCreated, timeUpdated, completedAt, createdAt int64,
	sessionDir string,
	payload map[string]any,
	summary partSummary,
	u usage,
) shared.TelemetryEvent {
	messageID := core.FirstNonEmpty(strings.TrimSpace(messageIDRaw), shared.FirstPathString(payload, []string{"id"}), shared.FirstPathString(payload, []string{"messageID"}))
	providerID := core.FirstNonEmpty(shared.FirstPathString(payload, []string{"providerID"}), shared.FirstPathString(payload, []string{"model", "providerID"}), "opencode")
	modelRaw := core.FirstNonEmpty(shared.FirstPathString(payload, []string{"modelID"}), shared.FirstPathString(payload, []string{"model", "modelID"}))
	sessionID := core.FirstNonEmpty(strings.TrimSpace(sessionIDRaw), shared.FirstPathString(payload, []string{"sessionID"}))
	turnID := core.FirstNonEmpty(shared.FirstPathString(payload, []string{"parentID"}), shared.FirstPathString(payload, []string{"turnID"}))

	// Timestamp precedence: completion > creation (from JSON) > DB row
	// creation > DB row update.
	occurredAt := shared.UnixAuto(timeUpdated)
	switch {
	case completedAt > 0:
		occurredAt = shared.UnixAuto(completedAt)
	case createdAt > 0:
		occurredAt = shared.UnixAuto(createdAt)
	case timeCreated > 0:
		occurredAt = shared.UnixAuto(timeCreated)
	}

	return shared.TelemetryEvent{
		SchemaVersion: telemetrySQLiteSchema,
		Channel:       shared.TelemetryChannelSQLite,
		OccurredAt:    occurredAt,
		WorkspaceID:   shared.SanitizeWorkspace(core.FirstNonEmpty(shared.FirstPathString(payload, []string{"path", "cwd"}), shared.FirstPathString(payload, []string{"path", "root"}), strings.TrimSpace(sessionDir))),
		SessionID:     sessionID,
		TurnID:        turnID,
		MessageID:     messageID,
		ProviderID:    providerID,
		AgentName:     core.FirstNonEmpty(shared.FirstPathString(payload, []string{"agent"}), "opencode"),
		EventType:     shared.TelemetryEventTypeMessageUsage,
		ModelRaw:      modelRaw,
		TokenUsage: core.TokenUsage{
			InputTokens:      u.InputTokens,
			OutputTokens:     u.OutputTokens,
			ReasoningTokens:  u.ReasoningTokens,
			CacheReadTokens:  u.CacheReadTokens,
			CacheWriteTokens: u.CacheWriteTokens,
			TotalTokens:      u.TotalTokens,
			CostUSD:          u.CostUSD,
			Requests:         core.Int64Ptr(1),
		},
		Status:  finishStatus(shared.FirstPathString(payload, []string{"finish"})),
		Payload: sqliteMessagePayload(dbPath, messageIDRaw, sessionIDRaw, timeCreated, timeUpdated, payload, providerID, modelRaw, summary),
	}
}
[]string{"path", "root"}), strings.TrimSpace(sessionDir))), + SessionID: sessionID, + TurnID: turnID, + MessageID: messageID, + ProviderID: providerID, + AgentName: core.FirstNonEmpty(shared.FirstPathString(payload, []string{"agent"}), "opencode"), + EventType: shared.TelemetryEventTypeMessageUsage, + ModelRaw: modelRaw, + TokenUsage: core.TokenUsage{ + InputTokens: u.InputTokens, + OutputTokens: u.OutputTokens, + ReasoningTokens: u.ReasoningTokens, + CacheReadTokens: u.CacheReadTokens, + CacheWriteTokens: u.CacheWriteTokens, + TotalTokens: u.TotalTokens, + CostUSD: u.CostUSD, + Requests: core.Int64Ptr(1), + }, + Status: finishStatus(shared.FirstPathString(payload, []string{"finish"})), + Payload: sqliteMessagePayload(dbPath, messageIDRaw, sessionIDRaw, timeCreated, timeUpdated, payload, providerID, modelRaw, summary), + } +} + +func finishStatus(finish string) shared.TelemetryStatus { + status := shared.TelemetryStatusOK + finish = strings.ToLower(finish) + if strings.Contains(finish, "error") || strings.Contains(finish, "fail") { + status = shared.TelemetryStatusError + } + if strings.Contains(finish, "abort") || strings.Contains(finish, "cancel") { + status = shared.TelemetryStatusAborted + } + return status +} + +func sqliteMessagePayload( + dbPath, messageIDRaw, sessionIDRaw string, + timeCreated, timeUpdated int64, + payload map[string]any, + providerID, modelRaw string, + summary partSummary, +) map[string]any { + return map[string]any{ + "source": map[string]any{ + "db_path": dbPath, + "table": "message", + }, + "db": map[string]any{ + "message_id": strings.TrimSpace(messageIDRaw), + "session_id": strings.TrimSpace(sessionIDRaw), + "time_created": timeCreated, + "time_updated": timeUpdated, + }, + "message": map[string]any{ + "provider_id": providerID, + "model_id": modelRaw, + "role": shared.FirstPathString(payload, []string{"role"}), + "mode": shared.FirstPathString(payload, []string{"mode"}), + "finish": shared.FirstPathString(payload, 
// collectSQLiteToolEvents appends one tool-usage event per tool part whose
// status maps to a reportable state, deduplicated by tool-call ID. Rows are
// read in time_updated order. The only error it surfaces is context
// cancellation.
//
// NOTE(review): a failing query returns (out, nil) and per-row Scan errors
// are skipped (best-effort); rows.Err() is not consulted.
func collectSQLiteToolEvents(
	ctx context.Context,
	db *sql.DB,
	dbPath string,
	partSummaryByMessage map[string]partSummary,
	seenMessages map[string]bool,
	out []shared.TelemetryEvent,
) ([]shared.TelemetryEvent, error) {
	// Accepted for signature symmetry with the message collectors but unused
	// here — presumably kept so all collectors share one call shape; confirm.
	_ = partSummaryByMessage
	_ = seenMessages

	rows, err := db.QueryContext(ctx, `
		SELECT p.id, p.message_id, p.session_id, p.time_created, p.time_updated, p.data, COALESCE(m.data, '{}'), COALESCE(s.directory, '')
		FROM part p
		LEFT JOIN message m ON m.id = p.message_id
		LEFT JOIN session s ON s.id = p.session_id
		WHERE COALESCE(json_extract(p.data, '$.type'), '') = 'tool'
		ORDER BY p.time_updated ASC
	`)
	if err != nil {
		return out, nil
	}
	defer rows.Close()

	seenTools := map[string]bool{}
	for rows.Next() {
		if ctx.Err() != nil {
			return out, ctx.Err()
		}

		var (
			partID      string
			messageIDDB string
			sessionIDDB string
			timeCreated int64
			timeUpdated int64
			partJSON    string
			messageJSON string
			sessionDir  string
		)
		if err := rows.Scan(&partID, &messageIDDB, &sessionIDDB, &timeCreated, &timeUpdated, &partJSON, &messageJSON, &sessionDir); err != nil {
			continue
		}

		partPayload := decodeJSONMap([]byte(partJSON))
		messagePayload := decodeJSONMap([]byte(messageJSON))

		// Tool-call identity: JSON callID variants first, DB part id last.
		toolCallID := core.FirstNonEmpty(shared.FirstPathString(partPayload, []string{"callID"}), shared.FirstPathString(partPayload, []string{"call_id"}), strings.TrimSpace(partID))
		if toolCallID == "" || seenTools[toolCallID] {
			continue
		}

		// mapToolStatus decides both the mapped status and whether the raw
		// state is worth emitting at all.
		statusRaw := strings.ToLower(shared.FirstPathString(partPayload, []string{"state", "status"}))
		status, include := mapToolStatus(statusRaw)
		if !include {
			continue
		}
		seenTools[toolCallID] = true

		out = append(out, buildSQLiteToolEvent(
			dbPath,
			partID,
			messageIDDB,
			sessionIDDB,
			timeCreated,
			timeUpdated,
			sessionDir,
			partPayload,
			messagePayload,
			status,
			statusRaw,
		))
	}

	return out, nil
}
// buildSQLiteToolEvent assembles a tool-usage telemetry event from a tool
// part row plus its parent message/session rows. Identity fields fall back
// from the part JSON to the message JSON to DB columns; provider defaults to
// "opencode" and tool names are lowercased ("unknown" when absent).
func buildSQLiteToolEvent(
	dbPath, partID, messageIDDB, sessionIDDB string,
	timeCreated, timeUpdated int64,
	sessionDir string,
	partPayload, messagePayload map[string]any,
	status shared.TelemetryStatus,
	statusRaw string,
) shared.TelemetryEvent {
	toolCallID := core.FirstNonEmpty(shared.FirstPathString(partPayload, []string{"callID"}), shared.FirstPathString(partPayload, []string{"call_id"}), strings.TrimSpace(partID))
	toolName := strings.ToLower(core.FirstNonEmpty(shared.FirstPathString(partPayload, []string{"tool"}), shared.FirstPathString(partPayload, []string{"name"}), "unknown"))
	sessionID := core.FirstNonEmpty(strings.TrimSpace(sessionIDDB), shared.FirstPathString(partPayload, []string{"sessionID"}), shared.FirstPathString(messagePayload, []string{"sessionID"}))
	messageID := core.FirstNonEmpty(strings.TrimSpace(messageIDDB), shared.FirstPathString(partPayload, []string{"messageID"}), shared.FirstPathString(messagePayload, []string{"id"}))
	providerID := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"providerID"}), shared.FirstPathString(messagePayload, []string{"model", "providerID"}), "opencode")
	modelRaw := core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"modelID"}), shared.FirstPathString(messagePayload, []string{"model", "modelID"}))

	// Timestamp precedence: tool state end > start (nested then flat) > DB row
	// creation > DB row update.
	occurredAt := shared.UnixAuto(timeUpdated)
	if ts := ptrInt64FromFloat(shared.FirstPathNumber(partPayload,
		[]string{"state", "time", "end"},
		[]string{"state", "time", "start"},
		[]string{"time", "end"},
		[]string{"time", "start"},
	)); ts > 0 {
		occurredAt = shared.UnixAuto(ts)
	} else if timeCreated > 0 {
		occurredAt = shared.UnixAuto(timeCreated)
	}

	return shared.TelemetryEvent{
		SchemaVersion: telemetrySQLiteSchema,
		Channel:       shared.TelemetryChannelSQLite,
		OccurredAt:    occurredAt,
		WorkspaceID: shared.SanitizeWorkspace(core.FirstNonEmpty(
			shared.FirstPathString(messagePayload, []string{"path", "cwd"}),
			shared.FirstPathString(messagePayload, []string{"path", "root"}),
			strings.TrimSpace(sessionDir),
		)),
		SessionID:  sessionID,
		MessageID:  messageID,
		ToolCallID: toolCallID,
		ProviderID: providerID,
		AgentName:  core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"agent"}), "opencode"),
		EventType:  shared.TelemetryEventTypeToolUsage,
		ModelRaw:   modelRaw,
		ToolName:   toolName,
		// Tool events carry no token counts; only the request is counted.
		TokenUsage: core.TokenUsage{
			Requests: core.Int64Ptr(1),
		},
		Status: status,
		Payload: map[string]any{
			"source": map[string]any{
				"db_path": dbPath,
				"table":   "part",
			},
			"db": map[string]any{
				"part_id":      strings.TrimSpace(partID),
				"message_id":   strings.TrimSpace(messageIDDB),
				"session_id":   strings.TrimSpace(sessionIDDB),
				"time_created": timeCreated,
				"time_updated": timeUpdated,
			},
			"message": map[string]any{
				"provider_id": providerID,
				"model_id":    modelRaw,
				"mode":        shared.FirstPathString(messagePayload, []string{"mode"}),
			},
			"upstream_provider": extractUpstreamProviderFromMaps(partPayload, messagePayload),
			"status":            statusRaw,
			"file":              extractToolFilePath(partPayload),
		},
	}
}
occurredAt, + WorkspaceID: shared.SanitizeWorkspace(core.FirstNonEmpty( + shared.FirstPathString(messagePayload, []string{"path", "cwd"}), + shared.FirstPathString(messagePayload, []string{"path", "root"}), + strings.TrimSpace(sessionDir), + )), + SessionID: sessionID, + MessageID: messageID, + ToolCallID: toolCallID, + ProviderID: providerID, + AgentName: core.FirstNonEmpty(shared.FirstPathString(messagePayload, []string{"agent"}), "opencode"), + EventType: shared.TelemetryEventTypeToolUsage, + ModelRaw: modelRaw, + ToolName: toolName, + TokenUsage: core.TokenUsage{ + Requests: core.Int64Ptr(1), + }, + Status: status, + Payload: map[string]any{ + "source": map[string]any{ + "db_path": dbPath, + "table": "part", + }, + "db": map[string]any{ + "part_id": strings.TrimSpace(partID), + "message_id": strings.TrimSpace(messageIDDB), + "session_id": strings.TrimSpace(sessionIDDB), + "time_created": timeCreated, + "time_updated": timeUpdated, + }, + "message": map[string]any{ + "provider_id": providerID, + "model_id": modelRaw, + "mode": shared.FirstPathString(messagePayload, []string{"mode"}), + }, + "upstream_provider": extractUpstreamProviderFromMaps(partPayload, messagePayload), + "status": statusRaw, + "file": extractToolFilePath(partPayload), + }, + } +} + +func extractToolFilePath(partPayload map[string]any) string { + if stateInput, ok := partPayload["state"].(map[string]any); ok { + if paths := shared.ExtractFilePathsFromPayload(stateInput); len(paths) > 0 { + return paths[0] + } + } + if paths := shared.ExtractFilePathsFromPayload(partPayload); len(paths) > 0 { + return paths[0] + } + return "" +} + +func contextSummaryFromPartSummary(summary partSummary) map[string]any { + if summary.PartsTotal == 0 && len(summary.PartsByType) == 0 { + return map[string]any{} + } + partsByType := make(map[string]any, len(summary.PartsByType)) + for partType, count := range summary.PartsByType { + partsByType[partType] = count + } + return map[string]any{ + "parts_total": 
summary.PartsTotal, + "parts_by_type": partsByType, + } +} + +func collectPartSummary(ctx context.Context, db *sql.DB) (map[string]partSummary, error) { + rows, err := db.QueryContext(ctx, ` + SELECT message_id, COALESCE(NULLIF(TRIM(json_extract(data, '$.type')), ''), 'unknown') AS part_type, COUNT(*) + FROM part + GROUP BY message_id, part_type + `) + if err != nil { + return nil, err + } + defer rows.Close() + + out := make(map[string]partSummary) + for rows.Next() { + var ( + messageID string + partType string + count int64 + ) + if err := rows.Scan(&messageID, &partType, &count); err != nil { + continue + } + messageID = strings.TrimSpace(messageID) + if messageID == "" { + continue + } + partType = strings.TrimSpace(partType) + if partType == "" { + partType = "unknown" + } + s := out[messageID] + if s.PartsByType == nil { + s.PartsByType = map[string]int64{} + } + s.PartsTotal += count + s.PartsByType[partType] += count + out[messageID] = s + } + if err := rows.Err(); err != nil { + return out, err + } + return out, nil +} + +func sqliteTableExists(ctx context.Context, db *sql.DB, table string) bool { + var exists int + err := db.QueryRowContext(ctx, `SELECT 1 FROM sqlite_master WHERE type='table' AND name=? 
// resolveAPIBases derives the coding-API base URL, the monitor base URL, and
// a region label ("global" or "china") for a ZAI account. An explicit
// acct.BaseURL is normalized against the known /api/coding/paas/v4 layout;
// otherwise region-specific defaults apply. China is inferred from the
// account's plan_type or from a bigmodel.cn host.
func resolveAPIBases(acct core.AccountConfig) (codingBase, monitorBase, region string) {
	planType := ""
	if acct.ExtraData != nil {
		planType = strings.TrimSpace(acct.ExtraData["plan_type"])
	}

	isChina := strings.Contains(strings.ToLower(planType), "china")
	if acct.BaseURL != "" {
		base := strings.TrimRight(acct.BaseURL, "/")
		parsed, err := url.Parse(base)
		if err == nil && parsed.Scheme != "" && parsed.Host != "" {
			root := parsed.Scheme + "://" + parsed.Host
			path := strings.TrimRight(parsed.Path, "/")
			// Normalize the coding base onto the canonical API path:
			// an existing /api/coding/paas/v4 segment is kept, a trailing
			// /models is stripped, an empty path gets the canonical segment,
			// and any other path is preserved as-is.
			switch {
			case strings.Contains(path, "/api/coding/paas/v4"):
				codingBase = root + "/api/coding/paas/v4"
			case strings.HasSuffix(path, "/models"):
				codingBase = root + strings.TrimSuffix(path, "/models")
			case path == "" || path == "/":
				codingBase = root + "/api/coding/paas/v4"
			default:
				codingBase = root + path
			}
			monitorBase = root
			hostLower := strings.ToLower(parsed.Host)
			if strings.Contains(hostLower, "bigmodel.cn") {
				isChina = true
			}
		} else {
			// Unparseable URL: use it verbatim for coding and strip the
			// canonical API suffix (if any) for monitoring.
			codingBase = base
			monitorBase = strings.TrimSuffix(base, "/api/coding/paas/v4")
			monitorBase = strings.TrimSuffix(monitorBase, "/")
		}
	}

	// Fall back to region defaults when no usable BaseURL was provided.
	if codingBase == "" || monitorBase == "" {
		if isChina {
			codingBase = defaultChinaCodingBaseURL
			monitorBase = defaultChinaMonitorBaseURL
		} else {
			codingBase = defaultGlobalCodingBaseURL
			monitorBase = defaultGlobalMonitorBaseURL
		}
	}

	region = "global"
	if isChina || strings.Contains(strings.ToLower(monitorBase), "bigmodel.cn") {
		region = "china"
	}
	return codingBase, monitorBase, region
}
region = "china" + } + return codingBase, monitorBase, region +} + +func doMonitorRequest(ctx context.Context, reqURL, token string, bearer bool, client *http.Client) (int, []byte, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL, nil) + if err != nil { + return 0, nil, fmt.Errorf("creating request: %w", err) + } + + authValue := token + if bearer { + authValue = "Bearer " + token + } + req.Header.Set("Authorization", authValue) + req.Header.Set("Accept-Language", "en-US,en") + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return 0, nil, fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return resp.StatusCode, nil, fmt.Errorf("reading response: %w", err) + } + return resp.StatusCode, body, nil +} + +func applyQuotaData(raw json.RawMessage, snap *core.UsageSnapshot, state *providerState) bool { + var payload any + if err := json.Unmarshal(raw, &payload); err != nil { + return false + } + + rows := extractLimitRows(payload) + if len(rows) == 0 { + return false + } + + found := false + for _, row := range rows { + kind := strings.ToUpper(strings.TrimSpace(firstStringFromMap(row, "type", "limitType"))) + percentage, hasPct := parseNumberFromMap(row, "percentage", "usedPercent", "used_percentage") + if hasPct && percentage <= 1 { + percentage *= 100 + } + + switch kind { + case "TOKENS_LIMIT": + if hasPct { + snap.Metrics["usage_five_hour"] = core.Metric{ + Used: core.Float64Ptr(clamp(percentage, 0, 100)), + Limit: core.Float64Ptr(100), + Unit: "%", + Window: "5h", + } + if percentage >= 100 { + state.limited = true + } else if percentage >= 80 { + state.nearLimit = true + } + } + + limit, hasLimit := parseNumberFromMap(row, "usage", "limit", "quota") + current, hasCurrent := parseNumberFromMap(row, "currentValue", "current", "used") + if hasLimit && hasCurrent { + remaining := math.Max(limit-current, 0) + 
// applyQuotaData folds quota-limit rows from a monitor response into the
// snapshot's metrics and the provider state's limited/nearLimit flags.
// Returns true when at least one recognized limit row was applied.
func applyQuotaData(raw json.RawMessage, snap *core.UsageSnapshot, state *providerState) bool {
	var payload any
	if err := json.Unmarshal(raw, &payload); err != nil {
		return false
	}

	rows := extractLimitRows(payload)
	if len(rows) == 0 {
		return false
	}

	found := false
	for _, row := range rows {
		kind := strings.ToUpper(strings.TrimSpace(firstStringFromMap(row, "type", "limitType")))
		percentage, hasPct := parseNumberFromMap(row, "percentage", "usedPercent", "used_percentage")
		// Values <= 1 are assumed to be fractions and rescaled to percent.
		// NOTE(review): a genuine 0.5% reading would be inflated to 50% by
		// this heuristic — confirm the API never reports sub-1% values.
		if hasPct && percentage <= 1 {
			percentage *= 100
		}

		switch kind {
		case "TOKENS_LIMIT":
			if hasPct {
				snap.Metrics["usage_five_hour"] = core.Metric{
					Used:   core.Float64Ptr(clamp(percentage, 0, 100)),
					Limit:  core.Float64Ptr(100),
					Unit:   "%",
					Window: "5h",
				}
				// 100% flips limited; 80%+ only flags nearLimit.
				if percentage >= 100 {
					state.limited = true
				} else if percentage >= 80 {
					state.nearLimit = true
				}
			}

			limit, hasLimit := parseNumberFromMap(row, "usage", "limit", "quota")
			current, hasCurrent := parseNumberFromMap(row, "currentValue", "current", "used")
			if hasLimit && hasCurrent {
				remaining := math.Max(limit-current, 0)
				snap.Metrics["tokens_five_hour"] = core.Metric{
					Limit:     core.Float64Ptr(limit),
					Used:      core.Float64Ptr(current),
					Remaining: core.Float64Ptr(remaining),
					Unit:      "tokens",
					Window:    "5h",
				}
			}

			if resetRaw := firstAnyFromMap(row, "nextResetTime", "resetTime", "reset_at"); resetRaw != nil {
				if reset, ok := parseTimeValue(resetRaw); ok {
					snap.Resets["usage_five_hour"] = reset
				}
			}
			// A TOKENS_LIMIT row counts as applied even without numbers.
			found = true

		case "TIME_LIMIT":
			limit, hasLimit := parseNumberFromMap(row, "usage", "limit", "quota")
			current, hasCurrent := parseNumberFromMap(row, "currentValue", "current", "used")
			if hasLimit && hasCurrent {
				remaining := math.Max(limit-current, 0)
				snap.Metrics["mcp_monthly_usage"] = core.Metric{
					Limit:     core.Float64Ptr(limit),
					Used:      core.Float64Ptr(current),
					Remaining: core.Float64Ptr(remaining),
					Unit:      "calls",
					Window:    "1mo",
				}
				// Unlike TOKENS_LIMIT, this row only counts when numeric.
				found = true
			}
			if hasPct {
				if percentage >= 100 {
					state.limited = true
				} else if percentage >= 80 {
					state.nearLimit = true
				}
			}
		}
	}

	return found
}
// extractUsageSamples converts a raw usage response into usageSample rows.
// kind is "model" or tool-like and selects which key aliases populate Name.
// Each field is resolved through an ordered alias list (first non-empty wins),
// then backfilled from the synthetic __usage_bucket/__usage_key tags that
// extractUsageRows attaches while flattening nested payload shapes.
func extractUsageSamples(raw json.RawMessage, kind string) []usageSample {
	if isJSONEmpty(raw) {
		return nil
	}

	var payload any
	if err := json.Unmarshal(raw, &payload); err != nil {
		return nil
	}

	rows := extractUsageRows(payload)
	if len(rows) == 0 {
		return nil
	}

	samples := make([]usageSample, 0, len(rows))
	for _, row := range rows {
		sample := usageSample{
			Date: normalizeDate(firstAnyByPaths(row,
				[]string{"date"},
				[]string{"day"},
				[]string{"time"},
				[]string{"timestamp"},
				[]string{"created_at"},
				[]string{"createdAt"},
				[]string{"ts"},
				[]string{"meta", "date"},
				[]string{"meta", "timestamp"},
			)),
		}

		// Name aliases differ by kind: model identifiers vs tool identifiers.
		if kind == "model" {
			sample.Name = firstStringByPaths(row,
				[]string{"model"},
				[]string{"model_id"},
				[]string{"modelId"},
				[]string{"model_name"},
				[]string{"modelName"},
				[]string{"name"},
				[]string{"model", "id"},
				[]string{"model", "name"},
				[]string{"model", "modelId"},
				[]string{"meta", "model"},
			)
		} else {
			sample.Name = firstStringByPaths(row,
				[]string{"tool"},
				[]string{"tool_name"},
				[]string{"toolName"},
				[]string{"name"},
				[]string{"tool_id"},
				[]string{"toolId"},
				[]string{"tool", "name"},
				[]string{"tool", "id"},
				[]string{"meta", "tool"},
			)
		}
		sample.Client = normalizeUsageDimension(firstStringByPaths(row,
			[]string{"client"},
			[]string{"client_name"},
			[]string{"clientName"},
			[]string{"application"},
			[]string{"app"},
			[]string{"sdk"},
			[]string{"meta", "client"},
			[]string{"client", "name"},
			[]string{"context", "client"},
		))
		sample.Source = normalizeUsageDimension(firstStringByPaths(row,
			[]string{"source"},
			[]string{"source_name"},
			[]string{"sourceName"},
			[]string{"origin"},
			[]string{"channel"},
			[]string{"meta", "source"},
			[]string{"meta", "origin"},
		))
		sample.Provider = normalizeUsageDimension(firstStringByPaths(row,
			[]string{"provider"},
			[]string{"provider_name"},
			[]string{"providerName"},
			[]string{"upstream_provider"},
			[]string{"upstreamProvider"},
			[]string{"model", "provider"},
			[]string{"model", "provider_name"},
			[]string{"route", "provider_name"},
		))
		sample.Interface = normalizeUsageDimension(firstStringByPaths(row,
			[]string{"interface"},
			[]string{"interface_name"},
			[]string{"interfaceName"},
			[]string{"mode"},
			[]string{"client_type"},
			[]string{"entrypoint"},
			[]string{"meta", "interface"},
		))
		sample.Endpoint = normalizeUsageDimension(firstStringByPaths(row,
			[]string{"endpoint"},
			[]string{"endpoint_name"},
			[]string{"endpointName"},
			[]string{"route"},
			[]string{"path"},
			[]string{"meta", "endpoint"},
		))
		sample.Language = normalizeUsageDimension(firstStringByPaths(row,
			[]string{"language"},
			[]string{"language_name"},
			[]string{"languageName"},
			[]string{"lang"},
			[]string{"programming_language"},
			[]string{"programmingLanguage"},
			[]string{"code_language"},
			[]string{"codeLanguage"},
			[]string{"input_language"},
			[]string{"inputLanguage"},
			[]string{"file_language"},
			[]string{"meta", "language"},
		))
		// Backfill dimensions from the bucket/key tags injected during
		// flattening; the bucket name hints at which dimension the key is.
		bucket := strings.ToLower(strings.TrimSpace(firstStringByPaths(row, []string{"__usage_bucket"})))
		usageKey := normalizeUsageDimension(firstStringByPaths(row, []string{"__usage_key"}))

		if sample.Language == "" && usageKey != "" && strings.Contains(bucket, "language") {
			sample.Language = usageKey
		}
		if sample.Client == "" && usageKey != "" && strings.Contains(bucket, "client") {
			sample.Client = usageKey
		}
		if sample.Source == "" && usageKey != "" && strings.Contains(bucket, "source") {
			sample.Source = usageKey
		}
		if sample.Provider == "" && usageKey != "" && strings.Contains(bucket, "provider") {
			sample.Provider = usageKey
		}
		if sample.Interface == "" && usageKey != "" && strings.Contains(bucket, "interface") {
			sample.Interface = usageKey
		}
		if sample.Endpoint == "" && usageKey != "" && strings.Contains(bucket, "endpoint") {
			sample.Endpoint = usageKey
		}
		if kind == "model" && sample.Name == "" && usageKey != "" && (strings.Contains(bucket, "model") || bucket == "") {
			sample.Name = usageKey
		}
		if kind == "tool" && sample.Name == "" && usageKey != "" && (strings.Contains(bucket, "tool") || bucket == "") {
			sample.Name = usageKey
		}

		// Client and Source mirror each other when only one is known.
		if sample.Source == "" && sample.Client != "" {
			sample.Source = sample.Client
		}
		if sample.Client == "" && sample.Source != "" {
			sample.Client = sample.Source
		}

		if sample.Provider == "" {
			modelProviderHint := normalizeUsageDimension(firstStringByPaths(row,
				[]string{"model", "provider"},
				[]string{"model", "provider_name"},
				[]string{"model", "vendor"},
			))
			if modelProviderHint != "" {
				sample.Provider = modelProviderHint
			}
		}

		sample.Requests, _ = firstNumberByPaths(row,
			[]string{"requests"},
			[]string{"request_count"},
			[]string{"requestCount"},
			[]string{"request_num"},
			[]string{"requestNum"},
			[]string{"calls"},
			[]string{"count"},
			[]string{"usageCount"},
			[]string{"usage", "requests"},
			[]string{"stats", "requests"},
		)
		sample.Input, _ = firstNumberByPaths(row,
			[]string{"input_tokens"},
			[]string{"inputTokens"},
			[]string{"input_token_count"},
			[]string{"prompt_tokens"},
			[]string{"promptTokens"},
			[]string{"usage", "input_tokens"},
			[]string{"usage", "inputTokens"},
		)
		sample.Output, _ = firstNumberByPaths(row,
			[]string{"output_tokens"},
			[]string{"outputTokens"},
			[]string{"completion_tokens"},
			[]string{"completionTokens"},
			[]string{"usage", "output_tokens"},
			[]string{"usage", "outputTokens"},
		)
		sample.Reasoning, _ = firstNumberByPaths(row,
			[]string{"reasoning_tokens"},
			[]string{"reasoningTokens"},
			[]string{"thinking_tokens"},
			[]string{"thinkingTokens"},
			[]string{"usage", "reasoning_tokens"},
		)
		sample.Total, _ = firstNumberByPaths(row,
			[]string{"total_tokens"},
			[]string{"totalTokens"},
			[]string{"tokens"},
			[]string{"token_count"},
			[]string{"tokenCount"},
			[]string{"usage", "total_tokens"},
			[]string{"usage", "totalTokens"},
		)
		// Synthesize a total from components when none was reported.
		if sample.Total == 0 {
			sample.Total = sample.Input + sample.Output + sample.Reasoning
		}
		sample.CostUSD = parseCostUSD(row)
		if kind == "model" && sample.Language == "" {
			sample.Language = inferModelUsageLanguage(sample.Name)
		}

		// Keep only rows that carry some signal (counts, cost, or a name).
		if sample.Requests > 0 || sample.Total > 0 || sample.CostUSD > 0 || sample.Name != "" {
			samples = append(samples, sample)
		}
	}

	return samples
}
append(nested, extractUsageRows(item)...) + } + return nested + case map[string]any: + if looksLikeUsageRow(value) { + return []map[string]any{value} + } + + keys := []string{ + "data", "items", "list", "rows", "records", "usage", + "model_usage", "modelUsage", + "tool_usage", "toolUsage", + "language_usage", "languageUsage", + "client_usage", "clientUsage", + "source_usage", "sourceUsage", + "provider_usage", "providerUsage", + "endpoint_usage", "endpointUsage", + "result", + } + var combined []map[string]any + for _, key := range keys { + if nested, ok := mapValue(value, key); ok { + rows := extractUsageRows(nested) + if len(rows) > 0 { + for _, row := range rows { + tagged := row + if firstStringFromMap(row, "__usage_bucket") == "" { + tagged = cloneStringAnyMap(row) + tagged["__usage_bucket"] = key + } + combined = append(combined, tagged) + } + } + } + } + if len(combined) > 0 { + return combined + } + + mapKeys := make([]string, 0, len(value)) + for key := range value { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var all []map[string]any + for _, key := range mapKeys { + nested := value[key] + rows := extractUsageRows(nested) + if len(rows) > 0 { + for _, row := range rows { + tagged := row + if firstStringFromMap(row, "__usage_key") == "" { + tagged = cloneStringAnyMap(row) + tagged["__usage_key"] = key + } + all = append(all, tagged) + } + continue + } + if numeric, ok := parseFloat(nested); ok { + all = append(all, map[string]any{ + "requests": numeric, + "__usage_key": key, + }) + } + } + return all + default: + return nil + } +} + +func extractLimitRows(v any) []map[string]any { + switch value := v.(type) { + case []any: + return mapsFromArray(value) + case map[string]any: + if _, ok := value["type"]; ok { + return []map[string]any{value} + } + for _, key := range []string{"limits", "items", "data"} { + if nested, ok := value[key]; ok { + rows := extractLimitRows(nested) + if len(rows) > 0 { + return rows + } + } + } + var all 
[]map[string]any + for _, nested := range value { + rows := extractLimitRows(nested) + all = append(all, rows...) + } + return all + default: + return nil + } +} + +func extractCreditGrantRows(v any) []map[string]any { + switch value := v.(type) { + case []any: + var rows []map[string]any + for _, item := range value { + row, ok := item.(map[string]any) + if !ok { + continue + } + if looksLikeCreditGrantRow(row) { + rows = append(rows, row) + continue + } + rows = append(rows, extractCreditGrantRows(row)...) + } + return rows + case map[string]any: + if looksLikeCreditGrantRow(value) { + return []map[string]any{value} + } + + var rows []map[string]any + for _, key := range []string{"credit_grants", "creditGrants", "grants", "items", "list", "data"} { + nested, ok := mapValue(value, key) + if !ok { + continue + } + rows = append(rows, extractCreditGrantRows(nested)...) + } + if len(rows) > 0 { + return rows + } + + keys := make([]string, 0, len(value)) + for key := range value { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + rows = append(rows, extractCreditGrantRows(value[key])...) 
+ } + return rows + default: + return nil + } +} + +func looksLikeCreditGrantRow(row map[string]any) bool { + if row == nil { + return false + } + _, hasAmount := parseNumberFromMap(row, + "grant_amount", "grantAmount", + "total_granted", "totalGranted", + "amount", "total_amount", "totalAmount") + _, hasUsed := parseNumberFromMap(row, + "used_amount", "usedAmount", + "used", "usage", "spent") + _, hasAvailable := parseNumberFromMap(row, + "available_amount", "availableAmount", + "remaining_amount", "remainingAmount", + "remaining_balance", "remainingBalance", + "available_balance", "availableBalance", + "available", "remaining") + return hasAmount || hasUsed || hasAvailable +} + +func parseCreditGrantExpiry(row map[string]any) (time.Time, bool) { + raw := firstAnyFromMap(row, + "expires_at", "expiresAt", "expiry_time", "expiryTime", + "expire_at", "expireAt", "expiration_time", "expirationTime") + if raw == nil { + return time.Time{}, false + } + return parseTimeValue(raw) +} + +func mapsFromArray(values []any) []map[string]any { + rows := make([]map[string]any, 0, len(values)) + for _, item := range values { + row, ok := item.(map[string]any) + if !ok { + continue + } + rows = append(rows, row) + } + return rows +} + +func cloneStringAnyMap(in map[string]any) map[string]any { + return maps.Clone(in) +} + +func looksLikeUsageRow(row map[string]any) bool { + if row == nil { + return false + } + hasName := firstStringByPaths(row, + []string{"model"}, + []string{"model_id"}, + []string{"modelName"}, + []string{"tool"}, + []string{"tool_name"}, + []string{"name"}, + []string{"model", "name"}, + []string{"tool", "name"}, + ) != "" + if hasName { + return true + } + _, hasReq := firstNumberByPaths(row, []string{"requests"}, []string{"request_count"}, []string{"calls"}, []string{"count"}, []string{"usage", "requests"}) + _, hasTokens := firstNumberByPaths(row, []string{"total_tokens"}, []string{"tokens"}, []string{"input_tokens"}, []string{"output_tokens"}, 
[]string{"usage", "total_tokens"}) + _, hasCost := firstNumberByPaths(row, []string{"cost"}, []string{"total_cost"}, []string{"cost_usd"}, []string{"total_cost_usd"}, []string{"usage", "cost_usd"}) + return hasReq || hasTokens || hasCost +} diff --git a/internal/providers/zai/usage_helpers.go b/internal/providers/zai/usage_helpers.go new file mode 100644 index 0000000..852f195 --- /dev/null +++ b/internal/providers/zai/usage_helpers.go @@ -0,0 +1,744 @@ +package zai + +import ( + "encoding/json" + "fmt" + "math" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func captureEndpointPayload(snap *core.UsageSnapshot, endpoint string, body []byte) { + if snap == nil { + return + } + endpointSlug := sanitizeMetricSlug(endpoint) + if endpointSlug == "" { + endpointSlug = "unknown" + } + prefix := "api_" + endpointSlug + + if len(body) == 0 { + return + } + setUsedMetric(snap, prefix+"_payload_bytes", float64(len(body)), "bytes", "current") + + var payload any + if err := json.Unmarshal(body, &payload); err != nil { + snap.Raw[prefix+"_parse"] = "non_json" + return + } + snap.Raw[prefix+"_parse"] = "json" + + numericByPath := make(map[string]*payloadNumericStat) + leafCount := 0 + objectCount := 0 + arrayCount := 0 + walkPayloadStats("", payload, numericByPath, &leafCount, &objectCount, &arrayCount) + + setUsedMetric(snap, prefix+"_field_count", float64(leafCount), "fields", "current") + setUsedMetric(snap, prefix+"_object_nodes", float64(objectCount), "objects", "current") + setUsedMetric(snap, prefix+"_array_nodes", float64(arrayCount), "arrays", "current") + setUsedMetric(snap, prefix+"_numeric_count", float64(len(numericByPath)), "fields", "current") + + type numericEntry struct { + path string + stat *payloadNumericStat + } + entries := make([]numericEntry, 0, len(numericByPath)) + for path, stat := range numericByPath { + if stat == nil { + continue + } + entries = append(entries, numericEntry{path: 
path, stat: stat}) + } + sort.Slice(entries, func(i, j int) bool { + left := math.Abs(entries[i].stat.Sum) + right := math.Abs(entries[j].stat.Sum) + if left != right { + return left > right + } + return entries[i].path < entries[j].path + }) + + if len(entries) > 0 { + top := entries + if len(top) > 8 { + top = top[:8] + } + parts := make([]string, 0, len(top)) + for _, entry := range top { + value := entry.stat.Last + if entry.stat.Count > 1 { + value = entry.stat.Sum + } + path := strings.TrimSpace(entry.path) + if path == "" { + path = "root" + } + parts = append(parts, fmt.Sprintf("%s=%s", path, formatPayloadValue(value))) + } + snap.Raw[prefix+"_numeric_top"] = strings.Join(parts, ", ") + } + + sort.Slice(entries, func(i, j int) bool { + return entries[i].path < entries[j].path + }) + emitted := 0 + maxDynamicMetrics := 96 + for _, entry := range entries { + if emitted >= maxDynamicMetrics { + break + } + pathSlug := sanitizeMetricSlug(strings.Trim(entry.path, "._")) + if pathSlug == "" { + pathSlug = "root" + } + metricKey := prefix + "_" + pathSlug + if _, exists := snap.Metrics[metricKey]; exists { + continue + } + value := entry.stat.Last + if entry.stat.Count > 1 { + value = entry.stat.Sum + } + setUsedMetric(snap, metricKey, value, "value", "current") + emitted++ + } + if len(entries) > emitted { + snap.Raw[prefix+"_numeric_omitted"] = strconv.Itoa(len(entries) - emitted) + } +} + +func walkPayloadStats(path string, v any, numericByPath map[string]*payloadNumericStat, leafCount, objectCount, arrayCount *int) { + switch value := v.(type) { + case map[string]any: + if objectCount != nil { + *objectCount = *objectCount + 1 + } + keys := make([]string, 0, len(value)) + for key := range value { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + next := appendPayloadPath(path, key) + walkPayloadStats(next, value[key], numericByPath, leafCount, objectCount, arrayCount) + } + case []any: + if arrayCount != nil { + *arrayCount = 
*arrayCount + 1 + } + next := appendPayloadPath(path, "items") + for _, item := range value { + walkPayloadStats(next, item, numericByPath, leafCount, objectCount, arrayCount) + } + default: + if leafCount != nil { + *leafCount = *leafCount + 1 + } + if numericByPath == nil { + return + } + numeric, ok := parseFloat(v) + if !ok { + return + } + key := strings.TrimSpace(path) + if key == "" { + key = "root" + } + stat := numericByPath[key] + if stat == nil { + stat = &payloadNumericStat{Min: numeric, Max: numeric} + numericByPath[key] = stat + } + stat.Count++ + stat.Sum += numeric + stat.Last = numeric + if numeric < stat.Min { + stat.Min = numeric + } + if numeric > stat.Max { + stat.Max = numeric + } + } +} + +func appendPayloadPath(path, segment string) string { + path = strings.TrimSpace(path) + segment = strings.TrimSpace(segment) + if segment == "" { + return path + } + if path == "" { + return segment + } + return path + "." + segment +} + +func formatPayloadValue(v float64) string { + return strconv.FormatFloat(v, 'f', -1, 64) +} + +func applyUsageRange(reqURL string) (string, error) { + parsed, err := url.Parse(reqURL) + if err != nil { + return "", err + } + start, end := usageWindow() + q := parsed.Query() + q.Set("startTime", start) + q.Set("endTime", end) + parsed.RawQuery = q.Encode() + return parsed.String(), nil +} + +func usageWindow() (start, end string) { + now := time.Now().UTC() + startTime := time.Date(now.Year(), now.Month(), now.Day()-6, 0, 0, 0, 0, time.UTC) + endTime := time.Date(now.Year(), now.Month(), now.Day(), 23, 59, 59, 0, time.UTC) + return startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05") +} + +func joinURL(base, endpoint string) string { + trimmedBase := strings.TrimRight(base, "/") + trimmedEndpoint := strings.TrimLeft(endpoint, "/") + return trimmedBase + "/" + trimmedEndpoint +} + +func parseAPIError(body []byte) (code, msg string) { + var payload struct { + Code any `json:"code"` + Msg string 
`json:"msg"` + Message string `json:"message"` + Error *apiError `json:"error"` + } + if err := json.Unmarshal(body, &payload); err != nil { + return "", "" + } + + if payload.Error != nil { + if payload.Error.Message != "" { + msg = payload.Error.Message + } + if payload.Error.Code != nil { + code = anyToString(payload.Error.Code) + } + } + if code == "" && payload.Code != nil { + code = anyToString(payload.Code) + } + if msg == "" { + msg = core.FirstNonEmpty(payload.Message, payload.Msg) + } + return code, msg +} + +func parseCostUSD(row map[string]any) float64 { + if cents, ok := firstNumberByPaths(row, + []string{"cost_cents"}, + []string{"costCents"}, + []string{"total_cost_cents"}, + []string{"totalCostCents"}, + []string{"usage", "cost_cents"}, + ); ok { + return cents / 100 + } + + if micros, ok := firstNumberByPaths(row, + []string{"cost_micros"}, + []string{"costMicros"}, + []string{"total_cost_micros"}, + []string{"totalCostMicros"}, + ); ok { + return micros / 1_000_000 + } + + value, ok := firstNumberByPaths(row, + []string{"cost_usd"}, + []string{"costUSD"}, + []string{"total_cost_usd"}, + []string{"totalCostUSD"}, + []string{"total_cost"}, + []string{"totalCost"}, + []string{"api_cost"}, + []string{"apiCost"}, + []string{"cost"}, + []string{"amount"}, + []string{"total_amount"}, + []string{"totalAmount"}, + []string{"usage", "cost_usd"}, + []string{"usage", "costUSD"}, + []string{"usage", "cost"}, + ) + if ok { + return value + } + return 0 +} + +func parseNumberFromMap(row map[string]any, keys ...string) (float64, bool) { + value, _, ok := firstNumberWithKey(row, keys...) 
+ return value, ok +} + +func firstNumberWithKey(row map[string]any, keys ...string) (float64, string, bool) { + for _, key := range keys { + raw, ok := mapValue(row, key) + if !ok { + continue + } + if parsed, ok := parseFloat(raw); ok { + return parsed, key, true + } + } + return 0, "", false +} + +func parseFloat(v any) (float64, bool) { + switch value := v.(type) { + case float64: + return value, true + case float32: + return float64(value), true + case int: + return float64(value), true + case int64: + return float64(value), true + case int32: + return float64(value), true + case int16: + return float64(value), true + case int8: + return float64(value), true + case uint: + return float64(value), true + case uint64: + return float64(value), true + case uint32: + return float64(value), true + case uint16: + return float64(value), true + case uint8: + return float64(value), true + case json.Number: + parsed, err := value.Float64() + return parsed, err == nil + case string: + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return 0, false + } + parsed, err := strconv.ParseFloat(trimmed, 64) + if err != nil { + return 0, false + } + return parsed, true + default: + return 0, false + } +} + +func firstStringFromMap(row map[string]any, keys ...string) string { + for _, key := range keys { + raw, ok := mapValue(row, key) + if !ok || raw == nil { + continue + } + str := strings.TrimSpace(anyToString(raw)) + if str != "" { + return str + } + } + return "" +} + +func firstAnyFromMap(row map[string]any, keys ...string) any { + for _, key := range keys { + if raw, ok := mapValue(row, key); ok { + return raw + } + } + return nil +} + +func mapValue(row map[string]any, key string) (any, bool) { + if row == nil { + return nil, false + } + if raw, ok := row[key]; ok { + return raw, true + } + for candidate, raw := range row { + if strings.EqualFold(candidate, key) { + return raw, true + } + } + return nil, false +} + +func valueAtPath(row map[string]any, path 
[]string) (any, bool) { + if len(path) == 0 { + return nil, false + } + + var current any = row + for _, segment := range path { + node, ok := current.(map[string]any) + if !ok { + return nil, false + } + next, ok := mapValue(node, segment) + if !ok { + return nil, false + } + current = next + } + return current, true +} + +func firstAnyByPaths(row map[string]any, paths ...[]string) any { + for _, path := range paths { + if raw, ok := valueAtPath(row, path); ok { + return raw + } + } + return nil +} + +func firstStringByPaths(row map[string]any, paths ...[]string) string { + for _, path := range paths { + raw, ok := valueAtPath(row, path) + if !ok || raw == nil { + continue + } + text := strings.TrimSpace(anyToString(raw)) + if text != "" { + return text + } + } + return "" +} + +func firstNumberByPaths(row map[string]any, paths ...[]string) (float64, bool) { + for _, path := range paths { + raw, ok := valueAtPath(row, path) + if !ok { + continue + } + if parsed, ok := parseFloat(raw); ok { + return parsed, true + } + } + return 0, false +} + +func normalizeUsageDimension(raw string) string { + value := strings.TrimSpace(raw) + value = strings.Trim(value, "\"'") + if value == "" { + return "" + } + switch strings.ToLower(value) { + case "null", "nil", "n/a", "na", "unknown": + return "" + default: + return value + } +} + +func accumulateRollupValues(acc *usageRollup, sample usageSample) { + if acc == nil { + return + } + acc.Requests += sample.Requests + acc.Input += sample.Input + acc.Output += sample.Output + acc.Reasoning += sample.Reasoning + acc.Total += sample.Total + acc.CostUSD += sample.CostUSD +} + +func accumulateUsageRollup(target map[string]*usageRollup, key string, sample usageSample) { + key = strings.TrimSpace(key) + if key == "" { + return + } + acc, ok := target[key] + if !ok { + acc = &usageRollup{} + target[key] = acc + } + accumulateRollupValues(acc, sample) +} + +func sortedUsageRollupKeys(values map[string]*usageRollup) []string { + keys := 
make([]string, 0, len(values)) + for key := range values { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func summarizeShareUsage(values map[string]float64, maxItems int) string { + type item struct { + name string + value float64 + } + var ( + list []item + total float64 + ) + for name, value := range values { + if value <= 0 { + continue + } + list = append(list, item{name: name, value: value}) + total += value + } + if len(list) == 0 || total <= 0 { + return "" + } + sort.Slice(list, func(i, j int) bool { + if list[i].value != list[j].value { + return list[i].value > list[j].value + } + return list[i].name < list[j].name + }) + if maxItems > 0 && len(list) > maxItems { + list = list[:maxItems] + } + parts := make([]string, 0, len(list)) + for _, entry := range list { + parts = append(parts, fmt.Sprintf("%s: %.0f%%", normalizeUsageLabel(entry.name), entry.value/total*100)) + } + return strings.Join(parts, ", ") +} + +func summarizeCountUsage(values map[string]float64, unit string, maxItems int) string { + type item struct { + name string + value float64 + } + var list []item + for name, value := range values { + if value <= 0 { + continue + } + list = append(list, item{name: name, value: value}) + } + if len(list) == 0 { + return "" + } + sort.Slice(list, func(i, j int) bool { + if list[i].value != list[j].value { + return list[i].value > list[j].value + } + return list[i].name < list[j].name + }) + if maxItems > 0 && len(list) > maxItems { + list = list[:maxItems] + } + parts := make([]string, 0, len(list)) + for _, entry := range list { + parts = append(parts, fmt.Sprintf("%s: %.0f %s", normalizeUsageLabel(entry.name), entry.value, unit)) + } + return strings.Join(parts, ", ") +} + +func normalizeUsageLabel(value string) string { + value = strings.TrimSpace(value) + if value == "" { + return "unknown" + } + replacer := strings.NewReplacer("_", " ", "-", " ") + return replacer.Replace(value) +} + +func inferModelUsageLanguage(model 
string) string { + model = strings.ToLower(strings.TrimSpace(model)) + if model == "" { + return "" + } + switch { + case strings.Contains(model, "coder"), strings.Contains(model, "code"), strings.Contains(model, "codestral"), strings.Contains(model, "devstral"): + return "code" + case strings.Contains(model, "vision"), strings.Contains(model, "image"), strings.Contains(model, "multimodal"), strings.Contains(model, "omni"), strings.Contains(model, "vl"): + return "multimodal" + case strings.Contains(model, "audio"), strings.Contains(model, "speech"), strings.Contains(model, "voice"), strings.Contains(model, "whisper"), strings.Contains(model, "tts"), strings.Contains(model, "stt"): + return "audio" + case strings.Contains(model, "reason"), strings.Contains(model, "thinking"): + return "reasoning" + default: + return "general" + } +} + +func anyToString(v any) string { + switch value := v.(type) { + case string: + return strings.TrimSpace(value) + case json.Number: + return value.String() + case float64: + if math.Mod(value, 1) == 0 { + return strconv.FormatInt(int64(value), 10) + } + return strconv.FormatFloat(value, 'f', -1, 64) + case float32: + return strconv.FormatFloat(float64(value), 'f', -1, 32) + case int: + return strconv.Itoa(value) + case int64: + return strconv.FormatInt(value, 10) + case int32: + return strconv.FormatInt(int64(value), 10) + case uint: + return strconv.FormatUint(uint64(value), 10) + case uint64: + return strconv.FormatUint(value, 10) + case bool: + return strconv.FormatBool(value) + default: + return strings.TrimSpace(fmt.Sprint(value)) + } +} + +func normalizeDate(raw any) string { + if raw == nil { + return "" + } + + if ts, ok := parseTimeValue(raw); ok { + return ts.UTC().Format("2006-01-02") + } + + value := strings.TrimSpace(anyToString(raw)) + if value == "" { + return "" + } + if len(value) >= 10 { + candidate := value[:10] + if _, err := time.Parse("2006-01-02", candidate); err == nil { + return candidate + } + } + return "" 
+} + +func parseTimeValue(raw any) (time.Time, bool) { + if raw == nil { + return time.Time{}, false + } + + if n, ok := parseFloat(raw); ok { + if n <= 0 { + return time.Time{}, false + } + sec := int64(n) + if n > 1e12 { + sec = int64(n / 1000) + } + return time.Unix(sec, 0).UTC(), true + } + + value := strings.TrimSpace(anyToString(raw)) + if value == "" { + return time.Time{}, false + } + + for _, layout := range []string{ + time.RFC3339, + "2006-01-02 15:04:05", + "2006-01-02T15:04:05Z07:00", + "2006-01-02", + } { + if parsed, err := time.Parse(layout, value); err == nil { + return parsed.UTC(), true + } + } + + if n, err := strconv.ParseInt(value, 10, 64); err == nil { + if n > 1e12 { + return time.Unix(n/1000, 0).UTC(), true + } + return time.Unix(n, 0).UTC(), true + } + + return time.Time{}, false +} + +func isJSONEmpty(raw json.RawMessage) bool { + trimmed := strings.TrimSpace(string(raw)) + return trimmed == "" || trimmed == "null" || trimmed == "{}" || trimmed == "[]" +} + +func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { + if key == "" || value <= 0 { + return + } + snap.Metrics[key] = core.Metric{ + Used: core.Float64Ptr(value), + Unit: unit, + Window: window, + } +} + +func sanitizeMetricSlug(value string) string { + trimmed := strings.TrimSpace(strings.ToLower(value)) + if trimmed == "" { + return "unknown" + } + + var b strings.Builder + lastUnderscore := false + for _, r := range trimmed { + switch { + case (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9'): + b.WriteRune(r) + lastUnderscore = false + case r == '-' || r == '_': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore { + b.WriteRune('_') + lastUnderscore = true + } + } + } + slug := strings.Trim(b.String(), "_") + if slug == "" { + return "unknown" + } + return slug +} + +func clamp(value, minVal, maxVal float64) float64 { + return math.Min(math.Max(value, minVal), maxVal) +} + +func apiErrorMessage(err *apiError) string 
{ + if err == nil { + return "" + } + return strings.TrimSpace(err.Message) +} + +func isNoPackageCode(code, msg string) bool { + code = strings.TrimSpace(code) + if code == "1113" { + return true + } + lowerMsg := strings.ToLower(strings.TrimSpace(msg)) + return strings.Contains(lowerMsg, "insufficient balance") || + strings.Contains(lowerMsg, "no resource package") || + strings.Contains(lowerMsg, "no active coding package") +} diff --git a/internal/providers/zai/zai.go b/internal/providers/zai/zai.go index 64c8913..f4e7fa0 100644 --- a/internal/providers/zai/zai.go +++ b/internal/providers/zai/zai.go @@ -5,10 +5,8 @@ import ( "encoding/json" "fmt" "io" - "maps" "math" "net/http" - "net/url" "sort" "strconv" "strings" @@ -726,167 +724,6 @@ func (p *Provider) finalizeStatusAndMessage(snap *core.UsageSnapshot, state *pro snap.Message = "OK" } -func resolveAPIBases(acct core.AccountConfig) (codingBase, monitorBase, region string) { - planType := "" - if acct.ExtraData != nil { - planType = strings.TrimSpace(acct.ExtraData["plan_type"]) - } - - isChina := strings.Contains(strings.ToLower(planType), "china") - if acct.BaseURL != "" { - base := strings.TrimRight(acct.BaseURL, "/") - parsed, err := url.Parse(base) - if err == nil && parsed.Scheme != "" && parsed.Host != "" { - root := parsed.Scheme + "://" + parsed.Host - path := strings.TrimRight(parsed.Path, "/") - switch { - case strings.Contains(path, "/api/coding/paas/v4"): - codingBase = root + "/api/coding/paas/v4" - case strings.HasSuffix(path, "/models"): - codingBase = root + strings.TrimSuffix(path, "/models") - case path == "" || path == "/": - codingBase = root + "/api/coding/paas/v4" - default: - codingBase = root + path - } - monitorBase = root - hostLower := strings.ToLower(parsed.Host) - if strings.Contains(hostLower, "bigmodel.cn") { - isChina = true - } - } else { - codingBase = base - monitorBase = strings.TrimSuffix(base, "/api/coding/paas/v4") - monitorBase = strings.TrimSuffix(monitorBase, "/") - } 
- } - - if codingBase == "" || monitorBase == "" { - if isChina { - codingBase = defaultChinaCodingBaseURL - monitorBase = defaultChinaMonitorBaseURL - } else { - codingBase = defaultGlobalCodingBaseURL - monitorBase = defaultGlobalMonitorBaseURL - } - } - - region = "global" - if isChina || strings.Contains(strings.ToLower(monitorBase), "bigmodel.cn") { - region = "china" - } - return codingBase, monitorBase, region -} - -func doMonitorRequest(ctx context.Context, reqURL, token string, bearer bool, client *http.Client) (int, []byte, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL, nil) - if err != nil { - return 0, nil, fmt.Errorf("creating request: %w", err) - } - - authValue := token - if bearer { - authValue = "Bearer " + token - } - req.Header.Set("Authorization", authValue) - req.Header.Set("Accept-Language", "en-US,en") - req.Header.Set("Content-Type", "application/json") - - resp, err := client.Do(req) - if err != nil { - return 0, nil, fmt.Errorf("request failed: %w", err) - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return resp.StatusCode, nil, fmt.Errorf("reading response: %w", err) - } - return resp.StatusCode, body, nil -} - -func applyQuotaData(raw json.RawMessage, snap *core.UsageSnapshot, state *providerState) bool { - var payload any - if err := json.Unmarshal(raw, &payload); err != nil { - return false - } - - rows := extractLimitRows(payload) - if len(rows) == 0 { - return false - } - - found := false - for _, row := range rows { - kind := strings.ToUpper(strings.TrimSpace(firstStringFromMap(row, "type", "limitType"))) - percentage, hasPct := parseNumberFromMap(row, "percentage", "usedPercent", "used_percentage") - if hasPct && percentage <= 1 { - percentage *= 100 - } - - switch kind { - case "TOKENS_LIMIT": - if hasPct { - snap.Metrics["usage_five_hour"] = core.Metric{ - Used: core.Float64Ptr(clamp(percentage, 0, 100)), - Limit: core.Float64Ptr(100), - Unit: "%", - 
Window: "5h", - } - if percentage >= 100 { - state.limited = true - } else if percentage >= 80 { - state.nearLimit = true - } - } - - limit, hasLimit := parseNumberFromMap(row, "usage", "limit", "quota") - current, hasCurrent := parseNumberFromMap(row, "currentValue", "current", "used") - if hasLimit && hasCurrent { - remaining := math.Max(limit-current, 0) - snap.Metrics["tokens_five_hour"] = core.Metric{ - Limit: core.Float64Ptr(limit), - Used: core.Float64Ptr(current), - Remaining: core.Float64Ptr(remaining), - Unit: "tokens", - Window: "5h", - } - } - - if resetRaw := firstAnyFromMap(row, "nextResetTime", "resetTime", "reset_at"); resetRaw != nil { - if reset, ok := parseTimeValue(resetRaw); ok { - snap.Resets["usage_five_hour"] = reset - } - } - found = true - - case "TIME_LIMIT": - limit, hasLimit := parseNumberFromMap(row, "usage", "limit", "quota") - current, hasCurrent := parseNumberFromMap(row, "currentValue", "current", "used") - if hasLimit && hasCurrent { - remaining := math.Max(limit-current, 0) - snap.Metrics["mcp_monthly_usage"] = core.Metric{ - Limit: core.Float64Ptr(limit), - Used: core.Float64Ptr(current), - Remaining: core.Float64Ptr(remaining), - Unit: "calls", - Window: "1mo", - } - found = true - } - if hasPct { - if percentage >= 100 { - state.limited = true - } else if percentage >= 80 { - state.nearLimit = true - } - } - } - } - - return found -} - func applyModelUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { today := time.Now().UTC().Format("2006-01-02") hasNamedModelRows := false @@ -1301,1183 +1138,3 @@ func applyToolUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { snap.Raw["tool_usage"] = summary } } - -func extractUsageSamples(raw json.RawMessage, kind string) []usageSample { - if isJSONEmpty(raw) { - return nil - } - - var payload any - if err := json.Unmarshal(raw, &payload); err != nil { - return nil - } - - rows := extractUsageRows(payload) - if len(rows) == 0 { - return nil - } - - samples := 
make([]usageSample, 0, len(rows)) - for _, row := range rows { - sample := usageSample{ - Date: normalizeDate(firstAnyByPaths(row, - []string{"date"}, - []string{"day"}, - []string{"time"}, - []string{"timestamp"}, - []string{"created_at"}, - []string{"createdAt"}, - []string{"ts"}, - []string{"meta", "date"}, - []string{"meta", "timestamp"}, - )), - } - - if kind == "model" { - sample.Name = firstStringByPaths(row, - []string{"model"}, - []string{"model_id"}, - []string{"modelId"}, - []string{"model_name"}, - []string{"modelName"}, - []string{"name"}, - []string{"model", "id"}, - []string{"model", "name"}, - []string{"model", "modelId"}, - []string{"meta", "model"}, - ) - } else { - sample.Name = firstStringByPaths(row, - []string{"tool"}, - []string{"tool_name"}, - []string{"toolName"}, - []string{"name"}, - []string{"tool_id"}, - []string{"toolId"}, - []string{"tool", "name"}, - []string{"tool", "id"}, - []string{"meta", "tool"}, - ) - } - sample.Client = normalizeUsageDimension(firstStringByPaths(row, - []string{"client"}, - []string{"client_name"}, - []string{"clientName"}, - []string{"application"}, - []string{"app"}, - []string{"sdk"}, - []string{"meta", "client"}, - []string{"client", "name"}, - []string{"context", "client"}, - )) - sample.Source = normalizeUsageDimension(firstStringByPaths(row, - []string{"source"}, - []string{"source_name"}, - []string{"sourceName"}, - []string{"origin"}, - []string{"channel"}, - []string{"meta", "source"}, - []string{"meta", "origin"}, - )) - sample.Provider = normalizeUsageDimension(firstStringByPaths(row, - []string{"provider"}, - []string{"provider_name"}, - []string{"providerName"}, - []string{"upstream_provider"}, - []string{"upstreamProvider"}, - []string{"model", "provider"}, - []string{"model", "provider_name"}, - []string{"route", "provider_name"}, - )) - sample.Interface = normalizeUsageDimension(firstStringByPaths(row, - []string{"interface"}, - []string{"interface_name"}, - []string{"interfaceName"}, - 
[]string{"mode"}, - []string{"client_type"}, - []string{"entrypoint"}, - []string{"meta", "interface"}, - )) - sample.Endpoint = normalizeUsageDimension(firstStringByPaths(row, - []string{"endpoint"}, - []string{"endpoint_name"}, - []string{"endpointName"}, - []string{"route"}, - []string{"path"}, - []string{"meta", "endpoint"}, - )) - sample.Language = normalizeUsageDimension(firstStringByPaths(row, - []string{"language"}, - []string{"language_name"}, - []string{"languageName"}, - []string{"lang"}, - []string{"programming_language"}, - []string{"programmingLanguage"}, - []string{"code_language"}, - []string{"codeLanguage"}, - []string{"input_language"}, - []string{"inputLanguage"}, - []string{"file_language"}, - []string{"meta", "language"}, - )) - bucket := strings.ToLower(strings.TrimSpace(firstStringByPaths(row, []string{"__usage_bucket"}))) - usageKey := normalizeUsageDimension(firstStringByPaths(row, []string{"__usage_key"})) - - if sample.Language == "" && usageKey != "" && strings.Contains(bucket, "language") { - sample.Language = usageKey - } - if sample.Client == "" && usageKey != "" && strings.Contains(bucket, "client") { - sample.Client = usageKey - } - if sample.Source == "" && usageKey != "" && strings.Contains(bucket, "source") { - sample.Source = usageKey - } - if sample.Provider == "" && usageKey != "" && strings.Contains(bucket, "provider") { - sample.Provider = usageKey - } - if sample.Interface == "" && usageKey != "" && strings.Contains(bucket, "interface") { - sample.Interface = usageKey - } - if sample.Endpoint == "" && usageKey != "" && strings.Contains(bucket, "endpoint") { - sample.Endpoint = usageKey - } - if kind == "model" && sample.Name == "" && usageKey != "" && (strings.Contains(bucket, "model") || bucket == "") { - sample.Name = usageKey - } - if kind == "tool" && sample.Name == "" && usageKey != "" && (strings.Contains(bucket, "tool") || bucket == "") { - sample.Name = usageKey - } - - if sample.Source == "" && sample.Client != "" 
{ - sample.Source = sample.Client - } - if sample.Client == "" && sample.Source != "" { - sample.Client = sample.Source - } - - if sample.Provider == "" { - modelProviderHint := normalizeUsageDimension(firstStringByPaths(row, - []string{"model", "provider"}, - []string{"model", "provider_name"}, - []string{"model", "vendor"}, - )) - if modelProviderHint != "" { - sample.Provider = modelProviderHint - } - } - - sample.Requests, _ = firstNumberByPaths(row, - []string{"requests"}, - []string{"request_count"}, - []string{"requestCount"}, - []string{"request_num"}, - []string{"requestNum"}, - []string{"calls"}, - []string{"count"}, - []string{"usageCount"}, - []string{"usage", "requests"}, - []string{"stats", "requests"}, - ) - sample.Input, _ = firstNumberByPaths(row, - []string{"input_tokens"}, - []string{"inputTokens"}, - []string{"input_token_count"}, - []string{"prompt_tokens"}, - []string{"promptTokens"}, - []string{"usage", "input_tokens"}, - []string{"usage", "inputTokens"}, - ) - sample.Output, _ = firstNumberByPaths(row, - []string{"output_tokens"}, - []string{"outputTokens"}, - []string{"completion_tokens"}, - []string{"completionTokens"}, - []string{"usage", "output_tokens"}, - []string{"usage", "outputTokens"}, - ) - sample.Reasoning, _ = firstNumberByPaths(row, - []string{"reasoning_tokens"}, - []string{"reasoningTokens"}, - []string{"thinking_tokens"}, - []string{"thinkingTokens"}, - []string{"usage", "reasoning_tokens"}, - ) - sample.Total, _ = firstNumberByPaths(row, - []string{"total_tokens"}, - []string{"totalTokens"}, - []string{"tokens"}, - []string{"token_count"}, - []string{"tokenCount"}, - []string{"usage", "total_tokens"}, - []string{"usage", "totalTokens"}, - ) - if sample.Total == 0 { - sample.Total = sample.Input + sample.Output + sample.Reasoning - } - sample.CostUSD = parseCostUSD(row) - if kind == "model" && sample.Language == "" { - sample.Language = inferModelUsageLanguage(sample.Name) - } - - if sample.Requests > 0 || sample.Total > 0 
|| sample.CostUSD > 0 || sample.Name != "" { - samples = append(samples, sample) - } - } - - return samples -} - -func extractUsageRows(v any) []map[string]any { - switch value := v.(type) { - case []any: - rows := mapsFromArray(value) - if len(rows) > 0 { - return rows - } - var nested []map[string]any - for _, item := range value { - nested = append(nested, extractUsageRows(item)...) - } - return nested - case map[string]any: - if looksLikeUsageRow(value) { - return []map[string]any{value} - } - - keys := []string{ - "data", "items", "list", "rows", "records", "usage", - "model_usage", "modelUsage", - "tool_usage", "toolUsage", - "language_usage", "languageUsage", - "client_usage", "clientUsage", - "source_usage", "sourceUsage", - "provider_usage", "providerUsage", - "endpoint_usage", "endpointUsage", - "result", - } - var combined []map[string]any - for _, key := range keys { - if nested, ok := mapValue(value, key); ok { - rows := extractUsageRows(nested) - if len(rows) > 0 { - for _, row := range rows { - tagged := row - if firstStringFromMap(row, "__usage_bucket") == "" { - tagged = cloneStringAnyMap(row) - tagged["__usage_bucket"] = key - } - combined = append(combined, tagged) - } - } - } - } - if len(combined) > 0 { - return combined - } - - mapKeys := make([]string, 0, len(value)) - for key := range value { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var all []map[string]any - for _, key := range mapKeys { - nested := value[key] - rows := extractUsageRows(nested) - if len(rows) > 0 { - for _, row := range rows { - tagged := row - if firstStringFromMap(row, "__usage_key") == "" { - tagged = cloneStringAnyMap(row) - tagged["__usage_key"] = key - } - all = append(all, tagged) - } - continue - } - if numeric, ok := parseFloat(nested); ok { - all = append(all, map[string]any{ - "requests": numeric, - "__usage_key": key, - }) - } - } - return all - default: - return nil - } -} - -func extractLimitRows(v any) []map[string]any { - switch value 
:= v.(type) { - case []any: - return mapsFromArray(value) - case map[string]any: - if _, ok := value["type"]; ok { - return []map[string]any{value} - } - for _, key := range []string{"limits", "items", "data"} { - if nested, ok := value[key]; ok { - rows := extractLimitRows(nested) - if len(rows) > 0 { - return rows - } - } - } - var all []map[string]any - for _, nested := range value { - rows := extractLimitRows(nested) - all = append(all, rows...) - } - return all - default: - return nil - } -} - -func extractCreditGrantRows(v any) []map[string]any { - switch value := v.(type) { - case []any: - var rows []map[string]any - for _, item := range value { - row, ok := item.(map[string]any) - if !ok { - continue - } - if looksLikeCreditGrantRow(row) { - rows = append(rows, row) - continue - } - rows = append(rows, extractCreditGrantRows(row)...) - } - return rows - case map[string]any: - if looksLikeCreditGrantRow(value) { - return []map[string]any{value} - } - - var rows []map[string]any - for _, key := range []string{"credit_grants", "creditGrants", "grants", "items", "list", "data"} { - nested, ok := mapValue(value, key) - if !ok { - continue - } - rows = append(rows, extractCreditGrantRows(nested)...) - } - if len(rows) > 0 { - return rows - } - - keys := make([]string, 0, len(value)) - for key := range value { - keys = append(keys, key) - } - sort.Strings(keys) - for _, key := range keys { - rows = append(rows, extractCreditGrantRows(value[key])...) 
- } - return rows - default: - return nil - } -} - -func looksLikeCreditGrantRow(row map[string]any) bool { - if row == nil { - return false - } - _, hasAmount := parseNumberFromMap(row, - "grant_amount", "grantAmount", - "total_granted", "totalGranted", - "amount", "total_amount", "totalAmount") - _, hasUsed := parseNumberFromMap(row, - "used_amount", "usedAmount", - "used", "usage", "spent") - _, hasAvailable := parseNumberFromMap(row, - "available_amount", "availableAmount", - "remaining_amount", "remainingAmount", - "remaining_balance", "remainingBalance", - "available_balance", "availableBalance", - "available", "remaining") - return hasAmount || hasUsed || hasAvailable -} - -func parseCreditGrantExpiry(row map[string]any) (time.Time, bool) { - raw := firstAnyFromMap(row, - "expires_at", "expiresAt", "expiry_time", "expiryTime", - "expire_at", "expireAt", "expiration_time", "expirationTime") - if raw == nil { - return time.Time{}, false - } - return parseTimeValue(raw) -} - -func mapsFromArray(values []any) []map[string]any { - rows := make([]map[string]any, 0, len(values)) - for _, item := range values { - row, ok := item.(map[string]any) - if !ok { - continue - } - rows = append(rows, row) - } - return rows -} - -func cloneStringAnyMap(in map[string]any) map[string]any { - return maps.Clone(in) -} - -func looksLikeUsageRow(row map[string]any) bool { - if row == nil { - return false - } - hasName := firstStringByPaths(row, - []string{"model"}, - []string{"model_id"}, - []string{"modelName"}, - []string{"tool"}, - []string{"tool_name"}, - []string{"name"}, - []string{"model", "name"}, - []string{"tool", "name"}, - ) != "" - if hasName { - return true - } - _, hasReq := firstNumberByPaths(row, []string{"requests"}, []string{"request_count"}, []string{"calls"}, []string{"count"}, []string{"usage", "requests"}) - _, hasTokens := firstNumberByPaths(row, []string{"total_tokens"}, []string{"tokens"}, []string{"input_tokens"}, []string{"output_tokens"}, 
[]string{"usage", "total_tokens"}) - _, hasCost := firstNumberByPaths(row, []string{"cost"}, []string{"total_cost"}, []string{"cost_usd"}, []string{"total_cost_usd"}, []string{"usage", "cost_usd"}) - return hasReq || hasTokens || hasCost -} - -func captureEndpointPayload(snap *core.UsageSnapshot, endpoint string, body []byte) { - if snap == nil { - return - } - endpointSlug := sanitizeMetricSlug(endpoint) - if endpointSlug == "" { - endpointSlug = "unknown" - } - prefix := "api_" + endpointSlug - - if len(body) == 0 { - return - } - setUsedMetric(snap, prefix+"_payload_bytes", float64(len(body)), "bytes", "current") - - var payload any - if err := json.Unmarshal(body, &payload); err != nil { - snap.Raw[prefix+"_parse"] = "non_json" - return - } - snap.Raw[prefix+"_parse"] = "json" - - numericByPath := make(map[string]*payloadNumericStat) - leafCount := 0 - objectCount := 0 - arrayCount := 0 - walkPayloadStats("", payload, numericByPath, &leafCount, &objectCount, &arrayCount) - - setUsedMetric(snap, prefix+"_field_count", float64(leafCount), "fields", "current") - setUsedMetric(snap, prefix+"_object_nodes", float64(objectCount), "objects", "current") - setUsedMetric(snap, prefix+"_array_nodes", float64(arrayCount), "arrays", "current") - setUsedMetric(snap, prefix+"_numeric_count", float64(len(numericByPath)), "fields", "current") - - type numericEntry struct { - path string - stat *payloadNumericStat - } - entries := make([]numericEntry, 0, len(numericByPath)) - for path, stat := range numericByPath { - if stat == nil { - continue - } - entries = append(entries, numericEntry{path: path, stat: stat}) - } - sort.Slice(entries, func(i, j int) bool { - left := math.Abs(entries[i].stat.Sum) - right := math.Abs(entries[j].stat.Sum) - if left != right { - return left > right - } - return entries[i].path < entries[j].path - }) - - if len(entries) > 0 { - top := entries - if len(top) > 8 { - top = top[:8] - } - parts := make([]string, 0, len(top)) - for _, entry := range 
top { - value := entry.stat.Last - if entry.stat.Count > 1 { - value = entry.stat.Sum - } - path := strings.TrimSpace(entry.path) - if path == "" { - path = "root" - } - parts = append(parts, fmt.Sprintf("%s=%s", path, formatPayloadValue(value))) - } - snap.Raw[prefix+"_numeric_top"] = strings.Join(parts, ", ") - } - - sort.Slice(entries, func(i, j int) bool { - return entries[i].path < entries[j].path - }) - emitted := 0 - maxDynamicMetrics := 96 - for _, entry := range entries { - if emitted >= maxDynamicMetrics { - break - } - pathSlug := sanitizeMetricSlug(strings.Trim(entry.path, "._")) - if pathSlug == "" { - pathSlug = "root" - } - metricKey := prefix + "_" + pathSlug - if _, exists := snap.Metrics[metricKey]; exists { - continue - } - value := entry.stat.Last - if entry.stat.Count > 1 { - value = entry.stat.Sum - } - setUsedMetric(snap, metricKey, value, "value", "current") - emitted++ - } - if len(entries) > emitted { - snap.Raw[prefix+"_numeric_omitted"] = strconv.Itoa(len(entries) - emitted) - } -} - -func walkPayloadStats(path string, v any, numericByPath map[string]*payloadNumericStat, leafCount, objectCount, arrayCount *int) { - switch value := v.(type) { - case map[string]any: - if objectCount != nil { - *objectCount = *objectCount + 1 - } - keys := make([]string, 0, len(value)) - for key := range value { - keys = append(keys, key) - } - sort.Strings(keys) - for _, key := range keys { - next := appendPayloadPath(path, key) - walkPayloadStats(next, value[key], numericByPath, leafCount, objectCount, arrayCount) - } - case []any: - if arrayCount != nil { - *arrayCount = *arrayCount + 1 - } - next := appendPayloadPath(path, "items") - for _, item := range value { - walkPayloadStats(next, item, numericByPath, leafCount, objectCount, arrayCount) - } - default: - if leafCount != nil { - *leafCount = *leafCount + 1 - } - if numericByPath == nil { - return - } - numeric, ok := parseFloat(v) - if !ok { - return - } - key := strings.TrimSpace(path) - if key == 
"" { - key = "root" - } - stat := numericByPath[key] - if stat == nil { - stat = &payloadNumericStat{Min: numeric, Max: numeric} - numericByPath[key] = stat - } - stat.Count++ - stat.Sum += numeric - stat.Last = numeric - if numeric < stat.Min { - stat.Min = numeric - } - if numeric > stat.Max { - stat.Max = numeric - } - } -} - -func appendPayloadPath(path, segment string) string { - path = strings.TrimSpace(path) - segment = strings.TrimSpace(segment) - if segment == "" { - return path - } - if path == "" { - return segment - } - return path + "." + segment -} - -func formatPayloadValue(v float64) string { - return strconv.FormatFloat(v, 'f', -1, 64) -} - -func applyUsageRange(reqURL string) (string, error) { - parsed, err := url.Parse(reqURL) - if err != nil { - return "", err - } - start, end := usageWindow() - q := parsed.Query() - q.Set("startTime", start) - q.Set("endTime", end) - parsed.RawQuery = q.Encode() - return parsed.String(), nil -} - -func usageWindow() (start, end string) { - now := time.Now().UTC() - startTime := time.Date(now.Year(), now.Month(), now.Day()-6, 0, 0, 0, 0, time.UTC) - endTime := time.Date(now.Year(), now.Month(), now.Day(), 23, 59, 59, 0, time.UTC) - return startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05") -} - -func joinURL(base, endpoint string) string { - trimmedBase := strings.TrimRight(base, "/") - trimmedEndpoint := strings.TrimLeft(endpoint, "/") - return trimmedBase + "/" + trimmedEndpoint -} - -func parseAPIError(body []byte) (code, msg string) { - var payload struct { - Code any `json:"code"` - Msg string `json:"msg"` - Message string `json:"message"` - Error *apiError `json:"error"` - } - if err := json.Unmarshal(body, &payload); err != nil { - return "", "" - } - - if payload.Error != nil { - if payload.Error.Message != "" { - msg = payload.Error.Message - } - if payload.Error.Code != nil { - code = anyToString(payload.Error.Code) - } - } - if code == "" && payload.Code != nil { - code = 
anyToString(payload.Code) - } - if msg == "" { - msg = core.FirstNonEmpty(payload.Message, payload.Msg) - } - return code, msg -} - -func parseCostUSD(row map[string]any) float64 { - if cents, ok := firstNumberByPaths(row, - []string{"cost_cents"}, - []string{"costCents"}, - []string{"total_cost_cents"}, - []string{"totalCostCents"}, - []string{"usage", "cost_cents"}, - ); ok { - return cents / 100 - } - - if micros, ok := firstNumberByPaths(row, - []string{"cost_micros"}, - []string{"costMicros"}, - []string{"total_cost_micros"}, - []string{"totalCostMicros"}, - ); ok { - return micros / 1_000_000 - } - - value, ok := firstNumberByPaths(row, - []string{"cost_usd"}, - []string{"costUSD"}, - []string{"total_cost_usd"}, - []string{"totalCostUSD"}, - []string{"total_cost"}, - []string{"totalCost"}, - []string{"api_cost"}, - []string{"apiCost"}, - []string{"cost"}, - []string{"amount"}, - []string{"total_amount"}, - []string{"totalAmount"}, - []string{"usage", "cost_usd"}, - []string{"usage", "costUSD"}, - []string{"usage", "cost"}, - ) - if ok { - return value - } - return 0 -} - -func parseNumberFromMap(row map[string]any, keys ...string) (float64, bool) { - value, _, ok := firstNumberWithKey(row, keys...) 
- return value, ok -} - -func firstNumberWithKey(row map[string]any, keys ...string) (float64, string, bool) { - for _, key := range keys { - raw, ok := mapValue(row, key) - if !ok { - continue - } - if parsed, ok := parseFloat(raw); ok { - return parsed, key, true - } - } - return 0, "", false -} - -func parseFloat(v any) (float64, bool) { - switch value := v.(type) { - case float64: - return value, true - case float32: - return float64(value), true - case int: - return float64(value), true - case int64: - return float64(value), true - case int32: - return float64(value), true - case int16: - return float64(value), true - case int8: - return float64(value), true - case uint: - return float64(value), true - case uint64: - return float64(value), true - case uint32: - return float64(value), true - case uint16: - return float64(value), true - case uint8: - return float64(value), true - case json.Number: - parsed, err := value.Float64() - return parsed, err == nil - case string: - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return 0, false - } - parsed, err := strconv.ParseFloat(trimmed, 64) - if err != nil { - return 0, false - } - return parsed, true - default: - return 0, false - } -} - -func firstStringFromMap(row map[string]any, keys ...string) string { - for _, key := range keys { - raw, ok := mapValue(row, key) - if !ok || raw == nil { - continue - } - str := strings.TrimSpace(anyToString(raw)) - if str != "" { - return str - } - } - return "" -} - -func firstAnyFromMap(row map[string]any, keys ...string) any { - for _, key := range keys { - if raw, ok := mapValue(row, key); ok { - return raw - } - } - return nil -} - -func mapValue(row map[string]any, key string) (any, bool) { - if row == nil { - return nil, false - } - if raw, ok := row[key]; ok { - return raw, true - } - for candidate, raw := range row { - if strings.EqualFold(candidate, key) { - return raw, true - } - } - return nil, false -} - -func valueAtPath(row map[string]any, path 
[]string) (any, bool) { - if len(path) == 0 { - return nil, false - } - - var current any = row - for _, segment := range path { - node, ok := current.(map[string]any) - if !ok { - return nil, false - } - next, ok := mapValue(node, segment) - if !ok { - return nil, false - } - current = next - } - return current, true -} - -func firstAnyByPaths(row map[string]any, paths ...[]string) any { - for _, path := range paths { - if raw, ok := valueAtPath(row, path); ok { - return raw - } - } - return nil -} - -func firstStringByPaths(row map[string]any, paths ...[]string) string { - for _, path := range paths { - raw, ok := valueAtPath(row, path) - if !ok || raw == nil { - continue - } - text := strings.TrimSpace(anyToString(raw)) - if text != "" { - return text - } - } - return "" -} - -func firstNumberByPaths(row map[string]any, paths ...[]string) (float64, bool) { - for _, path := range paths { - raw, ok := valueAtPath(row, path) - if !ok { - continue - } - if parsed, ok := parseFloat(raw); ok { - return parsed, true - } - } - return 0, false -} - -func normalizeUsageDimension(raw string) string { - value := strings.TrimSpace(raw) - value = strings.Trim(value, "\"'") - if value == "" { - return "" - } - switch strings.ToLower(value) { - case "null", "nil", "n/a", "na", "unknown": - return "" - default: - return value - } -} - -func accumulateRollupValues(acc *usageRollup, sample usageSample) { - if acc == nil { - return - } - acc.Requests += sample.Requests - acc.Input += sample.Input - acc.Output += sample.Output - acc.Reasoning += sample.Reasoning - acc.Total += sample.Total - acc.CostUSD += sample.CostUSD -} - -func accumulateUsageRollup(target map[string]*usageRollup, key string, sample usageSample) { - key = strings.TrimSpace(key) - if key == "" { - return - } - acc, ok := target[key] - if !ok { - acc = &usageRollup{} - target[key] = acc - } - accumulateRollupValues(acc, sample) -} - -func sortedUsageRollupKeys(values map[string]*usageRollup) []string { - keys := 
make([]string, 0, len(values)) - for key := range values { - keys = append(keys, key) - } - sort.Strings(keys) - return keys -} - -func summarizeShareUsage(values map[string]float64, maxItems int) string { - type item struct { - name string - value float64 - } - var ( - list []item - total float64 - ) - for name, value := range values { - if value <= 0 { - continue - } - list = append(list, item{name: name, value: value}) - total += value - } - if len(list) == 0 || total <= 0 { - return "" - } - sort.Slice(list, func(i, j int) bool { - if list[i].value != list[j].value { - return list[i].value > list[j].value - } - return list[i].name < list[j].name - }) - if maxItems > 0 && len(list) > maxItems { - list = list[:maxItems] - } - parts := make([]string, 0, len(list)) - for _, entry := range list { - parts = append(parts, fmt.Sprintf("%s: %.0f%%", normalizeUsageLabel(entry.name), entry.value/total*100)) - } - return strings.Join(parts, ", ") -} - -func summarizeCountUsage(values map[string]float64, unit string, maxItems int) string { - type item struct { - name string - value float64 - } - var list []item - for name, value := range values { - if value <= 0 { - continue - } - list = append(list, item{name: name, value: value}) - } - if len(list) == 0 { - return "" - } - sort.Slice(list, func(i, j int) bool { - if list[i].value != list[j].value { - return list[i].value > list[j].value - } - return list[i].name < list[j].name - }) - if maxItems > 0 && len(list) > maxItems { - list = list[:maxItems] - } - parts := make([]string, 0, len(list)) - for _, entry := range list { - parts = append(parts, fmt.Sprintf("%s: %.0f %s", normalizeUsageLabel(entry.name), entry.value, unit)) - } - return strings.Join(parts, ", ") -} - -func normalizeUsageLabel(value string) string { - value = strings.TrimSpace(value) - if value == "" { - return "unknown" - } - replacer := strings.NewReplacer("_", " ", "-", " ") - return replacer.Replace(value) -} - -func inferModelUsageLanguage(model 
string) string { - model = strings.ToLower(strings.TrimSpace(model)) - if model == "" { - return "" - } - switch { - case strings.Contains(model, "coder"), strings.Contains(model, "code"), strings.Contains(model, "codestral"), strings.Contains(model, "devstral"): - return "code" - case strings.Contains(model, "vision"), strings.Contains(model, "image"), strings.Contains(model, "multimodal"), strings.Contains(model, "omni"), strings.Contains(model, "vl"): - return "multimodal" - case strings.Contains(model, "audio"), strings.Contains(model, "speech"), strings.Contains(model, "voice"), strings.Contains(model, "whisper"), strings.Contains(model, "tts"), strings.Contains(model, "stt"): - return "audio" - case strings.Contains(model, "reason"), strings.Contains(model, "thinking"): - return "reasoning" - default: - return "general" - } -} - -func anyToString(v any) string { - switch value := v.(type) { - case string: - return strings.TrimSpace(value) - case json.Number: - return value.String() - case float64: - if math.Mod(value, 1) == 0 { - return strconv.FormatInt(int64(value), 10) - } - return strconv.FormatFloat(value, 'f', -1, 64) - case float32: - return strconv.FormatFloat(float64(value), 'f', -1, 32) - case int: - return strconv.Itoa(value) - case int64: - return strconv.FormatInt(value, 10) - case int32: - return strconv.FormatInt(int64(value), 10) - case uint: - return strconv.FormatUint(uint64(value), 10) - case uint64: - return strconv.FormatUint(value, 10) - case bool: - return strconv.FormatBool(value) - default: - return strings.TrimSpace(fmt.Sprint(value)) - } -} - -func normalizeDate(raw any) string { - if raw == nil { - return "" - } - - if ts, ok := parseTimeValue(raw); ok { - return ts.UTC().Format("2006-01-02") - } - - value := strings.TrimSpace(anyToString(raw)) - if value == "" { - return "" - } - if len(value) >= 10 { - candidate := value[:10] - if _, err := time.Parse("2006-01-02", candidate); err == nil { - return candidate - } - } - return "" 
-} - -func parseTimeValue(raw any) (time.Time, bool) { - if raw == nil { - return time.Time{}, false - } - - if n, ok := parseFloat(raw); ok { - if n <= 0 { - return time.Time{}, false - } - sec := int64(n) - if n > 1e12 { - sec = int64(n / 1000) - } - return time.Unix(sec, 0).UTC(), true - } - - value := strings.TrimSpace(anyToString(raw)) - if value == "" { - return time.Time{}, false - } - - for _, layout := range []string{ - time.RFC3339, - "2006-01-02 15:04:05", - "2006-01-02T15:04:05Z07:00", - "2006-01-02", - } { - if parsed, err := time.Parse(layout, value); err == nil { - return parsed.UTC(), true - } - } - - if n, err := strconv.ParseInt(value, 10, 64); err == nil { - if n > 1e12 { - return time.Unix(n/1000, 0).UTC(), true - } - return time.Unix(n, 0).UTC(), true - } - - return time.Time{}, false -} - -func isJSONEmpty(raw json.RawMessage) bool { - trimmed := strings.TrimSpace(string(raw)) - return trimmed == "" || trimmed == "null" || trimmed == "{}" || trimmed == "[]" -} - -func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { - if key == "" || value <= 0 { - return - } - snap.Metrics[key] = core.Metric{ - Used: core.Float64Ptr(value), - Unit: unit, - Window: window, - } -} - -func sanitizeMetricSlug(value string) string { - trimmed := strings.TrimSpace(strings.ToLower(value)) - if trimmed == "" { - return "unknown" - } - - var b strings.Builder - lastUnderscore := false - for _, r := range trimmed { - switch { - case (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9'): - b.WriteRune(r) - lastUnderscore = false - case r == '-' || r == '_': - b.WriteRune(r) - lastUnderscore = false - default: - if !lastUnderscore { - b.WriteRune('_') - lastUnderscore = true - } - } - } - slug := strings.Trim(b.String(), "_") - if slug == "" { - return "unknown" - } - return slug -} - -func clamp(value, minVal, maxVal float64) float64 { - return math.Min(math.Max(value, minVal), maxVal) -} - -func apiErrorMessage(err *apiError) string 
{ - if err == nil { - return "" - } - return strings.TrimSpace(err.Message) -} - -func isNoPackageCode(code, msg string) bool { - code = strings.TrimSpace(code) - if code == "1113" { - return true - } - lowerMsg := strings.ToLower(strings.TrimSpace(msg)) - return strings.Contains(lowerMsg, "insufficient balance") || - strings.Contains(lowerMsg, "no resource package") || - strings.Contains(lowerMsg, "no active coding package") -} diff --git a/internal/tui/tiles_composition.go b/internal/tui/tiles_composition.go index 37fac1b..8c257ec 100644 --- a/internal/tui/tiles_composition.go +++ b/internal/tui/tiles_composition.go @@ -3,7 +3,6 @@ package tui import ( "fmt" "math" - "sort" "strings" "github.com/charmbracelet/lipgloss" @@ -71,10 +70,10 @@ func buildProviderModelCompositionLines(snap core.UsageSnapshot, innerW int, exp totalCost := float64(0) totalTokens := float64(0) totalRequests := float64(0) - for _, m := range allModels { - totalCost += m.cost - totalTokens += m.input + m.output - totalRequests += m.requests + for _, model := range allModels { + totalCost += model.cost + totalTokens += model.input + model.output + totalRequests += model.requests } mode, total := selectBurnMode(totalTokens, totalCost, totalRequests) @@ -124,19 +123,12 @@ func buildProviderModelCompositionLines(snap core.UsageSnapshot, innerW int, exp valueStr := fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(model.requests)) switch mode { case "tokens": - valueStr = fmt.Sprintf("%2.0f%% %s tok", - pct, - shortCompact(model.input+model.output), - ) + valueStr = fmt.Sprintf("%2.0f%% %s tok", pct, shortCompact(model.input+model.output)) if model.cost > 0 { valueStr += fmt.Sprintf(" · %s", formatUSD(model.cost)) } case "cost": - valueStr = fmt.Sprintf("%2.0f%% %s tok · %s", - pct, - shortCompact(model.input+model.output), - formatUSD(model.cost), - ) + valueStr = fmt.Sprintf("%2.0f%% %s tok · %s", pct, shortCompact(model.input+model.output), formatUSD(model.cost)) case "requests": if 
model.requests1d > 0 { valueStr += fmt.Sprintf(" · today %s", shortCompact(model.requests1d)) @@ -148,7 +140,6 @@ func buildProviderModelCompositionLines(snap core.UsageSnapshot, innerW int, exp trendEntries := limitModelTrendEntries(models, expanded) if len(trendEntries) > 0 { lines = append(lines, dimStyle.Render(" Trend (daily by model)")) - labelW := 12 if innerW < 55 { labelW = 10 @@ -216,7 +207,6 @@ func buildModelColorMap(models []modelMixEntry, providerID string) map[string]li if len(models) == 0 { return colors } - base := stablePaletteOffset("model", providerID) for i, model := range models { colors[model.name] = distributedPaletteColor(base, i) @@ -242,7 +232,7 @@ func modelMixValue(model modelMixEntry, mode string) float64 { } } -func selectBurnMode(totalTokens, totalCost, totalRequests float64) (mode string, total float64) { +func selectBurnMode(totalTokens, totalCost, totalRequests float64) (string, float64) { switch { case totalCost > 0: return "cost", totalCost @@ -254,844 +244,20 @@ func selectBurnMode(totalTokens, totalCost, totalRequests float64) (mode string, } func collectProviderModelMix(snap core.UsageSnapshot) ([]modelMixEntry, map[string]bool) { - entries, usedKeys := core.ExtractModelBreakdown(snap) - models := make([]modelMixEntry, 0, len(entries)) - for _, entry := range entries { - models = append(models, modelMixEntry{ - name: entry.Name, - cost: entry.Cost, - input: entry.Input, - output: entry.Output, - requests: entry.Requests, - requests1d: entry.Requests1d, - series: entry.Series, - }) - } - return models, usedKeys -} - -func buildProviderVendorCompositionLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { - allProviders, usedKeys := collectProviderVendorMix(snap) - if len(allProviders) == 0 { - return nil, nil - } - providers, hiddenCount := limitProviderMix(allProviders, expanded, 4) - providerColors := buildProviderColorMap(allProviders, snap.AccountID) - - totalCost := float64(0) - 
totalTokens := float64(0) - totalRequests := float64(0) - for _, p := range allProviders { - totalCost += p.cost - totalTokens += p.input + p.output - totalRequests += p.requests - } - - mode, total := selectBurnMode(totalTokens, totalCost, totalRequests) - if total <= 0 { - return nil, nil - } - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - heading := "Provider Burn (tokens)" - if mode == "cost" { - heading = "Provider Burn (credits)" - } else if mode == "requests" { - heading = "Provider Activity (requests)" - } - - providerClients := make([]clientMixEntry, 0, len(allProviders)) - for _, p := range allProviders { - value := p.requests - if mode == "cost" { - value = p.cost - } else if mode == "tokens" { - value = p.input + p.output - } - if value <= 0 { - continue - } - providerClients = append(providerClients, clientMixEntry{name: p.name, total: value}) - } - if len(providerClients) == 0 { - return nil, nil - } - - lines := []string{ - lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render(heading), - " " + renderClientMixBar(providerClients, total, barW, providerColors, "tokens"), - } - - for idx, provider := range providers { - value := provider.requests - if mode == "cost" { - value = provider.cost - } else if mode == "tokens" { - value = provider.input + provider.output - } - if value <= 0 { - continue - } - pct := value / total * 100 - label := prettifyModelName(provider.name) - colorDot := lipgloss.NewStyle().Foreground(providerColors[provider.name]).Render("■") - - maxLabelLen := tableLabelMaxLen(innerW) - if len(label) > maxLabelLen { - label = label[:maxLabelLen-1] + "…" - } - displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) - - valueStr := fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(provider.requests)) - if mode == "tokens" { - valueStr = fmt.Sprintf("%2.0f%% %s tok · %s req", - pct, - shortCompact(provider.input+provider.output), - shortCompact(provider.requests), - ) - if 
provider.cost > 0 { - valueStr += fmt.Sprintf(" · %s", formatUSD(provider.cost)) - } - } else if mode == "cost" { - valueStr = fmt.Sprintf("%2.0f%% %s tok · %s req · %s", - pct, - shortCompact(provider.input+provider.output), - shortCompact(provider.requests), - formatUSD(provider.cost), - ) - } - lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) - } - if hiddenCount > 0 { - lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more providers (Ctrl+O)", hiddenCount))) - } - - return lines, usedKeys -} - -func collectProviderVendorMix(snap core.UsageSnapshot) ([]providerMixEntry, map[string]bool) { - entries, usedKeys := core.ExtractProviderBreakdown(snap) - providers := make([]providerMixEntry, 0, len(entries)) - for _, entry := range entries { - providers = append(providers, providerMixEntry{ - name: entry.Name, - cost: entry.Cost, - input: entry.Input, - output: entry.Output, - requests: entry.Requests, - }) - } - return providers, usedKeys -} - -func buildUpstreamProviderCompositionLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { - allProviders, usedKeys := collectUpstreamProviderMix(snap) - if len(allProviders) == 0 { - return nil, nil - } - providers, hiddenCount := limitProviderMix(allProviders, expanded, 4) - providerColors := buildProviderColorMap(allProviders, snap.AccountID) - - totalCost := float64(0) - totalTokens := float64(0) - totalRequests := float64(0) - for _, p := range allProviders { - totalCost += p.cost - totalTokens += p.input + p.output - totalRequests += p.requests - } - - mode, total := selectBurnMode(totalTokens, totalCost, totalRequests) - if total <= 0 { - return nil, nil - } - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - heading := "Hosting Providers (tokens)" - if mode == "cost" { - heading = "Hosting Providers (credits)" - } else if mode == "requests" { - heading = "Hosting Providers (requests)" - } - - providerClients := 
make([]clientMixEntry, 0, len(allProviders)) - for _, p := range allProviders { - value := p.requests - if mode == "cost" { - value = p.cost - } else if mode == "tokens" { - value = p.input + p.output - } - if value <= 0 { - continue - } - providerClients = append(providerClients, clientMixEntry{name: p.name, total: value}) - } - if len(providerClients) == 0 { - return nil, nil - } - - lines := []string{ - lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render(heading), - " " + renderClientMixBar(providerClients, total, barW, providerColors, "tokens"), - } - - for idx, provider := range providers { - value := provider.requests - if mode == "cost" { - value = provider.cost - } else if mode == "tokens" { - value = provider.input + provider.output - } - if value <= 0 { - continue - } - pct := value / total * 100 - label := prettifyModelName(provider.name) - colorDot := lipgloss.NewStyle().Foreground(providerColors[provider.name]).Render("■") - - maxLabelLen := tableLabelMaxLen(innerW) - if len(label) > maxLabelLen { - label = label[:maxLabelLen-1] + "…" - } - displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) - - valueStr := fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(provider.requests)) - if mode == "tokens" { - valueStr = fmt.Sprintf("%2.0f%% %s tok · %s req", - pct, - shortCompact(provider.input+provider.output), - shortCompact(provider.requests), - ) - if provider.cost > 0 { - valueStr += fmt.Sprintf(" · %s", formatUSD(provider.cost)) - } - } else if mode == "cost" { - valueStr = fmt.Sprintf("%2.0f%% %s tok · %s req · %s", - pct, - shortCompact(provider.input+provider.output), - shortCompact(provider.requests), - formatUSD(provider.cost), - ) - } - lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) - } - if hiddenCount > 0 { - lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more providers (Ctrl+O)", hiddenCount))) - } - - return lines, usedKeys -} - -func collectUpstreamProviderMix(snap core.UsageSnapshot) 
([]providerMixEntry, map[string]bool) { - entries, usedKeys := core.ExtractUpstreamProviderBreakdown(snap) - result := make([]providerMixEntry, 0, len(entries)) - for _, entry := range entries { - result = append(result, providerMixEntry{ - name: entry.Name, - cost: entry.Cost, - input: entry.Input, - output: entry.Output, - requests: entry.Requests, - }) - } - return result, usedKeys -} - -func limitProviderMix(providers []providerMixEntry, expanded bool, maxVisible int) ([]providerMixEntry, int) { - if expanded || maxVisible <= 0 || len(providers) <= maxVisible { - return providers, 0 - } - return providers[:maxVisible], len(providers) - maxVisible -} - -func buildProviderColorMap(providers []providerMixEntry, providerID string) map[string]lipgloss.Color { - colors := make(map[string]lipgloss.Color, len(providers)) - if len(providers) == 0 { - return colors - } - - base := stablePaletteOffset("provider", providerID) - for i, provider := range providers { - colors[provider.name] = distributedPaletteColor(base, i) - } - return colors -} - -func buildProviderDailyTrendLines(snap core.UsageSnapshot, innerW int) []string { - type trendDef struct { - label string - keys []string - color lipgloss.Color - unit string - } - defs := []trendDef{ - {label: "Cost", keys: []string{"analytics_cost", "cost"}, color: colorTeal, unit: "USD"}, - {label: "Req", keys: []string{"analytics_requests", "requests"}, color: colorYellow, unit: "requests"}, - {label: "Tokens", keys: []string{"analytics_tokens"}, color: colorSapphire, unit: "tokens"}, - } - - lines := []string{} - labelW := 8 - if innerW < 55 { - labelW = 6 - } - sparkW := innerW - labelW - 14 - if sparkW < 10 { - sparkW = 10 - } - if sparkW > 30 { - sparkW = 30 - } - - for _, def := range defs { - var points []core.TimePoint - for _, key := range def.keys { - if got, ok := snap.DailySeries[key]; ok && len(got) > 1 { - points = got - break - } - } - if len(points) < 2 { - continue - } - values := tailSeriesValues(points, 14) 
- if len(values) < 2 { - continue - } - - last := values[len(values)-1] - lastLabel := shortCompact(last) - if def.unit == "USD" { - lastLabel = formatUSD(last) - } - - if len(lines) == 0 { - lines = append(lines, lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Daily Usage")) - } - - label := lipgloss.NewStyle().Foreground(colorSubtext).Width(labelW).Render(def.label) - spark := RenderSparkline(values, sparkW, def.color) - lines = append(lines, fmt.Sprintf(" %s %s %s", label, spark, dimStyle.Render(lastLabel))) - } - - if len(lines) == 0 { - return nil - } - return lines -} - -func tailSeriesValues(points []core.TimePoint, max int) []float64 { - if len(points) == 0 { - return nil - } - if max > 0 && len(points) > max { - points = points[len(points)-max:] - } - values := make([]float64, 0, len(points)) - for _, p := range points { - values = append(values, p.Value) - } - return values -} - -// collectInterfaceAsClients builds clientMixEntry items from interface_ metrics -// so the interface breakdown (composer, cli, human, tab) can be shown directly -// in the client composition section instead of a separate panel. 
-func collectInterfaceAsClients(snap core.UsageSnapshot) ([]clientMixEntry, map[string]bool) { - entries, usedKeys := core.ExtractInterfaceClientBreakdown(snap) - clients := make([]clientMixEntry, 0, len(entries)) - for _, entry := range entries { - clients = append(clients, clientMixEntry{ - name: entry.Name, - requests: entry.Requests, - seriesKind: entry.SeriesKind, - series: entry.Series, - }) - } - return clients, usedKeys -} - -func buildProviderClientCompositionLinesWithWidget(snap core.UsageSnapshot, innerW int, expanded bool, widget core.DashboardWidget) ([]string, map[string]bool) { - allClients, usedKeys := collectProviderClientMix(snap) - - if widget.ClientCompositionIncludeInterfaces { - ifaceClients, ifaceKeys := collectInterfaceAsClients(snap) - if len(ifaceClients) > 0 { - allClients = ifaceClients - for k, v := range ifaceKeys { - usedKeys[k] = v - } - } - } - - if len(allClients) == 0 { - return nil, nil - } - - clients, hiddenCount := limitClientMix(allClients, expanded, 4) - clientColors := buildClientColorMap(allClients, snap.AccountID) - - mode, total := selectClientMixMode(allClients) - if total <= 0 { - return nil, nil - } - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - headingName := widget.ClientCompositionHeading - if headingName == "" { - headingName = "Client Burn" - if mode == "requests" || mode == "sessions" { - headingName = "Client Activity" - } - } - var clientHeaderSuffix string - switch mode { - case "requests": - clientHeaderSuffix = shortCompact(total) + " req" - case "sessions": - clientHeaderSuffix = shortCompact(total) + " sess" - default: - clientHeaderSuffix = shortCompact(total) + " tok" - } - lines := []string{ - lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render(headingName) + - " " + dimStyle.Render(clientHeaderSuffix), - " " + renderClientMixBar(allClients, total, barW, clientColors, mode), - } - - for idx, client := range clients { - value := 
clientDisplayValue(client, mode) - if value <= 0 { - continue - } - pct := value / total * 100 - label := prettifyClientName(client.name) - clientColor := colorForClient(clientColors, client.name) - colorDot := lipgloss.NewStyle().Foreground(clientColor).Render("■") - - maxLabelLen := tableLabelMaxLen(innerW) - if len(label) > maxLabelLen { - label = label[:maxLabelLen-1] + "…" - } - displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) - - valueStr := fmt.Sprintf("%2.0f%% %s tok", pct, shortCompact(value)) - switch mode { - case "requests": - valueStr = fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(value)) - if client.sessions > 0 { - valueStr += fmt.Sprintf(" · %s sess", shortCompact(client.sessions)) - } - case "sessions": - valueStr = fmt.Sprintf("%2.0f%% %s sess", pct, shortCompact(value)) - default: - if client.requests > 0 { - valueStr += fmt.Sprintf(" · %s req", shortCompact(client.requests)) - } else if client.sessions > 0 { - valueStr += fmt.Sprintf(" · %s sess", shortCompact(client.sessions)) - } - } - lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) - } - - trendEntries := limitClientTrendEntries(clients, expanded) - if len(trendEntries) > 0 { - lines = append(lines, dimStyle.Render(" Trend (daily by client)")) - - labelW := 12 - if innerW < 55 { - labelW = 10 - } - sparkW := innerW - labelW - 5 - if sparkW < 10 { - sparkW = 10 - } - if sparkW > 28 { - sparkW = 28 - } - - for _, client := range trendEntries { - values := make([]float64, 0, len(client.series)) - for _, point := range client.series { - values = append(values, point.Value) - } - if len(values) < 2 { - continue - } - label := truncateToWidth(prettifyClientName(client.name), labelW) - spark := RenderSparkline(values, sparkW, colorForClient(clientColors, client.name)) - lines = append(lines, fmt.Sprintf(" %s %s", - lipgloss.NewStyle().Foreground(colorSubtext).Width(labelW).Render(label), - spark, - )) - } - } - - if hiddenCount > 0 { - lines = 
append(lines, dimStyle.Render(fmt.Sprintf("+ %d more clients (Ctrl+O)", hiddenCount))) - } - - return lines, usedKeys -} - -func buildProviderProjectBreakdownLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { - allProjects, usedKeys := collectProviderProjectMix(snap) - if len(allProjects) == 0 { - return nil, nil - } - - projects, hiddenCount := limitProjectMix(allProjects, expanded, 6) - projectColors := buildProjectColorMap(allProjects, snap.AccountID) - - totalRequests := float64(0) - for _, project := range allProjects { - totalRequests += project.requests - } - if totalRequests <= 0 { - return nil, nil - } - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - barEntries := make([]toolMixEntry, 0, len(allProjects)) - for _, project := range allProjects { - barEntries = append(barEntries, toolMixEntry{name: project.name, count: project.requests}) - } - - lines := []string{ - lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Project Breakdown") + - " " + dimStyle.Render(shortCompact(totalRequests)+" req"), - " " + renderToolMixBar(barEntries, totalRequests, barW, projectColors), - } - - for idx, project := range projects { - if project.requests <= 0 { - continue - } - pct := project.requests / totalRequests * 100 - label := project.name - projectColor := colorForProject(projectColors, project.name) - colorDot := lipgloss.NewStyle().Foreground(projectColor).Render("■") - - maxLabelLen := tableLabelMaxLen(innerW) - if len(label) > maxLabelLen { - label = label[:maxLabelLen-1] + "…" - } - displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) - valueStr := fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(project.requests)) - if project.requests1d > 0 { - valueStr += fmt.Sprintf(" · today %s", shortCompact(project.requests1d)) - } - lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) - } - - if hiddenCount > 0 { - lines = append(lines, 
dimStyle.Render(fmt.Sprintf("+ %d more projects (Ctrl+O)", hiddenCount))) - } - - return lines, usedKeys -} - -func collectProviderProjectMix(snap core.UsageSnapshot) ([]projectMixEntry, map[string]bool) { - projectUsage, usedKeys := core.ExtractProjectUsage(snap) - if len(projectUsage) == 0 { - return nil, usedKeys - } - projects := make([]projectMixEntry, 0, len(projectUsage)) - for _, project := range projectUsage { - projects = append(projects, projectMixEntry{ - name: project.Name, - requests: project.Requests, - requests1d: project.Requests1d, - series: project.Series, - }) - } - return projects, usedKeys -} - -func limitProjectMix(projects []projectMixEntry, expanded bool, maxVisible int) ([]projectMixEntry, int) { - if expanded || maxVisible <= 0 || len(projects) <= maxVisible { - return projects, 0 - } - return projects[:maxVisible], len(projects) - maxVisible -} - -func buildProjectColorMap(projects []projectMixEntry, providerID string) map[string]lipgloss.Color { - colors := make(map[string]lipgloss.Color, len(projects)) - if len(projects) == 0 { - return colors - } - - base := stablePaletteOffset("project", providerID) - for i, project := range projects { - colors[project.name] = distributedPaletteColor(base, i) - } - return colors -} - -func colorForProject(colors map[string]lipgloss.Color, name string) lipgloss.Color { - if color, ok := colors[name]; ok { - return color - } - return stableModelColor("project:"+name, "project") -} - -func collectProviderClientMix(snap core.UsageSnapshot) ([]clientMixEntry, map[string]bool) { - entries, usedKeys := core.ExtractClientBreakdown(snap) - clients := make([]clientMixEntry, 0, len(entries)) - for _, entry := range entries { - clients = append(clients, clientMixEntry{ - name: entry.Name, - total: entry.Total, - input: entry.Input, - output: entry.Output, - cached: entry.Cached, - reasoning: entry.Reasoning, - requests: entry.Requests, - sessions: entry.Sessions, - seriesKind: entry.SeriesKind, - series: 
entry.Series, - }) - } - return clients, usedKeys -} - -func clientTokenValue(client clientMixEntry) float64 { - if client.total > 0 { - return client.total - } - if client.input > 0 || client.output > 0 || client.cached > 0 || client.reasoning > 0 { - return client.input + client.output + client.cached + client.reasoning - } - return 0 -} - -func clientMixValue(client clientMixEntry) float64 { - if v := clientTokenValue(client); v > 0 { - return v - } - if client.requests > 0 { - return client.requests - } - if len(client.series) > 0 { - return sumSeriesValues(client.series) - } - return 0 -} - -func clientDisplayValue(client clientMixEntry, mode string) float64 { - switch mode { - case "sessions": - return client.sessions - case "requests": - if client.requests > 0 { - return client.requests - } - return sumSeriesValues(client.series) - default: - return clientMixValue(client) - } -} - -func selectClientMixMode(clients []clientMixEntry) (mode string, total float64) { - totalTokens := float64(0) - totalRequests := float64(0) - totalSessions := float64(0) - for _, client := range clients { - totalTokens += clientTokenValue(client) - totalRequests += client.requests - totalSessions += client.sessions - } - if totalTokens > 0 { - return "tokens", totalTokens - } - if totalRequests > 0 { - return "requests", totalRequests - } - return "sessions", totalSessions -} - -func sumSeriesValues(points []core.TimePoint) float64 { - total := float64(0) - for _, p := range points { - total += p.Value - } - return total -} - -func mergeSeriesByDay(seriesByClient map[string]map[string]float64, client string, points []core.TimePoint) { - if client == "" || len(points) == 0 { - return - } - if seriesByClient[client] == nil { - seriesByClient[client] = make(map[string]float64) - } - for _, point := range points { - if point.Date == "" { - continue - } - seriesByClient[client][point.Date] += point.Value - } -} - -func limitClientMix(clients []clientMixEntry, expanded bool, maxVisible 
int) ([]clientMixEntry, int) { - if expanded || maxVisible <= 0 || len(clients) <= maxVisible { - return clients, 0 - } - return clients[:maxVisible], len(clients) - maxVisible -} - -func limitClientTrendEntries(clients []clientMixEntry, expanded bool) []clientMixEntry { - maxVisible := 2 - if expanded { - maxVisible = 4 - } - - trend := make([]clientMixEntry, 0, maxVisible) - for _, client := range clients { - if len(client.series) < 2 { - continue - } - trend = append(trend, client) - if len(trend) >= maxVisible { - break - } - } - return trend -} - -func prettifyClientName(name string) string { - switch name { - case "cli": - return "CLI Agents" - case "ide": - return "IDE" - case "exec": - return "Exec" - case "desktop_app": - return "Desktop App" - case "other": - return "Other" - case "composer": - return "Composer" - case "human": - return "Human" - case "tab": - return "Tab Completion" - } - - parts := strings.Split(name, "_") - for i := range parts { - switch parts[i] { - case "cli": - parts[i] = "CLI" - case "ide": - parts[i] = "IDE" - case "api": - parts[i] = "API" - default: - parts[i] = titleCase(parts[i]) - } - } - return strings.Join(parts, " ") -} - -func prettifyMCPServerName(raw string) string { - s := strings.ToLower(strings.TrimSpace(raw)) - if s == "" { - return "unknown" - } - - // Strip known prefixes from claude.ai marketplace and plugin system. - s = strings.TrimPrefix(s, "claude_ai_") - s = strings.TrimPrefix(s, "plugin_") - - // Strip trailing _mcp suffix (redundant — everything here is MCP). - s = strings.TrimSuffix(s, "_mcp") - - // Deduplicate: "slack_slack" → "slack". - parts := strings.Split(s, "_") - if len(parts) >= 2 && parts[0] == parts[len(parts)-1] { - parts = parts[:len(parts)-1] - } - s = strings.Join(parts, "_") - - if s == "" { - return raw - } - - // Title case with separators preserved. - return prettifyMCPName(s) -} - -// prettifyMCPFunctionName cleans up raw MCP function names for display. 
-func prettifyMCPFunctionName(raw string) string { - s := strings.ToLower(strings.TrimSpace(raw)) - if s == "" { - return raw - } - return prettifyMCPName(s) -} - -// prettifyMCPName converts snake_case/kebab-case to Title Case. -func prettifyMCPName(s string) string { - // Replace underscores and hyphens with spaces, then title-case each word. - s = strings.NewReplacer("_", " ", "-", " ").Replace(s) - words := strings.Fields(s) - for i, w := range words { - if len(w) > 0 { - words[i] = strings.ToUpper(w[:1]) + w[1:] - } - } - return strings.Join(words, " ") -} - -func buildClientColorMap(clients []clientMixEntry, providerID string) map[string]lipgloss.Color { - colors := make(map[string]lipgloss.Color, len(clients)) - if len(clients) == 0 { - return colors - } - - base := stablePaletteOffset("client", providerID) - for i, client := range clients { - colors[client.name] = distributedPaletteColor(base, i) - } - return colors -} - -func colorForClient(colors map[string]lipgloss.Color, name string) lipgloss.Color { - if color, ok := colors[name]; ok { - return color + entries, usedKeys := core.ExtractModelBreakdown(snap) + models := make([]modelMixEntry, 0, len(entries)) + for _, entry := range entries { + models = append(models, modelMixEntry{ + name: entry.Name, + cost: entry.Cost, + input: entry.Input, + output: entry.Output, + requests: entry.Requests, + requests1d: entry.Requests1d, + series: entry.Series, + }) } - return stableModelColor("client:"+name, "client") + return models, usedKeys } func stablePaletteOffset(prefix, value string) int { @@ -1110,8 +276,7 @@ func distributedPaletteColor(base, position int) lipgloss.Color { if len(modelColorPalette) == 0 { return colorSubtext } - idx := distributedPaletteIndex(base, position, len(modelColorPalette)) - return modelColorPalette[idx] + return modelColorPalette[distributedPaletteIndex(base, position, len(modelColorPalette))] } func distributedPaletteIndex(base, position, size int) int { @@ -1175,16 +340,10 @@ 
func renderClientMixBar(top []clientMixEntry, total float64, barW int, colors ma continue } sumTop += value - segs = append(segs, seg{ - val: value, - color: colorForClient(colors, client.name), - }) + segs = append(segs, seg{val: value, color: colorForClient(colors, client.name)}) } if sumTop < total { - segs = append(segs, seg{ - val: total - sumTop, - color: colorSurface1, - }) + segs = append(segs, seg{val: total - sumTop, color: colorSurface1}) } if len(segs) == 0 { return "" @@ -1234,22 +393,16 @@ func renderModelMixBar(models []modelMixEntry, total float64, barW int, mode str } segs := make([]seg, 0, len(models)+1) sumTop := float64(0) - for _, m := range models { - v := modelMixValue(m, mode) - if v <= 0 { + for _, model := range models { + value := modelMixValue(model, mode) + if value <= 0 { continue } - sumTop += v - segs = append(segs, seg{ - val: v, - color: colorForModel(colors, m.name), - }) + sumTop += value + segs = append(segs, seg{val: value, color: colorForModel(colors, model.name)}) } if sumTop < total { - segs = append(segs, seg{ - val: total - sumTop, - color: colorSurface1, - }) + segs = append(segs, seg{val: total - sumTop, color: colorSurface1}) } if len(segs) == 0 { return "" @@ -1305,16 +458,10 @@ func renderToolMixBar(top []toolMixEntry, total float64, barW int, colors map[st continue } sumTop += tool.count - segs = append(segs, seg{ - val: tool.count, - color: colorForTool(colors, tool.name), - }) + segs = append(segs, seg{val: tool.count, color: colorForTool(colors, tool.name)}) } if sumTop < total { - segs = append(segs, seg{ - val: total - sumTop, - color: colorSurface1, - }) + segs = append(segs, seg{val: total - sumTop, color: colorSurface1}) } if len(segs) == 0 { return "" @@ -1352,480 +499,3 @@ func renderToolMixBar(top []toolMixEntry, total float64, barW int, colors map[st } return sb.String() } - -func buildProviderToolCompositionLines(snap core.UsageSnapshot, innerW int, expanded bool, widget core.DashboardWidget) ([]string, 
map[string]bool) { - allTools, usedKeys := collectProviderToolMix(snap) - if len(allTools) == 0 { - return nil, nil - } - - tools, hiddenCount := limitToolMix(allTools, expanded, 4) - toolColors := buildToolColorMap(allTools, snap.AccountID) - - totalCalls := float64(0) - for _, tool := range allTools { - totalCalls += tool.count - } - if totalCalls <= 0 { - return nil, nil - } - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - toolHeadingName := "Tool Usage" - if widget.ToolCompositionHeading != "" { - toolHeadingName = widget.ToolCompositionHeading - } - toolHeaderSuffix := shortCompact(totalCalls) + " calls" - - lines := []string{ - lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render(toolHeadingName) + - " " + dimStyle.Render(toolHeaderSuffix), - " " + renderToolMixBar(allTools, totalCalls, barW, toolColors), - } - - for idx, tool := range tools { - if tool.count <= 0 { - continue - } - pct := tool.count / totalCalls * 100 - label := tool.name - toolColor := colorForTool(toolColors, tool.name) - colorDot := lipgloss.NewStyle().Foreground(toolColor).Render("■") - - maxLabelLen := tableLabelMaxLen(innerW) - if len(label) > maxLabelLen { - label = label[:maxLabelLen-1] + "…" - } - displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) - - valueStr := fmt.Sprintf("%2.0f%% %s calls", pct, shortCompact(tool.count)) - lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) - } - - if hiddenCount > 0 { - lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more tools (Ctrl+O)", hiddenCount))) - } - - return lines, usedKeys -} - -func collectProviderToolMix(snap core.UsageSnapshot) ([]toolMixEntry, map[string]bool) { - entries, usedKeys := core.ExtractInterfaceClientBreakdown(snap) - tools := make([]toolMixEntry, 0, len(entries)) - for _, entry := range entries { - tools = append(tools, toolMixEntry{ - name: entry.Name, - count: entry.Requests, - }) - } - return tools, usedKeys -} - -func 
sortToolMixEntries(tools []toolMixEntry) { - sort.Slice(tools, func(i, j int) bool { - if tools[i].count == tools[j].count { - return tools[i].name < tools[j].name - } - return tools[i].count > tools[j].count - }) -} - -func limitToolMix(tools []toolMixEntry, expanded bool, maxVisible int) ([]toolMixEntry, int) { - if expanded || maxVisible <= 0 || len(tools) <= maxVisible { - return tools, 0 - } - return tools[:maxVisible], len(tools) - maxVisible -} - -func buildToolColorMap(tools []toolMixEntry, providerID string) map[string]lipgloss.Color { - colors := make(map[string]lipgloss.Color, len(tools)) - if len(tools) == 0 { - return colors - } - - base := stablePaletteOffset("tool", providerID) - for i, tool := range tools { - colors[tool.name] = distributedPaletteColor(base, i) - } - return colors -} - -func colorForTool(colors map[string]lipgloss.Color, name string) lipgloss.Color { - if color, ok := colors[name]; ok { - return color - } - return stableModelColor("tool:"+name, "tool") -} - -func buildProviderLanguageCompositionLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { - allLangs, usedKeys := collectProviderLanguageMix(snap) - if len(allLangs) == 0 { - return nil, usedKeys - } - - langs, hiddenCount := limitToolMix(allLangs, expanded, 6) - langColors := buildLangColorMap(allLangs, snap.AccountID) - - totalReqs := float64(0) - for _, lang := range allLangs { - totalReqs += lang.count - } - if totalReqs <= 0 { - return nil, nil - } - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - langHeaderSuffix := shortCompact(totalReqs) + " req" - lines := []string{ - lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Language") + - " " + dimStyle.Render(langHeaderSuffix), - " " + renderToolMixBar(allLangs, totalReqs, barW, langColors), - } - - for idx, lang := range langs { - if lang.count <= 0 { - continue - } - pct := lang.count / totalReqs * 100 - label := lang.name - 
langColor := colorForTool(langColors, lang.name) - colorDot := lipgloss.NewStyle().Foreground(langColor).Render("■") - - maxLabelLen := tableLabelMaxLen(innerW) - if len(label) > maxLabelLen { - label = label[:maxLabelLen-1] + "…" - } - displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) - - valueStr := fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(lang.count)) - lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) - } - - if hiddenCount > 0 { - lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more languages (Ctrl+O)", hiddenCount))) - } - - return lines, usedKeys -} - -func collectProviderLanguageMix(snap core.UsageSnapshot) ([]toolMixEntry, map[string]bool) { - languageUsage, usedKeys := core.ExtractLanguageUsage(snap) - if len(languageUsage) == 0 { - return nil, usedKeys - } - langs := make([]toolMixEntry, 0, len(languageUsage)) - for _, language := range languageUsage { - langs = append(langs, toolMixEntry{name: language.Name, count: language.Requests}) - } - return langs, usedKeys -} - -func buildLangColorMap(langs []toolMixEntry, providerID string) map[string]lipgloss.Color { - colors := make(map[string]lipgloss.Color, len(langs)) - if len(langs) == 0 { - return colors - } - base := stablePaletteOffset("lang", providerID) - for i, lang := range langs { - colors[lang.name] = distributedPaletteColor(base, i) - } - return colors -} - -func buildProviderCodeStatsLines(snap core.UsageSnapshot, widget core.DashboardWidget, innerW int) ([]string, map[string]bool) { - cs := widget.CodeStatsMetrics - usedKeys := make(map[string]bool) - getVal := func(key string) float64 { - if key == "" { - return 0 - } - if m, ok := snap.Metrics[key]; ok && m.Used != nil { - usedKeys[key] = true - return *m.Used - } - return 0 - } - - added := getVal(cs.LinesAdded) - removed := getVal(cs.LinesRemoved) - files := getVal(cs.FilesChanged) - commits := getVal(cs.Commits) - aiPct := getVal(cs.AIPercent) - prompts := getVal(cs.Prompts) - - if added 
<= 0 && removed <= 0 && commits <= 0 && files <= 0 { - return nil, usedKeys - } - - var codeStatParts []string - if files > 0 { - codeStatParts = append(codeStatParts, shortCompact(files)+" files") - } - if added > 0 || removed > 0 { - codeStatParts = append(codeStatParts, shortCompact(added+removed)+" lines") - } - codeStatSuffix := strings.Join(codeStatParts, " · ") - codeStatHeading := lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Code Statistics") - if codeStatSuffix != "" { - codeStatHeading += " " + dimStyle.Render(codeStatSuffix) - } - lines := []string{ - codeStatHeading, - } - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - if added > 0 || removed > 0 { - total := added + removed - addedColor := colorGreen - removedColor := colorRed - addedW := int(math.Round(added / total * float64(barW))) - if addedW < 1 && added > 0 { - addedW = 1 - } - removedW := barW - addedW - bar := lipgloss.NewStyle().Foreground(addedColor).Render(strings.Repeat("█", addedW)) + - lipgloss.NewStyle().Foreground(removedColor).Render(strings.Repeat("█", removedW)) - lines = append(lines, " "+bar) - - addedDot := lipgloss.NewStyle().Foreground(addedColor).Render("■") - removedDot := lipgloss.NewStyle().Foreground(removedColor).Render("■") - addedLabel := fmt.Sprintf("%s +%s added", addedDot, shortCompact(added)) - removedLabel := fmt.Sprintf("%s -%s removed", removedDot, shortCompact(removed)) - lines = append(lines, renderDotLeaderRow(addedLabel, removedLabel, innerW)) - } - - if files > 0 { - lines = append(lines, renderDotLeaderRow("Files Changed", shortCompact(files)+" files", innerW)) - } - - if commits > 0 { - commitLabel := shortCompact(commits) + " commits" - if aiPct > 0 { - commitLabel += fmt.Sprintf(" · %.0f%% AI", aiPct) - } - lines = append(lines, renderDotLeaderRow("Commits", commitLabel, innerW)) - } - - if aiPct > 0 { - aiBarW := barW - aiFilledW := int(math.Round(aiPct / 100 * float64(aiBarW))) - if 
aiFilledW < 1 && aiPct > 0 { - aiFilledW = 1 - } - aiEmptyW := aiBarW - aiFilledW - if aiEmptyW < 0 { - aiEmptyW = 0 - } - aiColor := colorBlue - aiBar := lipgloss.NewStyle().Foreground(aiColor).Render(strings.Repeat("█", aiFilledW)) + - lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("░", aiEmptyW)) - lines = append(lines, " "+aiBar) - } - - if prompts > 0 { - lines = append(lines, renderDotLeaderRow("Prompts", shortCompact(prompts)+" total", innerW)) - } - - return lines, usedKeys -} - -func buildActualToolUsageLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { - rawTools, usedKeys := core.ExtractActualToolUsage(snap) - if len(rawTools) == 0 { - return nil, usedKeys - } - - allTools := make([]toolMixEntry, 0, len(rawTools)) - var totalCalls float64 - for _, rawTool := range rawTools { - allTools = append(allTools, toolMixEntry{name: rawTool.RawName, count: rawTool.Calls}) - totalCalls += rawTool.Calls - } - if totalCalls <= 0 { - return nil, nil - } - - sortToolMixEntries(allTools) - - displayLimit := 6 - if expanded { - displayLimit = len(allTools) - } - visibleTools := allTools - hiddenCount := 0 - if len(allTools) > displayLimit { - visibleTools = allTools[:displayLimit] - hiddenCount = len(allTools) - displayLimit - } - - toolColors := buildToolColorMap(allTools, snap.AccountID) - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - // Header with total call count and success rate. 
- headerSuffix := shortCompact(totalCalls) + " calls" - if m, ok := snap.Metrics["tool_success_rate"]; ok && m.Used != nil { - headerSuffix += fmt.Sprintf(" · %.0f%% ok", *m.Used) - } - heading := lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Tool Usage") + - " " + dimStyle.Render(headerSuffix) - - lines := []string{ - heading, - " " + renderToolMixBar(allTools, totalCalls, barW, toolColors), - } - - for idx, tool := range visibleTools { - if tool.count <= 0 { - continue - } - pct := tool.count / totalCalls * 100 - label := tool.name - toolColor := colorForTool(toolColors, tool.name) - colorDot := lipgloss.NewStyle().Foreground(toolColor).Render("■") - - maxLabelLen := tableLabelMaxLen(innerW) - if len(label) > maxLabelLen { - label = label[:maxLabelLen-1] + "…" - } - displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) - valueStr := fmt.Sprintf("%2.0f%% %s calls", pct, shortCompact(tool.count)) - lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) - } - - if hiddenCount > 0 { - lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more tools (Ctrl+O)", hiddenCount))) - } - - return lines, usedKeys -} - -func buildMCPUsageLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { - type funcEntry struct { - name string - calls float64 - } - type serverEntry struct { - name string - calls float64 - funcs []funcEntry - } - - rawServers, usedKeys := core.ExtractMCPUsage(snap) - servers := make([]serverEntry, 0, len(rawServers)) - var totalCalls float64 - for _, rawServer := range rawServers { - server := serverEntry{ - name: prettifyMCPServerName(rawServer.RawName), - calls: rawServer.Calls, - } - for _, rawFunc := range rawServer.Functions { - server.funcs = append(server.funcs, funcEntry{ - name: prettifyMCPFunctionName(rawFunc.RawName), - calls: rawFunc.Calls, - }) - } - servers = append(servers, server) - totalCalls += server.calls - } - - if len(servers) == 0 || totalCalls <= 0 { - 
return nil, usedKeys - } - - // Header. - headerSuffix := shortCompact(totalCalls) + " calls · " + fmt.Sprintf("%d servers", len(servers)) - heading := lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("MCP Usage") + - " " + dimStyle.Render(headerSuffix) - - // Build entries for the bar using prettified names. - var allEntries []toolMixEntry - for _, srv := range servers { - allEntries = append(allEntries, toolMixEntry{name: srv.name, count: srv.calls}) - } - - barW := innerW - 2 - if barW < 12 { - barW = 12 - } - if barW > 40 { - barW = 40 - } - - toolColors := buildToolColorMap(allEntries, snap.AccountID) - - lines := []string{ - heading, - " " + renderToolMixBar(allEntries, totalCalls, barW, toolColors), - } - - // Show up to 6 servers with nested function breakdown. - displayLimit := 6 - if expanded { - displayLimit = len(servers) - } - visible := servers - if len(visible) > displayLimit { - visible = visible[:displayLimit] - } - - for idx, srv := range visible { - pct := srv.calls / totalCalls * 100 - toolColor := colorForTool(toolColors, srv.name) - colorDot := lipgloss.NewStyle().Foreground(toolColor).Render("■") - displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, srv.name) - valueStr := fmt.Sprintf("%2.0f%% %s calls", pct, shortCompact(srv.calls)) - lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) - - // Show top 3 functions per server, indented. 
- maxFuncs := 3 - if expanded { - maxFuncs = len(srv.funcs) - } - if len(srv.funcs) < maxFuncs { - maxFuncs = len(srv.funcs) - } - for j := 0; j < maxFuncs; j++ { - fn := srv.funcs[j] - fnLabel := " " + fn.name - fnValue := fmt.Sprintf("%s calls", shortCompact(fn.calls)) - lines = append(lines, renderDotLeaderRow(fnLabel, fnValue, innerW)) - } - if !expanded && len(srv.funcs) > 3 { - lines = append(lines, dimStyle.Render(fmt.Sprintf(" + %d more (Ctrl+O)", len(srv.funcs)-3))) - } - } - - if !expanded && len(servers) > displayLimit { - lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more servers (Ctrl+O)", len(servers)-displayLimit))) - } - - return lines, usedKeys -} diff --git a/internal/tui/tiles_composition_clients.go b/internal/tui/tiles_composition_clients.go new file mode 100644 index 0000000..b70feec --- /dev/null +++ b/internal/tui/tiles_composition_clients.go @@ -0,0 +1,418 @@ +package tui + +import ( + "fmt" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func collectInterfaceAsClients(snap core.UsageSnapshot) ([]clientMixEntry, map[string]bool) { + entries, usedKeys := core.ExtractInterfaceClientBreakdown(snap) + clients := make([]clientMixEntry, 0, len(entries)) + for _, entry := range entries { + clients = append(clients, clientMixEntry{ + name: entry.Name, + requests: entry.Requests, + seriesKind: entry.SeriesKind, + series: entry.Series, + }) + } + return clients, usedKeys +} + +func buildProviderClientCompositionLinesWithWidget(snap core.UsageSnapshot, innerW int, expanded bool, widget core.DashboardWidget) ([]string, map[string]bool) { + allClients, usedKeys := collectProviderClientMix(snap) + if widget.ClientCompositionIncludeInterfaces { + ifaceClients, ifaceKeys := collectInterfaceAsClients(snap) + if len(ifaceClients) > 0 { + allClients = ifaceClients + for key, value := range ifaceKeys { + usedKeys[key] = value + } + } + } + if len(allClients) == 0 { + return nil, nil + } 
+ + clients, hiddenCount := limitClientMix(allClients, expanded, 4) + clientColors := buildClientColorMap(allClients, snap.AccountID) + mode, total := selectClientMixMode(allClients) + if total <= 0 { + return nil, nil + } + + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + + headingName := widget.ClientCompositionHeading + if headingName == "" { + headingName = "Client Burn" + if mode == "requests" || mode == "sessions" { + headingName = "Client Activity" + } + } + headerSuffix := shortCompact(total) + " tok" + if mode == "requests" { + headerSuffix = shortCompact(total) + " req" + } else if mode == "sessions" { + headerSuffix = shortCompact(total) + " sess" + } + + lines := []string{ + lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render(headingName) + " " + dimStyle.Render(headerSuffix), + " " + renderClientMixBar(allClients, total, barW, clientColors, mode), + } + + for idx, client := range clients { + value := clientDisplayValue(client, mode) + if value <= 0 { + continue + } + pct := value / total * 100 + label := prettifyClientName(client.name) + colorDot := lipgloss.NewStyle().Foreground(colorForClient(clientColors, client.name)).Render("■") + maxLabelLen := tableLabelMaxLen(innerW) + if len(label) > maxLabelLen { + label = label[:maxLabelLen-1] + "…" + } + displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) + valueStr := fmt.Sprintf("%2.0f%% %s tok", pct, shortCompact(value)) + switch mode { + case "requests": + valueStr = fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(value)) + if client.sessions > 0 { + valueStr += fmt.Sprintf(" · %s sess", shortCompact(client.sessions)) + } + case "sessions": + valueStr = fmt.Sprintf("%2.0f%% %s sess", pct, shortCompact(value)) + default: + if client.requests > 0 { + valueStr += fmt.Sprintf(" · %s req", shortCompact(client.requests)) + } else if client.sessions > 0 { + valueStr += fmt.Sprintf(" · %s sess", shortCompact(client.sessions)) + } + } + lines = 
append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) + } + + trendEntries := limitClientTrendEntries(clients, expanded) + if len(trendEntries) > 0 { + lines = append(lines, dimStyle.Render(" Trend (daily by client)")) + labelW := 12 + if innerW < 55 { + labelW = 10 + } + sparkW := innerW - labelW - 5 + if sparkW < 10 { + sparkW = 10 + } + if sparkW > 28 { + sparkW = 28 + } + + for _, client := range trendEntries { + values := make([]float64, 0, len(client.series)) + for _, point := range client.series { + values = append(values, point.Value) + } + if len(values) < 2 { + continue + } + label := truncateToWidth(prettifyClientName(client.name), labelW) + spark := RenderSparkline(values, sparkW, colorForClient(clientColors, client.name)) + lines = append(lines, fmt.Sprintf(" %s %s", + lipgloss.NewStyle().Foreground(colorSubtext).Width(labelW).Render(label), + spark, + )) + } + } + if hiddenCount > 0 { + lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more clients (Ctrl+O)", hiddenCount))) + } + return lines, usedKeys +} + +func buildProviderProjectBreakdownLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { + allProjects, usedKeys := collectProviderProjectMix(snap) + if len(allProjects) == 0 { + return nil, nil + } + + projects, hiddenCount := limitProjectMix(allProjects, expanded, 6) + projectColors := buildProjectColorMap(allProjects, snap.AccountID) + totalRequests := float64(0) + for _, project := range allProjects { + totalRequests += project.requests + } + if totalRequests <= 0 { + return nil, nil + } + + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + + barEntries := make([]toolMixEntry, 0, len(allProjects)) + for _, project := range allProjects { + barEntries = append(barEntries, toolMixEntry{name: project.name, count: project.requests}) + } + + lines := []string{ + lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Project Breakdown") + " " + 
dimStyle.Render(shortCompact(totalRequests)+" req"), + " " + renderToolMixBar(barEntries, totalRequests, barW, projectColors), + } + + for idx, project := range projects { + if project.requests <= 0 { + continue + } + pct := project.requests / totalRequests * 100 + label := project.name + colorDot := lipgloss.NewStyle().Foreground(colorForProject(projectColors, project.name)).Render("■") + maxLabelLen := tableLabelMaxLen(innerW) + if len(label) > maxLabelLen { + label = label[:maxLabelLen-1] + "…" + } + displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) + valueStr := fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(project.requests)) + if project.requests1d > 0 { + valueStr += fmt.Sprintf(" · today %s", shortCompact(project.requests1d)) + } + lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) + } + if hiddenCount > 0 { + lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more projects (Ctrl+O)", hiddenCount))) + } + return lines, usedKeys +} + +func collectProviderProjectMix(snap core.UsageSnapshot) ([]projectMixEntry, map[string]bool) { + projectUsage, usedKeys := core.ExtractProjectUsage(snap) + if len(projectUsage) == 0 { + return nil, usedKeys + } + projects := make([]projectMixEntry, 0, len(projectUsage)) + for _, project := range projectUsage { + projects = append(projects, projectMixEntry{ + name: project.Name, + requests: project.Requests, + requests1d: project.Requests1d, + series: project.Series, + }) + } + return projects, usedKeys +} + +func limitProjectMix(projects []projectMixEntry, expanded bool, maxVisible int) ([]projectMixEntry, int) { + if expanded || maxVisible <= 0 || len(projects) <= maxVisible { + return projects, 0 + } + return projects[:maxVisible], len(projects) - maxVisible +} + +func buildProjectColorMap(projects []projectMixEntry, providerID string) map[string]lipgloss.Color { + colors := make(map[string]lipgloss.Color, len(projects)) + if len(projects) == 0 { + return colors + } + base := 
stablePaletteOffset("project", providerID) + for i, project := range projects { + colors[project.name] = distributedPaletteColor(base, i) + } + return colors +} + +func colorForProject(colors map[string]lipgloss.Color, name string) lipgloss.Color { + if color, ok := colors[name]; ok { + return color + } + return stableModelColor("project:"+name, "project") +} + +func collectProviderClientMix(snap core.UsageSnapshot) ([]clientMixEntry, map[string]bool) { + entries, usedKeys := core.ExtractClientBreakdown(snap) + clients := make([]clientMixEntry, 0, len(entries)) + for _, entry := range entries { + clients = append(clients, clientMixEntry{ + name: entry.Name, + total: entry.Total, + input: entry.Input, + output: entry.Output, + cached: entry.Cached, + reasoning: entry.Reasoning, + requests: entry.Requests, + sessions: entry.Sessions, + seriesKind: entry.SeriesKind, + series: entry.Series, + }) + } + return clients, usedKeys +} + +func clientTokenValue(client clientMixEntry) float64 { + if client.total > 0 { + return client.total + } + if client.input > 0 || client.output > 0 || client.cached > 0 || client.reasoning > 0 { + return client.input + client.output + client.cached + client.reasoning + } + return 0 +} + +func clientMixValue(client clientMixEntry) float64 { + if value := clientTokenValue(client); value > 0 { + return value + } + if client.requests > 0 { + return client.requests + } + if len(client.series) > 0 { + return sumSeriesValues(client.series) + } + return 0 +} + +func clientDisplayValue(client clientMixEntry, mode string) float64 { + switch mode { + case "sessions": + return client.sessions + case "requests": + if client.requests > 0 { + return client.requests + } + return sumSeriesValues(client.series) + default: + return clientMixValue(client) + } +} + +func selectClientMixMode(clients []clientMixEntry) (string, float64) { + totalTokens := float64(0) + totalRequests := float64(0) + totalSessions := float64(0) + for _, client := range clients { + 
totalTokens += clientTokenValue(client) + totalRequests += client.requests + totalSessions += client.sessions + } + if totalTokens > 0 { + return "tokens", totalTokens + } + if totalRequests > 0 { + return "requests", totalRequests + } + return "sessions", totalSessions +} + +func sumSeriesValues(points []core.TimePoint) float64 { + total := float64(0) + for _, point := range points { + total += point.Value + } + return total +} + +func mergeSeriesByDay(seriesByClient map[string]map[string]float64, client string, points []core.TimePoint) { + if client == "" || len(points) == 0 { + return + } + if seriesByClient[client] == nil { + seriesByClient[client] = make(map[string]float64) + } + for _, point := range points { + if point.Date != "" { + seriesByClient[client][point.Date] += point.Value + } + } +} + +func limitClientMix(clients []clientMixEntry, expanded bool, maxVisible int) ([]clientMixEntry, int) { + if expanded || maxVisible <= 0 || len(clients) <= maxVisible { + return clients, 0 + } + return clients[:maxVisible], len(clients) - maxVisible +} + +func limitClientTrendEntries(clients []clientMixEntry, expanded bool) []clientMixEntry { + maxVisible := 2 + if expanded { + maxVisible = 4 + } + trend := make([]clientMixEntry, 0, maxVisible) + for _, client := range clients { + if len(client.series) < 2 { + continue + } + trend = append(trend, client) + if len(trend) >= maxVisible { + break + } + } + return trend +} + +func prettifyClientName(name string) string { + switch name { + case "cli": + return "CLI Agents" + case "ide": + return "IDE" + case "exec": + return "Exec" + case "desktop_app": + return "Desktop App" + case "other": + return "Other" + case "composer": + return "Composer" + case "human": + return "Human" + case "tab": + return "Tab Completion" + } + parts := strings.Split(name, "_") + for i := range parts { + switch parts[i] { + case "cli": + parts[i] = "CLI" + case "ide": + parts[i] = "IDE" + case "api": + parts[i] = "API" + default: + parts[i] = 
titleCase(parts[i]) + } + } + return strings.Join(parts, " ") +} + +func buildClientColorMap(clients []clientMixEntry, providerID string) map[string]lipgloss.Color { + colors := make(map[string]lipgloss.Color, len(clients)) + if len(clients) == 0 { + return colors + } + base := stablePaletteOffset("client", providerID) + for i, client := range clients { + colors[client.name] = distributedPaletteColor(base, i) + } + return colors +} + +func colorForClient(colors map[string]lipgloss.Color, name string) lipgloss.Color { + if color, ok := colors[name]; ok { + return color + } + return stableModelColor("client:"+name, "client") +} diff --git a/internal/tui/tiles_composition_providers.go b/internal/tui/tiles_composition_providers.go new file mode 100644 index 0000000..77136b0 --- /dev/null +++ b/internal/tui/tiles_composition_providers.go @@ -0,0 +1,319 @@ +package tui + +import ( + "fmt" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func buildProviderVendorCompositionLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { + allProviders, usedKeys := collectProviderVendorMix(snap) + if len(allProviders) == 0 { + return nil, nil + } + providers, hiddenCount := limitProviderMix(allProviders, expanded, 4) + providerColors := buildProviderColorMap(allProviders, snap.AccountID) + + totalCost := float64(0) + totalTokens := float64(0) + totalRequests := float64(0) + for _, provider := range allProviders { + totalCost += provider.cost + totalTokens += provider.input + provider.output + totalRequests += provider.requests + } + + mode, total := selectBurnMode(totalTokens, totalCost, totalRequests) + if total <= 0 { + return nil, nil + } + + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + + heading := "Provider Burn (tokens)" + if mode == "cost" { + heading = "Provider Burn (credits)" + } else if mode == "requests" { + heading = "Provider Activity 
(requests)" + } + + providerClients := make([]clientMixEntry, 0, len(allProviders)) + for _, provider := range allProviders { + value := provider.requests + if mode == "cost" { + value = provider.cost + } else if mode == "tokens" { + value = provider.input + provider.output + } + if value > 0 { + providerClients = append(providerClients, clientMixEntry{name: provider.name, total: value}) + } + } + if len(providerClients) == 0 { + return nil, nil + } + + lines := []string{ + lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render(heading), + " " + renderClientMixBar(providerClients, total, barW, providerColors, "tokens"), + } + + for idx, provider := range providers { + value := provider.requests + if mode == "cost" { + value = provider.cost + } else if mode == "tokens" { + value = provider.input + provider.output + } + if value <= 0 { + continue + } + pct := value / total * 100 + label := prettifyModelName(provider.name) + colorDot := lipgloss.NewStyle().Foreground(providerColors[provider.name]).Render("■") + maxLabelLen := tableLabelMaxLen(innerW) + if len(label) > maxLabelLen { + label = label[:maxLabelLen-1] + "…" + } + displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) + valueStr := fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(provider.requests)) + if mode == "tokens" { + valueStr = fmt.Sprintf("%2.0f%% %s tok · %s req", pct, shortCompact(provider.input+provider.output), shortCompact(provider.requests)) + if provider.cost > 0 { + valueStr += fmt.Sprintf(" · %s", formatUSD(provider.cost)) + } + } else if mode == "cost" { + valueStr = fmt.Sprintf("%2.0f%% %s tok · %s req · %s", pct, shortCompact(provider.input+provider.output), shortCompact(provider.requests), formatUSD(provider.cost)) + } + lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) + } + if hiddenCount > 0 { + lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more providers (Ctrl+O)", hiddenCount))) + } + return lines, usedKeys +} + +func 
collectProviderVendorMix(snap core.UsageSnapshot) ([]providerMixEntry, map[string]bool) { + entries, usedKeys := core.ExtractProviderBreakdown(snap) + providers := make([]providerMixEntry, 0, len(entries)) + for _, entry := range entries { + providers = append(providers, providerMixEntry{ + name: entry.Name, + cost: entry.Cost, + input: entry.Input, + output: entry.Output, + requests: entry.Requests, + }) + } + return providers, usedKeys +} + +func buildUpstreamProviderCompositionLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { + allProviders, usedKeys := collectUpstreamProviderMix(snap) + if len(allProviders) == 0 { + return nil, nil + } + providers, hiddenCount := limitProviderMix(allProviders, expanded, 4) + providerColors := buildProviderColorMap(allProviders, snap.AccountID) + + totalCost := float64(0) + totalTokens := float64(0) + totalRequests := float64(0) + for _, provider := range allProviders { + totalCost += provider.cost + totalTokens += provider.input + provider.output + totalRequests += provider.requests + } + + mode, total := selectBurnMode(totalTokens, totalCost, totalRequests) + if total <= 0 { + return nil, nil + } + + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + + heading := "Hosting Providers (tokens)" + if mode == "cost" { + heading = "Hosting Providers (credits)" + } else if mode == "requests" { + heading = "Hosting Providers (requests)" + } + + providerClients := make([]clientMixEntry, 0, len(allProviders)) + for _, provider := range allProviders { + value := provider.requests + if mode == "cost" { + value = provider.cost + } else if mode == "tokens" { + value = provider.input + provider.output + } + if value > 0 { + providerClients = append(providerClients, clientMixEntry{name: provider.name, total: value}) + } + } + if len(providerClients) == 0 { + return nil, nil + } + + lines := []string{ + 
lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render(heading), + " " + renderClientMixBar(providerClients, total, barW, providerColors, "tokens"), + } + + for idx, provider := range providers { + value := provider.requests + if mode == "cost" { + value = provider.cost + } else if mode == "tokens" { + value = provider.input + provider.output + } + if value <= 0 { + continue + } + pct := value / total * 100 + label := prettifyModelName(provider.name) + colorDot := lipgloss.NewStyle().Foreground(providerColors[provider.name]).Render("■") + maxLabelLen := tableLabelMaxLen(innerW) + if len(label) > maxLabelLen { + label = label[:maxLabelLen-1] + "…" + } + displayLabel := fmt.Sprintf("%s %d %s", colorDot, idx+1, label) + valueStr := fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(provider.requests)) + if mode == "tokens" { + valueStr = fmt.Sprintf("%2.0f%% %s tok · %s req", pct, shortCompact(provider.input+provider.output), shortCompact(provider.requests)) + if provider.cost > 0 { + valueStr += fmt.Sprintf(" · %s", formatUSD(provider.cost)) + } + } else if mode == "cost" { + valueStr = fmt.Sprintf("%2.0f%% %s tok · %s req · %s", pct, shortCompact(provider.input+provider.output), shortCompact(provider.requests), formatUSD(provider.cost)) + } + lines = append(lines, renderDotLeaderRow(displayLabel, valueStr, innerW)) + } + if hiddenCount > 0 { + lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more providers (Ctrl+O)", hiddenCount))) + } + return lines, usedKeys +} + +func collectUpstreamProviderMix(snap core.UsageSnapshot) ([]providerMixEntry, map[string]bool) { + entries, usedKeys := core.ExtractUpstreamProviderBreakdown(snap) + result := make([]providerMixEntry, 0, len(entries)) + for _, entry := range entries { + result = append(result, providerMixEntry{ + name: entry.Name, + cost: entry.Cost, + input: entry.Input, + output: entry.Output, + requests: entry.Requests, + }) + } + return result, usedKeys +} + +func limitProviderMix(providers 
[]providerMixEntry, expanded bool, maxVisible int) ([]providerMixEntry, int) { + if expanded || maxVisible <= 0 || len(providers) <= maxVisible { + return providers, 0 + } + return providers[:maxVisible], len(providers) - maxVisible +} + +func buildProviderColorMap(providers []providerMixEntry, providerID string) map[string]lipgloss.Color { + colors := make(map[string]lipgloss.Color, len(providers)) + if len(providers) == 0 { + return colors + } + base := stablePaletteOffset("provider", providerID) + for i, provider := range providers { + colors[provider.name] = distributedPaletteColor(base, i) + } + return colors +} + +func buildProviderDailyTrendLines(snap core.UsageSnapshot, innerW int) []string { + type trendDef struct { + label string + keys []string + color lipgloss.Color + unit string + } + defs := []trendDef{ + {label: "Cost", keys: []string{"analytics_cost", "cost"}, color: colorTeal, unit: "USD"}, + {label: "Req", keys: []string{"analytics_requests", "requests"}, color: colorYellow, unit: "requests"}, + {label: "Tokens", keys: []string{"analytics_tokens"}, color: colorSapphire, unit: "tokens"}, + } + + lines := []string{} + labelW := 8 + if innerW < 55 { + labelW = 6 + } + sparkW := innerW - labelW - 14 + if sparkW < 10 { + sparkW = 10 + } + if sparkW > 30 { + sparkW = 30 + } + + for _, def := range defs { + var points []core.TimePoint + for _, key := range def.keys { + if got, ok := snap.DailySeries[key]; ok && len(got) > 1 { + points = got + break + } + } + if len(points) < 2 { + continue + } + values := tailSeriesValues(points, 14) + if len(values) < 2 { + continue + } + last := values[len(values)-1] + lastLabel := shortCompact(last) + if def.unit == "USD" { + lastLabel = formatUSD(last) + } + + if len(lines) == 0 { + lines = append(lines, lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Daily Usage")) + } + + label := lipgloss.NewStyle().Foreground(colorSubtext).Width(labelW).Render(def.label) + spark := RenderSparkline(values, sparkW, 
def.color) + lines = append(lines, fmt.Sprintf(" %s %s %s", label, spark, dimStyle.Render(lastLabel))) + } + + if len(lines) == 0 { + return nil + } + return lines +} + +func tailSeriesValues(points []core.TimePoint, max int) []float64 { + if len(points) == 0 { + return nil + } + if max > 0 && len(points) > max { + points = points[len(points)-max:] + } + values := make([]float64, 0, len(points)) + for _, point := range points { + values = append(values, point.Value) + } + return values +} diff --git a/internal/tui/tiles_composition_tools.go b/internal/tui/tiles_composition_tools.go new file mode 100644 index 0000000..7b2505f --- /dev/null +++ b/internal/tui/tiles_composition_tools.go @@ -0,0 +1,441 @@ +package tui + +import ( + "fmt" + "math" + "sort" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func prettifyMCPServerName(raw string) string { + s := strings.ToLower(strings.TrimSpace(raw)) + if s == "" { + return "unknown" + } + s = strings.TrimPrefix(s, "claude_ai_") + s = strings.TrimPrefix(s, "plugin_") + s = strings.TrimSuffix(s, "_mcp") + parts := strings.Split(s, "_") + if len(parts) >= 2 && parts[0] == parts[len(parts)-1] { + parts = parts[:len(parts)-1] + } + s = strings.Join(parts, "_") + if s == "" { + return raw + } + return prettifyMCPName(s) +} + +func prettifyMCPFunctionName(raw string) string { + s := strings.ToLower(strings.TrimSpace(raw)) + if s == "" { + return raw + } + return prettifyMCPName(s) +} + +func prettifyMCPName(s string) string { + s = strings.NewReplacer("_", " ", "-", " ").Replace(s) + words := strings.Fields(s) + for i, word := range words { + if len(word) > 0 { + words[i] = strings.ToUpper(word[:1]) + word[1:] + } + } + return strings.Join(words, " ") +} + +func buildProviderToolCompositionLines(snap core.UsageSnapshot, innerW int, expanded bool, widget core.DashboardWidget) ([]string, map[string]bool) { + allTools, usedKeys := collectProviderToolMix(snap) + if 
len(allTools) == 0 { + return nil, nil + } + tools, hiddenCount := limitToolMix(allTools, expanded, 4) + toolColors := buildToolColorMap(allTools, snap.AccountID) + totalCalls := float64(0) + for _, tool := range allTools { + totalCalls += tool.count + } + if totalCalls <= 0 { + return nil, nil + } + + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + + headingName := "Tool Usage" + if widget.ToolCompositionHeading != "" { + headingName = widget.ToolCompositionHeading + } + lines := []string{ + lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render(headingName) + " " + dimStyle.Render(shortCompact(totalCalls)+" calls"), + " " + renderToolMixBar(allTools, totalCalls, barW, toolColors), + } + for idx, tool := range tools { + if tool.count <= 0 { + continue + } + pct := tool.count / totalCalls * 100 + label := tool.name + colorDot := lipgloss.NewStyle().Foreground(colorForTool(toolColors, tool.name)).Render("■") + maxLabelLen := tableLabelMaxLen(innerW) + if len(label) > maxLabelLen { + label = label[:maxLabelLen-1] + "…" + } + lines = append(lines, renderDotLeaderRow(fmt.Sprintf("%s %d %s", colorDot, idx+1, label), fmt.Sprintf("%2.0f%% %s calls", pct, shortCompact(tool.count)), innerW)) + } + if hiddenCount > 0 { + lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more tools (Ctrl+O)", hiddenCount))) + } + return lines, usedKeys +} + +func collectProviderToolMix(snap core.UsageSnapshot) ([]toolMixEntry, map[string]bool) { + entries, usedKeys := core.ExtractInterfaceClientBreakdown(snap) + tools := make([]toolMixEntry, 0, len(entries)) + for _, entry := range entries { + tools = append(tools, toolMixEntry{name: entry.Name, count: entry.Requests}) + } + return tools, usedKeys +} + +func sortToolMixEntries(tools []toolMixEntry) { + sort.Slice(tools, func(i, j int) bool { + if tools[i].count == tools[j].count { + return tools[i].name < tools[j].name + } + return tools[i].count > tools[j].count + }) +} + +func 
limitToolMix(tools []toolMixEntry, expanded bool, maxVisible int) ([]toolMixEntry, int) { + if expanded || maxVisible <= 0 || len(tools) <= maxVisible { + return tools, 0 + } + return tools[:maxVisible], len(tools) - maxVisible +} + +func buildToolColorMap(tools []toolMixEntry, providerID string) map[string]lipgloss.Color { + colors := make(map[string]lipgloss.Color, len(tools)) + if len(tools) == 0 { + return colors + } + base := stablePaletteOffset("tool", providerID) + for i, tool := range tools { + colors[tool.name] = distributedPaletteColor(base, i) + } + return colors +} + +func colorForTool(colors map[string]lipgloss.Color, name string) lipgloss.Color { + if color, ok := colors[name]; ok { + return color + } + return stableModelColor("tool:"+name, "tool") +} + +func buildProviderLanguageCompositionLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { + allLangs, usedKeys := collectProviderLanguageMix(snap) + if len(allLangs) == 0 { + return nil, usedKeys + } + langs, hiddenCount := limitToolMix(allLangs, expanded, 6) + langColors := buildLangColorMap(allLangs, snap.AccountID) + totalReqs := float64(0) + for _, lang := range allLangs { + totalReqs += lang.count + } + if totalReqs <= 0 { + return nil, nil + } + + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + + lines := []string{ + lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Language") + " " + dimStyle.Render(shortCompact(totalReqs)+" req"), + " " + renderToolMixBar(allLangs, totalReqs, barW, langColors), + } + for idx, lang := range langs { + if lang.count <= 0 { + continue + } + pct := lang.count / totalReqs * 100 + label := lang.name + colorDot := lipgloss.NewStyle().Foreground(colorForTool(langColors, lang.name)).Render("■") + maxLabelLen := tableLabelMaxLen(innerW) + if len(label) > maxLabelLen { + label = label[:maxLabelLen-1] + "…" + } + lines = append(lines, renderDotLeaderRow(fmt.Sprintf("%s %d %s", colorDot, 
idx+1, label), fmt.Sprintf("%2.0f%% %s req", pct, shortCompact(lang.count)), innerW)) + } + if hiddenCount > 0 { + lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more languages (Ctrl+O)", hiddenCount))) + } + return lines, usedKeys +} + +func collectProviderLanguageMix(snap core.UsageSnapshot) ([]toolMixEntry, map[string]bool) { + languageUsage, usedKeys := core.ExtractLanguageUsage(snap) + if len(languageUsage) == 0 { + return nil, usedKeys + } + langs := make([]toolMixEntry, 0, len(languageUsage)) + for _, language := range languageUsage { + langs = append(langs, toolMixEntry{name: language.Name, count: language.Requests}) + } + return langs, usedKeys +} + +func buildLangColorMap(langs []toolMixEntry, providerID string) map[string]lipgloss.Color { + colors := make(map[string]lipgloss.Color, len(langs)) + if len(langs) == 0 { + return colors + } + base := stablePaletteOffset("lang", providerID) + for i, lang := range langs { + colors[lang.name] = distributedPaletteColor(base, i) + } + return colors +} + +func buildProviderCodeStatsLines(snap core.UsageSnapshot, widget core.DashboardWidget, innerW int) ([]string, map[string]bool) { + cs := widget.CodeStatsMetrics + usedKeys := make(map[string]bool) + getVal := func(key string) float64 { + if key == "" { + return 0 + } + if metric, ok := snap.Metrics[key]; ok && metric.Used != nil { + usedKeys[key] = true + return *metric.Used + } + return 0 + } + + added := getVal(cs.LinesAdded) + removed := getVal(cs.LinesRemoved) + files := getVal(cs.FilesChanged) + commits := getVal(cs.Commits) + aiPct := getVal(cs.AIPercent) + prompts := getVal(cs.Prompts) + + if added <= 0 && removed <= 0 && commits <= 0 && files <= 0 { + return nil, usedKeys + } + + parts := []string{} + if files > 0 { + parts = append(parts, shortCompact(files)+" files") + } + if added > 0 || removed > 0 { + parts = append(parts, shortCompact(added+removed)+" lines") + } + heading := 
lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Code Statistics") + if len(parts) > 0 { + heading += " " + dimStyle.Render(strings.Join(parts, " · ")) + } + lines := []string{heading} + + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + + if added > 0 || removed > 0 { + total := added + removed + addedW := int(math.Round(added / total * float64(barW))) + if addedW < 1 && added > 0 { + addedW = 1 + } + removedW := barW - addedW + bar := lipgloss.NewStyle().Foreground(colorGreen).Render(strings.Repeat("█", addedW)) + + lipgloss.NewStyle().Foreground(colorRed).Render(strings.Repeat("█", removedW)) + lines = append(lines, " "+bar) + lines = append(lines, renderDotLeaderRow( + fmt.Sprintf("%s +%s added", lipgloss.NewStyle().Foreground(colorGreen).Render("■"), shortCompact(added)), + fmt.Sprintf("%s -%s removed", lipgloss.NewStyle().Foreground(colorRed).Render("■"), shortCompact(removed)), + innerW, + )) + } + if files > 0 { + lines = append(lines, renderDotLeaderRow("Files Changed", shortCompact(files)+" files", innerW)) + } + if commits > 0 { + label := shortCompact(commits) + " commits" + if aiPct > 0 { + label += fmt.Sprintf(" · %.0f%% AI", aiPct) + } + lines = append(lines, renderDotLeaderRow("Commits", label, innerW)) + } + if aiPct > 0 { + filled := int(math.Round(aiPct / 100 * float64(barW))) + if filled < 1 && aiPct > 0 { + filled = 1 + } + empty := barW - filled + if empty < 0 { + empty = 0 + } + lines = append(lines, " "+lipgloss.NewStyle().Foreground(colorBlue).Render(strings.Repeat("█", filled))+ + lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("░", empty))) + } + if prompts > 0 { + lines = append(lines, renderDotLeaderRow("Prompts", shortCompact(prompts)+" total", innerW)) + } + return lines, usedKeys +} + +func buildActualToolUsageLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { + rawTools, usedKeys := core.ExtractActualToolUsage(snap) + if 
len(rawTools) == 0 { + return nil, usedKeys + } + allTools := make([]toolMixEntry, 0, len(rawTools)) + totalCalls := float64(0) + for _, rawTool := range rawTools { + allTools = append(allTools, toolMixEntry{name: rawTool.RawName, count: rawTool.Calls}) + totalCalls += rawTool.Calls + } + if totalCalls <= 0 { + return nil, nil + } + sortToolMixEntries(allTools) + displayLimit := 6 + if expanded { + displayLimit = len(allTools) + } + visibleTools := allTools + hiddenCount := 0 + if len(allTools) > displayLimit { + visibleTools = allTools[:displayLimit] + hiddenCount = len(allTools) - displayLimit + } + toolColors := buildToolColorMap(allTools, snap.AccountID) + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + headerSuffix := shortCompact(totalCalls) + " calls" + if metric, ok := snap.Metrics["tool_success_rate"]; ok && metric.Used != nil { + headerSuffix += fmt.Sprintf(" · %.0f%% ok", *metric.Used) + } + lines := []string{ + lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("Tool Usage") + " " + dimStyle.Render(headerSuffix), + " " + renderToolMixBar(allTools, totalCalls, barW, toolColors), + } + for idx, tool := range visibleTools { + if tool.count <= 0 { + continue + } + pct := tool.count / totalCalls * 100 + label := tool.name + colorDot := lipgloss.NewStyle().Foreground(colorForTool(toolColors, tool.name)).Render("■") + maxLabelLen := tableLabelMaxLen(innerW) + if len(label) > maxLabelLen { + label = label[:maxLabelLen-1] + "…" + } + lines = append(lines, renderDotLeaderRow(fmt.Sprintf("%s %d %s", colorDot, idx+1, label), fmt.Sprintf("%2.0f%% %s calls", pct, shortCompact(tool.count)), innerW)) + } + if hiddenCount > 0 { + lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more tools (Ctrl+O)", hiddenCount))) + } + return lines, usedKeys +} + +func buildMCPUsageLines(snap core.UsageSnapshot, innerW int, expanded bool) ([]string, map[string]bool) { + type funcEntry struct { + name string + calls float64 + } + 
type serverEntry struct { + name string + calls float64 + funcs []funcEntry + } + + rawServers, usedKeys := core.ExtractMCPUsage(snap) + servers := make([]serverEntry, 0, len(rawServers)) + totalCalls := float64(0) + for _, rawServer := range rawServers { + server := serverEntry{name: prettifyMCPServerName(rawServer.RawName), calls: rawServer.Calls} + for _, rawFunc := range rawServer.Functions { + server.funcs = append(server.funcs, funcEntry{name: prettifyMCPFunctionName(rawFunc.RawName), calls: rawFunc.Calls}) + } + servers = append(servers, server) + totalCalls += server.calls + } + if len(servers) == 0 || totalCalls <= 0 { + return nil, usedKeys + } + + headerSuffix := shortCompact(totalCalls) + " calls · " + fmt.Sprintf("%d servers", len(servers)) + allEntries := make([]toolMixEntry, 0, len(servers)) + for _, server := range servers { + allEntries = append(allEntries, toolMixEntry{name: server.name, count: server.calls}) + } + barW := innerW - 2 + if barW < 12 { + barW = 12 + } + if barW > 40 { + barW = 40 + } + toolColors := buildToolColorMap(allEntries, snap.AccountID) + lines := []string{ + lipgloss.NewStyle().Foreground(colorSubtext).Bold(true).Render("MCP Usage") + " " + dimStyle.Render(headerSuffix), + " " + renderToolMixBar(allEntries, totalCalls, barW, toolColors), + } + + displayLimit := 6 + if expanded { + displayLimit = len(servers) + } + visible := servers + if len(visible) > displayLimit { + visible = visible[:displayLimit] + } + + for idx, server := range visible { + pct := server.calls / totalCalls * 100 + colorDot := lipgloss.NewStyle().Foreground(colorForTool(toolColors, server.name)).Render("■") + lines = append(lines, renderDotLeaderRow(fmt.Sprintf("%s %d %s", colorDot, idx+1, server.name), fmt.Sprintf("%2.0f%% %s calls", pct, shortCompact(server.calls)), innerW)) + maxFuncs := 3 + if expanded { + maxFuncs = len(server.funcs) + } + if len(server.funcs) < maxFuncs { + maxFuncs = len(server.funcs) + } + for j := 0; j < maxFuncs; j++ { + fn := 
server.funcs[j] + lines = append(lines, renderDotLeaderRow(" "+fn.name, fmt.Sprintf("%s calls", shortCompact(fn.calls)), innerW)) + } + if !expanded && len(server.funcs) > 3 { + lines = append(lines, dimStyle.Render(fmt.Sprintf(" + %d more (Ctrl+O)", len(server.funcs)-3))) + } + } + if !expanded && len(servers) > displayLimit { + lines = append(lines, dimStyle.Render(fmt.Sprintf("+ %d more servers (Ctrl+O)", len(servers)-displayLimit))) + } + return lines, usedKeys +} From 1b031d4686fdb1f7e87e0dcb3b68bd06ab7d91ba Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Tue, 10 Mar 2026 00:16:33 +0100 Subject: [PATCH 28/32] refactor: close remaining tui and provider cleanup gaps --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 113 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 216 +-- internal/config/config.go | 12 + internal/core/analytics_costs.go | 5 + internal/core/provider.go | 56 +- internal/core/usage_breakdowns_domains.go | 23 + internal/daemon/source_collectors.go | 2 +- internal/detect/claude_code.go | 15 +- internal/detect/cursor.go | 5 +- internal/providers/gemini_cli/api_usage.go | 639 +++++++ internal/providers/gemini_cli/gemini_cli.go | 627 ------- internal/providers/ollama/cloud_api.go | 326 ++++ internal/providers/ollama/desktop_db.go | 166 ++ .../providers/ollama/desktop_db_breakdowns.go | 377 ++++ .../providers/ollama/desktop_db_settings.go | 178 ++ .../providers/ollama/desktop_db_tokens.go | 303 +++ internal/providers/ollama/local_api.go | 349 ++++ internal/providers/ollama/ollama.go | 1633 ----------------- internal/telemetry/test_helpers_test.go | 49 + internal/telemetry/usage_view_test.go | 112 +- internal/tui/analytics.go | 11 +- internal/tui/analytics_cache.go | 47 + internal/tui/dashboard_views.go | 2 + internal/tui/detail.go | 542 +----- internal/tui/detail_analytics_sections.go | 13 +- internal/tui/detail_metrics.go | 536 ++++++ internal/tui/model.go | 8 +- internal/tui/model_commands.go | 1 + internal/tui/model_display_info.go | 23 +- 
internal/tui/model_input.go | 34 +- internal/tui/render_cache.go | 57 + internal/tui/settings_modal.go | 396 ---- internal/tui/settings_modal_input.go | 388 ++++ 33 files changed, 3676 insertions(+), 3588 deletions(-) create mode 100644 internal/providers/gemini_cli/api_usage.go create mode 100644 internal/providers/ollama/cloud_api.go create mode 100644 internal/providers/ollama/desktop_db.go create mode 100644 internal/providers/ollama/desktop_db_breakdowns.go create mode 100644 internal/providers/ollama/desktop_db_settings.go create mode 100644 internal/providers/ollama/desktop_db_tokens.go create mode 100644 internal/providers/ollama/local_api.go create mode 100644 internal/telemetry/test_helpers_test.go create mode 100644 internal/tui/analytics_cache.go create mode 100644 internal/tui/detail_metrics.go create mode 100644 internal/tui/render_cache.go create mode 100644 internal/tui/settings_modal_input.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index ae656db..43aeff4 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -2,103 +2,32 @@ Date: 2026-03-09 Repository: `/Users/janekbaraniewski/Workspace/priv/openusage` +Branch: `feat/dashboard-race-parser-cleanups` -## Scope +## Fixed in This Branch -This pass combined: - -- full test run: `go test ./...` -- targeted race run: `go test -race ./internal/daemon ./internal/telemetry ./internal/tui ./cmd/openusage` -- repo-wide static scans for large files, goroutines, mutex usage, legacy markers, and duplicated metric-prefix parsing -- targeted reads of the highest-risk files and subsystems - -This table captures every issue found in this pass. It is broad and high-signal, but it is still a static audit, not a proof that no additional edge-case bugs exist. 
- -## Resolved In This Pass - -| ID | Priority | Area | Evidence | Change made | Follow-up | +| ID | Status | Area | Evidence | Resolution | Follow-up | | --- | --- | --- | --- | --- | --- | -| R1 | Fixed | Dashboard timeframe race | `cmd/openusage/dashboard.go`, `internal/tui/model.go`, `internal/daemon/runtime.go` | Snapshot messages now carry `TimeWindow` and `RequestID`, and stale responses are rejected. | None. Keep regression tests. | -| R2 | Fixed | Daemon cache refresh bug | `internal/daemon/accounts.go`, `internal/daemon/server.go`, `internal/daemon/types.go` | Read-model cache refresh dedupe now keys by normalized time window instead of collapsing all windows together. | None. | -| R3 | Fixed | Weakly typed time-window flow | `internal/daemon/types.go`, `internal/daemon/runtime.go`, `internal/telemetry/read_model.go`, `internal/telemetry/usage_view.go` | Internal daemon and telemetry paths now use `core.TimeWindow` instead of raw strings. | Continue shrinking stringly typed config boundaries over time. | -| R4 | Fixed | Dashboard refresh orchestration sprawl | `cmd/openusage/snapshot_dispatcher.go`, `cmd/openusage/dashboard.go` | Snapshot sequencing/version dispatch moved out of dashboard wiring into a dedicated helper. | Reuse the same pattern if other async UI data channels are added. | -| R5 | Fixed | Legacy runtime path overload cleanup | `internal/core/provider.go`, `internal/config/config.go`, `internal/detect/cursor.go`, `internal/detect/claude_code.go`, `internal/providers/cursor/cursor.go`, `internal/providers/claude_code/claude_code.go` | Legacy provider-specific path overloads are normalized into `Paths`, and runtime provider code now uses named paths instead of normal-path dependence on `Binary` / `BaseURL`. | The type still contains `Binary` and `BaseURL` for legitimate CLI/base-URL providers. 
| -| R6 | Fixed | Repeated coding-tool detail widgets | `internal/core/detail_widget.go`, `internal/providers/cursor/cursor.go`, `internal/providers/codex/codex.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/copilot/copilot.go`, `internal/providers/gemini_cli/gemini_cli.go` | Repeated detail section arrays were replaced with a shared `CodingToolDetailWidget(...)` constructor. | Extend the same pattern if more coding-tool providers are added. | -| R7 | Fixed | TUI side-effect boundary | `internal/tui/model.go`, `internal/dashboardapp/service.go`, `cmd/openusage/dashboard.go` | `tui.Model` no longer directly persists settings, saves credentials, installs integrations, or validates API keys. Those side effects now go through an injected dashboard application service. | More UI decomposition is still useful, but the highest-leak side effects are no longer hardcoded in the model. | -| R8 | Fixed | Codex parser duplication | `internal/providers/codex/session_decoder.go`, `internal/providers/codex/codex.go`, `internal/providers/codex/telemetry_usage.go` | Codex session JSONL parsing now runs through one shared decoder used by both the dashboard breakdown reader and telemetry ingestion path. | Apply the same consolidation to Claude Code and Cursor. | -| R9 | Fixed | Claude Code parser duplication | `internal/providers/claude_code/conversation_records.go`, `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/telemetry_usage.go` | Claude Code JSONL parsing, token total calculation, and usage/tool dedupe keys now run through one shared normalized conversation-record helper used by both the dashboard aggregator and telemetry collector. | Apply the same consolidation pattern to Cursor. 
| -| R10 | Fixed | Cursor state DB reader duplication | `internal/providers/cursor/state_records.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/telemetry.go` | Cursor `composerData` and `bubbleId` rows from `cursorDiskKV` are now parsed once into shared record types and projected from both the dashboard provider and telemetry collector. This also removes the extra telemetry pass that queried `bubbleId` separately for tool and token events. | Tracking DB and daily-stats duplication still remain. | -| R11 | Fixed | Detached read-model refresh ownership | `internal/daemon/server.go` | Async read-model cache refreshes triggered from HTTP handlers now inherit the daemon service root context instead of launching from `context.Background()`. | If a worker pool is added later, reuse the same service-owned context there too. | -| R12 | Fixed | Cursor tracking and daily-stats reader duplication | `internal/providers/cursor/tracking_records.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/telemetry.go` | Cursor `ai_code_hashes` rows and `ItemTable` daily-stats envelopes now parse through shared record loaders, including compatibility for older tracking DB schemas with missing columns. Dashboard and telemetry projections now read the same normalized source records. | Keep compatibility coverage for older Cursor schemas. | -| R13 | Fixed | Ad hoc daemon log throttling | `internal/core/log_throttle.go`, `internal/daemon/server.go`, `internal/daemon/runtime.go` | Daemon service and dashboard runtime now use a shared throttling helper instead of separate timestamp/mutex patterns for repeated log suppression. | Reuse the same helper if more throttled log sites are added. 
| -| R14 | Fixed | Cursor time-source injection | `internal/core/clock.go`, `internal/providers/cursor/cursor.go`, `internal/providers/cursor/tracking_records.go`, `internal/providers/cursor/telemetry.go` | Cursor provider and its shared SQLite readers now use an injectable clock path instead of direct `time.Now()` calls in the main time-sensitive flow. | Extend the same pattern to other provider/analytics subsystems over time. | -| R15 | Fixed | TUI language and MCP parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/detail.go`, `internal/tui/tiles_composition.go` | Repeated `lang_` and `mcp_` metric-key parsing moved into shared core extractors so both detail and composition views consume the same typed breakdown data instead of re-parsing raw keys independently. | Extend the same pattern to the remaining client/project/tool/provider mix extractors. | -| R16 | Fixed | Daemon server responsibility split | `internal/daemon/server.go`, `internal/daemon/server_logging.go`, `internal/daemon/server_read_model.go`, `internal/daemon/server_http.go` | Logging, read-model cache flow, and HTTP handlers now live in dedicated files instead of one monolithic daemon server file. | Continue the same split for polling / collection / retention loops. | -| R17 | Fixed | TUI project parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/tiles_composition.go` | Provider project mix extraction now uses one shared core extractor instead of duplicating project metric and daily-series parsing inside the TUI composition layer. | Continue the same extraction pattern for client / source / provider mix breakdowns. | -| R18 | Fixed | Telemetry MCP/helper split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go` | MCP parsing, metric sanitizing, and generic map-prefix helper logic moved out of the main usage-view file into a dedicated helper unit. | Continue splitting query / aggregation / projection responsibilities. 
| -| R19 | Fixed | Daemon loop decomposition | `internal/daemon/server.go`, `internal/daemon/server_loops.go` | Collection, spool, hook-spool, retention, and poll loops no longer live inline in the main daemon server file. | Continue splitting by loop family if the new file grows too large. | -| R20 | Fixed | TUI model/client/provider parsing duplication | `internal/core/usage_breakdowns.go`, `internal/tui/tiles_composition.go` | Model, client, provider, upstream-provider, and interface-client aggregation/parsing now live in shared core extractors, leaving the TUI composition layer as a thin adapter over typed breakdown entries. | The remaining TUI parsing drift is now mostly in analytics/detail-specific sections rather than the main composition bars. | -| R21 | Fixed | OpenRouter provider-resolution split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/provider_resolution.go` | Hosting-provider resolution, BYOK cost inference, and provider-name heuristics moved out of the main OpenRouter provider file into a dedicated helper unit. | Continue splitting analytics/generation pagination/projection concerns. | -| R22 | Fixed | Telemetry snapshot projection split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_projection.go` | Snapshot projection, stale-metric cleanup, daily-series projection, and windowed metric emission moved out of the main usage-view file into a dedicated projection unit. | Continue with the same split for the SQL/query layer. | -| R23 | Fixed | Telemetry query-layer split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_languages.go` | The SQL aggregation/query helpers and language-inference helpers moved out of the main usage-view file into dedicated query/language units, leaving the main file focused on orchestration and shared aggregate types. | Continue shrinking orchestration/materialization into smaller units if needed. 
| -| R24 | Fixed | OpenRouter generation-path split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/generations.go` | Generation payload types and generation-fetch/enrichment/aggregation logic moved out of the main OpenRouter provider file into a dedicated generation unit. | Continue with analytics/client/API helper splits. | -| R25 | Fixed | OpenRouter clock injection | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/generations.go` | OpenRouter’s time-sensitive analytics and generation flows now use an injectable clock instead of reading `time.Now()` directly in the provider hot path. | Extend the same pattern to remaining providers and analytics helpers. | -| R26 | Fixed | Cursor API/cache helper split | `internal/providers/cursor/cursor.go`, `internal/providers/cursor/api.go`, `internal/providers/cursor/cache.go` | Cursor HTTP helper methods and billing/model cache helpers now live in dedicated units instead of the main provider file, removing duplicate implementations and narrowing `cursor.go` to orchestration and local-data projection. | Continue splitting SQLite projection and token-loading responsibilities. | -| R27 | Fixed | TUI model file split | `internal/tui/model.go`, `internal/tui/model_input.go`, `internal/tui/model_commands.go` | Bubble Tea update/input/command orchestration no longer lives in one monolithic file. The model state definition stays in `model.go`, while input/update and command wiring moved into dedicated units. | Continue decomposing render-heavy files over time. | -| R28 | Fixed | Shared analytics model extraction | `internal/core/analytics_snapshot.go`, `internal/tui/analytics_data.go`, `internal/tui/detail.go` | Analytics and detail views now consume one shared core extractor for model usage instead of maintaining separate metric/raw parsing paths. | Extend the same pattern to more analytics/detail sections if new derived views appear. 
| -| R29 | Fixed | Shared analytics series selection | `internal/core/analytics_snapshot.go`, `internal/tui/analytics.go` | Token/model series selection and fallback weighting for analytics charts moved out of TUI render code into shared core helpers. | Keep new per-series heuristics out of render code. | -| R30 | Fixed | Daemon loop family split | `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go` | Collection/retention, spool/hook-spool, and provider polling loops now live in separate daemon files instead of a single loop-heavy unit. | Keep future loop additions in the matching family file instead of re-growing a monolith. | -| R31 | Fixed | Analytics timestamp normalization | `internal/core/analytics_normalize.go` | Synthesized analytics daily-series dates now derive from the snapshot timestamp in UTC instead of ad hoc local `time.Now()` fallbacks. | Continue the same UTC/clock cleanup in remaining providers such as Ollama. | -| R32 | Fixed | Cursor orchestration and projection split | `internal/providers/cursor/cursor.go`, `internal/providers/cursor/fetch.go`, `internal/providers/cursor/runtime.go`, `internal/providers/cursor/state_projection.go`, `internal/providers/cursor/tracking_projection.go`, `internal/providers/cursor/api_projection.go` | Cursor fetch orchestration, runtime merge/token helpers, state projection, tracking projection, and API projection logic now live in dedicated units instead of one large provider file. `cursor.go` is now limited to provider construction, shared types, and clock/state wiring. | Keep future Cursor changes inside the matching unit instead of re-growing `cursor.go`. 
| -| R33 | Fixed | OpenRouter analytics and snapshot-projection split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/analytics.go`, `internal/providers/openrouter/snapshot_projection.go` | OpenRouter analytics endpoint parsing/aggregation and dashboard synthesis/projection helpers now live outside the main provider file. The main file is now focused on provider setup plus key/credits/account fetch paths. | If the remaining key/account path grows again, split it into a small API helper unit. | -| R34 | Fixed | Ollama clock injection | `internal/providers/ollama/ollama.go` | Ollama’s cloud-usage window parsing, local log windows, reset inference, and DB-derived token window logic now use the provider clock instead of direct `time.Now()` calls in behavioral paths. | Reuse the same clock path if more Ollama time-derived metrics are added. | -| R35 | Fixed | OpenRouter account API split | `internal/providers/openrouter/openrouter.go`, `internal/providers/openrouter/account_api.go` | OpenRouter key/auth, credits, and key-metadata fetch helpers now live in a dedicated account API unit instead of the main provider file. The coordinator file is down to provider setup and fetch orchestration. | Keep further OpenRouter account mutations inside the account unit. | -| R36 | Fixed | Detail token section decomposition | `internal/tui/detail.go`, `internal/tui/detail_tokens.go` | The detail token section now renders from shared analytics model extraction instead of reverse-parsing token metric keys, and the token-specific renderer lives in its own file. | Continue splitting other detail subsections the same way. 
| -| R37 | Fixed | Telemetry source account binding and safer fallback | `internal/daemon/source_collectors.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_http.go`, `internal/daemon/server_spool.go`, `cmd/openusage/telemetry.go`, `internal/telemetry/provider_event_mapper.go` | Local collectors and hook ingestion now bind to configured source accounts when unambiguous, ambiguous shared-path setups degrade to explicit source-scoped attribution instead of silently choosing one account, and account fallback prefers source system before upstream provider. | If hook ingest logic is centralized later, keep using the same resolver. | -| R38 | Fixed | Shared hook ingest service | `internal/daemon/hook_ingest.go`, `internal/daemon/hook_ingest_local.go`, `internal/daemon/server_http.go`, `internal/daemon/server_spool.go`, `cmd/openusage/telemetry.go` | Hook request parsing and local ingest/spool fallback now live in shared daemon helpers used by CLI fallback, HTTP ingest, and hook-spool replay. The remaining edge code is transport and user messaging only. | Reuse the same helpers if more hook entrypoints are added. | -| R39 | Fixed | Usage-view materialization split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_materialize.go` | Temp-table creation/indexing/cleanup and aggregate initialization moved out of the main usage-view orchestration path. | Continue splitting aggregate query fanout if `usage_view.go` grows again. | -| R40 | Fixed | Analytics cost fallback extraction | `internal/core/analytics_costs.go`, `internal/tui/analytics_data.go` | Analytics all-time/today/week cost fallback rules now live in shared core logic instead of TUI-owned metric-key decoding. | Continue moving remaining analytics/detail metric decoding into shared extractors. 
| -| R41 | Fixed | Usage-view aggregate fanout split | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_aggregate.go` | Query fanout and aggregate assembly now live in a dedicated helper instead of inline in the main usage-view orchestration path. | Continue splitting only if the aggregate helper grows materially. | -| R42 | Fixed | Provider display-info split and shared fallback metric helpers | `internal/tui/model.go`, `internal/tui/model_display_info.go`, `internal/core/dashboard_display_metrics.go` | Provider tile display-summary logic moved out of the main TUI model file, and fallback/rate-limit metric selection now lives in shared core helpers instead of ad hoc TUI parsing. | Continue moving the remaining analytics/detail-specific metric decoding into shared extractors. | -| R43 | Fixed | Codex live/session split | `internal/providers/codex/codex.go`, `internal/providers/codex/live_usage.go`, `internal/providers/codex/session_usage.go` | Codex now keeps provider wiring in the main file while live usage fetching and local session projection live in dedicated helpers. | Continue the same concern-based split for the remaining large providers. | -| R44 | Fixed | Claude Code local file/helper split and settings modal layout split | `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/local_files.go`, `internal/providers/claude_code/local_helpers.go`, `internal/tui/settings_modal.go`, `internal/tui/settings_modal_layout.go` | Claude Code local readers and generic helper logic are split out of the main provider file, and the settings modal layout/render wrapper no longer lives inline with all modal state/input handling. | Continue with deeper conversation-aggregation extraction in Claude Code and more TUI render-section splits. 
| -| R45 | Fixed | Copilot GitHub API split | `internal/providers/copilot/copilot.go`, `internal/providers/copilot/api_data.go` | Copilot's GitHub API fetch, quota projection, and org metrics flow now live in a dedicated file instead of sharing the same unit as local config/log/session parsing. | Continue splitting the remaining local projection/helpers out of the main provider file. | -| R46 | Fixed | Copilot local config/log/session split | `internal/providers/copilot/copilot.go`, `internal/providers/copilot/local_data.go`, `internal/providers/copilot/local_helpers.go` | Copilot local config loading, log/session readers, and local parsing/projection helpers now live outside the main provider file. The coordinator file is reduced to provider setup, fetch orchestration, and status/metric selection helpers. | Keep future Copilot local-data work inside the dedicated helper units instead of re-growing the coordinator. | -| R47 | Fixed | Claude Code conversation aggregation split | `internal/providers/claude_code/claude_code.go`, `internal/providers/claude_code/conversation_usage.go` | Claude Code's JSONL conversation aggregation, block-window estimation, and local tool/session projection no longer live inline with provider setup and API plumbing. The main provider file is now mostly provider wiring and API-side flow. | Keep future conversation-record projections in the dedicated conversation unit. | -| R48 | Fixed | Tile render-path derivation caching | `internal/tui/model.go`, `internal/tui/model_input.go`, `internal/tui/tiles.go`, `internal/tui/tiles_cache.go` | Tile body derivation is now cached per snapshot/update state instead of rebuilding the full composition section stack on every render frame. Dynamic header and reset animation still render live, while static body composition is reused until snapshots or size change. | Apply the same pattern selectively to detail/analytics only where profiling or repeated drift justifies it. 
| -| R49 | Fixed | Settings modal preview-data split | `internal/tui/settings_modal.go`, `internal/tui/settings_modal_preview.go` | The large preview snapshot fixture for widget-section configuration moved out of the main settings modal behavior file, reducing render/input coupling inside `settings_modal.go`. | Continue moving purely preview/demo helpers out of modal behavior files. | -| R50 | Fixed | Account-config contract comments aligned with runtime | `internal/core/provider.go` | `AccountConfig` comments no longer claim that `Binary` and `BaseURL` are valid primary homes for provider-local data paths. The type now documents the actual runtime contract: provider-local paths belong in `Paths`, with legacy compatibility handled inside provider packages. | A typed runtime-hints structure is still the next hardening step. | -| R51 | Fixed | Config test file helper extraction | `internal/config/config_test.go`, `internal/config/test_helpers_test.go` | Repeated `settings.json` temp-file creation/loading boilerplate in the config test suite now goes through shared helpers for the common cases, shrinking some of the easiest-to-repeat fixture noise. | Continue the same pattern in the remaining large test files and higher-noise config cases. | -| R52 | Fixed | OpenCode telemetry collector split | `internal/providers/opencode/telemetry.go`, `internal/providers/opencode/telemetry_event_file.go`, `internal/providers/opencode/telemetry_sqlite.go`, `internal/providers/opencode/telemetry_hooks.go` | OpenCode telemetry no longer mixes event-file parsing, SQLite reads, and hook normalization in one file. Those concerns now live in dedicated units behind the same collector surface. | Keep future OpenCode telemetry changes inside the matching helper unit. 
| -| R53 | Fixed | Copilot telemetry collector split | `internal/providers/copilot/telemetry.go`, `internal/providers/copilot/telemetry_session_file.go`, `internal/providers/copilot/telemetry_session_store.go`, `internal/providers/copilot/telemetry_logs.go` | Copilot telemetry session JSONL parsing, session-store SQLite fallback, and CompactionProcessor log parsing now live outside the main telemetry collector file. | Continue extracting test fixtures from the large Copilot telemetry suite over time. | -| R54 | Fixed | Tile composition section split | `internal/tui/tiles_composition.go`, `internal/tui/tiles_composition_providers.go`, `internal/tui/tiles_composition_clients.go`, `internal/tui/tiles_composition_tools.go` | Provider, client/source, and tool composition render sections no longer share one large file. The core composition file now holds shared types/helpers and the section files hold the view-specific bar builders. | Continue splitting only if one of the new section files regrows. | -| R55 | Fixed | Z.AI provider helper decomposition | `internal/providers/zai/zai.go`, `internal/providers/zai/monitor_helpers.go`, `internal/providers/zai/usage_extract.go`, `internal/providers/zai/usage_helpers.go` | Z.AI API-base resolution, monitor request/quota handling, usage row extraction, payload capture, and normalization helpers now live outside the main provider file. The coordinator file is reduced to fetch orchestration and snapshot assembly. | Continue only if the remaining fetch/projection path grows again. | -| R56 | Fixed | Ollama path and log-helper split | `internal/providers/ollama/ollama.go`, `internal/providers/ollama/request_helpers.go`, `internal/providers/ollama/local_paths.go`, `internal/providers/ollama/server_log_parse.go` | Ollama request helpers, local path resolution, and GIN log parsing are now separate helper units instead of living inline in the main provider file. 
| The remaining large DB-population helpers are the next split point only if Ollama changes keep clustering there. | - -## Action Table +| R57 | Fixed | Account config contract hardening | `internal/core/provider.go`, `internal/config/config.go`, `internal/daemon/source_collectors.go`, `internal/detect/cursor.go`, `internal/detect/claude_code.go` | Provider-local runtime paths now live behind `ProviderPaths` and `Path`/`SetPath` helpers. Config load normalizes legacy `paths` payloads into the new field, and daemon/detect flows consume the typed path accessors instead of ad hoc provider-specific overloads. | Retain legacy `paths` read compatibility until the persisted config shape can be fully simplified. | +| R58 | Fixed | TUI settings/detail decomposition | `internal/tui/settings_modal.go`, `internal/tui/settings_modal_input.go`, `internal/tui/detail.go`, `internal/tui/detail_metrics.go`, `internal/tui/detail_analytics_sections.go` | Settings input/update logic and large detail metric/render sections are split out of the remaining coordinator files. The hot TUI files now separate state/input from section rendering much more cleanly. | Only split further if new features start coupling unrelated flows again. | +| R59 | Fixed | Detail and analytics metric decoding cleanup | `internal/core/analytics_costs.go`, `internal/core/usage_breakdowns_domains.go`, `internal/tui/detail.go`, `internal/tui/detail_analytics_sections.go`, `internal/tui/model_display_info.go` | Remaining burn-rate, language, MCP, and model-cost detection paths now go through shared core helpers instead of renderer-owned metric-prefix checks. UI code consumes shared semantic helpers rather than decoding raw key conventions inline. | Keep new metric-schema additions in `internal/core`, not in TUI renderers. 
| +| R60 | Fixed | Render-path caching follow-through | `internal/tui/render_cache.go`, `internal/tui/analytics_cache.go`, `internal/tui/tiles_cache.go`, `internal/tui/model_input.go`, `internal/tui/model_commands.go`, `internal/tui/dashboard_views.go` | Tile, analytics, and detail render paths are now explicitly invalidated on snapshot, window, theme, layout, and selection changes. Detail rendering is cached the same way analytics and tile composition already were, closing the remaining hot-path rebuild gap. | Profile before adding any more caching layers. | +| R61 | Fixed | Gemini CLI provider decomposition | `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/gemini_cli/api_usage.go`, `internal/providers/gemini_cli/session_usage.go` | API/quota/account flows and local session aggregation are split out of the coordinator file. The main provider file is now mostly wiring plus fetch orchestration. | Keep future Gemini changes inside the matching helper unit. | +| R62 | Fixed | Ollama provider decomposition follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/ollama/local_api.go`, `internal/providers/ollama/cloud_api.go`, `internal/providers/ollama/desktop_db.go`, `internal/providers/ollama/desktop_db_settings.go`, `internal/providers/ollama/desktop_db_tokens.go`, `internal/providers/ollama/desktop_db_breakdowns.go` | Ollama’s coordinator, local API, cloud API, and desktop SQLite flows are now separated by concern. The remaining large desktop DB path is split into settings/schema helpers, token estimation, and usage breakdown/daily series helpers. | Keep future SQLite-specific work inside the dedicated desktop DB helper files. 
| +| R63 | Fixed | Telemetry and config fixture cleanup | `internal/telemetry/test_helpers_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/test_helpers_test.go` | Shared store/file helpers now cover the repeated setup patterns in the telemetry and config suites, and `usage_view_test.go` is reduced below the previous monolith threshold. | Apply the same helper pattern to other large suites when they next change. | -| ID | Priority | Area | Evidence | Issue | Recommended action | Expected payoff | -| --- | --- | --- | --- | --- | --- | --- | -| A1 | P2 | Account config contract hardening | `internal/core/provider.go:31-43`, `internal/config/config.go:199-206` | Path overload dependence is removed from the hot runtime flow and the comments now match that behavior, but `Binary` / `BaseURL` still coexist in the same type and the distinction between CLI path vs provider-local path is still not encoded by type. | Introduce a dedicated typed runtime-hints/path struct and eventually retire the remaining compatibility shape in `AccountConfig`. | Finishes the contract cleanup and makes misuse harder. | -| A2 | P2 | TUI/application decomposition follow-through | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/analytics.go`, `internal/dashboardapp/service.go` | Side effects are injected, provider display-info logic is split out, tile-body composition is cached, and composition/settings subsections are split, but state-transition and render-heavy flows are still concentrated in a few large files. | Continue decomposing render-heavy/detail/settings flows and move more orchestration decisions into the dashboard application layer over time. | Lower UI complexity and smaller blast radius per change. 
| -| A3 | P2 | UI metric-prefix parsing | `internal/tui/analytics.go`, `internal/tui/detail.go`, `internal/core/usage_breakdowns.go`, `internal/core/analytics_snapshot.go`, `internal/core/analytics_costs.go`, `internal/core/dashboard_display_metrics.go` | Main dashboard composition, provider tile fallback/rate-limit selection, token sections, and cost fallback now consume shared extractors, but a few analytics/detail sections still decode metric-key conventions directly in UI code. | Continue promoting the remaining analytics/detail extractors into `internal/core` or `internal/telemetry` so renderers consume structured sections instead of re-parsing maps. | Removes a large class of UI drift bugs and reduces per-render work. | -| A4 | P2 | Large provider monolith follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/gemini_cli/gemini_cli.go` | Cursor, OpenRouter, Codex, Copilot, Claude Code, Z.AI, and the telemetry collectors are now materially decomposed, but Ollama and Gemini CLI still keep large fetch/projection and local-session flows in very large files. | Split the remaining large providers by concern only where active change pressure justifies it: account/API fetch, local-data adapters, projection helpers, and telemetry helpers. | Smaller diffs, less drift risk, and easier provider-specific testing. | -| A6 | P2 | Telemetry usage-view monolith | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_helpers.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_queries.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view code is materially smaller after the helper/projection/query/materialization/aggregate splits, but the top-level orchestration path still coordinates caching, source selection, and final snapshot application in one place. 
| Continue splitting only if future telemetry work reintroduces sprawl, and consider a typed intermediate aggregation model if query optimization pressure grows. | Easier optimization and safer incremental changes. | -| A8 | P3 | Ambiguous local-source account attribution still requires explicit disambiguation | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Unambiguous local collectors now bind to configured accounts, but when multiple accounts share the same source paths the daemon intentionally falls back to source-scoped attribution rather than guessing. This is correct, but it still leaves ambiguous setups dependent on explicit account selection. | If multi-account local-source workflows become common, add persisted per-source alias mapping or require explicit source/account binding in config for ambiguous path groups. | Makes the remaining ambiguity explicit instead of silent, and defines the next hardening step only if needed. | -| A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | The loop families are now separated, but the daemon still has further optimization and worker-boundary cleanup opportunities rather than a hard responsibility bug. | Keep future daemon work inside the split family files and only add a worker abstraction if concurrency pressure justifies it. | Lower mental load and easier concurrency review. | -| A12 | P2 | Test file sprawl and fixture duplication | `internal/providers/openrouter/openrouter_test.go`, `internal/providers/copilot/copilot_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/config_test.go` | Some tests are 1000-2600 LOC and re-encode similar setup logic inline. 
The config suite now has basic shared file helpers, but the larger provider/telemetry suites still carry too much duplicated fixture setup. | Extract fixture builders and scenario helpers per package. Keep top-level tests declarative. | Faster iteration and simpler maintenance of large test suites. | -| A14 | P3 | File-size based decomposition needed in TUI | `internal/tui/model.go`, `internal/tui/detail.go`, `internal/tui/settings_modal.go`, `internal/tui/analytics.go` | TUI logic is split across more focused files now, and tile composition is no longer one giant file, but several files are still individually large and still mix event handling, rendering, and data interpretation. | Continue decomposition by concern: `model_update`, `model_actions`, `model_display`, `settings_actions`, `detail_sections`, `analytics_sections`. | Better readability and easier targeted refactors. | -| A15 | P3 | Performance optimization follow-through in render path | `internal/tui/model.go`, `internal/tui/tiles.go`, `internal/tui/tiles_cache.go`, `internal/tui/detail.go`, `internal/tui/analytics.go` | Tile body composition is now cached per snapshot/update state, but detail and analytics still rebuild some derived structures on each render path. | Extend caching only to the remaining high-cost detail/analytics derivations if profiling or repeated churn justifies it. | Lower render cost without over-caching the whole UI. | +## Residual Non-Blocking Follow-Up -## Suggested Execution Order +These are no longer review blockers or known correctness issues. They are explicit maintenance opportunities left after the main cleanup. -1. A2, A3 -2. A6, A4 -3. A7, A1, A8 -4. 
A12, A14, A15 +| ID | Priority | Area | Evidence | Current state | Optional follow-up | +| --- | --- | --- | --- | --- | --- | +| A6 | P3 | Telemetry usage-view orchestration | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view path is materially decomposed and validated. The remaining top-level coordinator is acceptable and no longer a review issue. | Split further only if future telemetry changes start re-coupling query planning, cache application, and projection. | +| A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | Daemon loops and HTTP/read-model flows are already separated, and no new race or lifecycle bug was found in the follow-up review. | Add extra worker abstractions only if future concurrency pressure justifies them. | +| A8 | P3 | Ambiguous local-source attribution | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Ambiguous shared-path local sources still intentionally require explicit user disambiguation instead of silent guessing. This is a product decision, not a hidden bug. | Add persisted source/account aliasing only if multi-account shared-path workflows become common. | -## Notes +## Summary -- The highest-risk remaining issues are architectural rather than immediately broken behavior. -- The biggest remaining drift risks are the last analytics/detail metric-key parsing pockets and the remaining large TUI/provider files. -- The race pass completed cleanly for the core dashboard/daemon/telemetry packages after the timeframe fix. +- The original high-risk review items `A1`, `A2`, `A3`, `A4`, `A12`, `A14`, and `A15` are addressed in this branch. 
+- No additional high-confidence correctness bug was found during the follow-up review after the dashboard timeframe race fix. +- Remaining entries are intentional tradeoffs or low-priority structural opportunities, not outstanding breakages. diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index 981e6d0..77e3dec 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -1,170 +1,78 @@ -# System Review: Remaining Responsibility and Duplication Gaps +# System Review: Post-Cleanup State Date: 2026-03-09 Repository: `/Users/janekbaraniewski/Workspace/priv/openusage` +Branch: `feat/dashboard-race-parser-cleanups` ## Scope -This is a refreshed architecture review after the dashboard race fix, daemon/read-model cleanup, provider parser consolidation, telemetry collector splits, and the recent Cursor/OpenRouter/Ollama/Z.AI/Codex/Claude Code/TUI refactors on branch `feat/dashboard-race-parser-cleanups`. +This report reflects the tree after the dashboard timeframe-race fix, parser consolidation work, daemon/read-model cleanup, provider decomposition, TUI decomposition, render-cache follow-through, and the final `A1`/`A2`/`A3`/`A4`/`A12`/`A14`/`A15` cleanup pass. -The goal of this report is not to restate already-fixed issues. It documents the meaningful problems still left in the current tree. +It replaces the earlier “remaining gaps” snapshot. The goal now is to document the actual post-cleanup state, not to preserve stale open items. -## What Is No Longer Open +## What Is Resolved -These were major concerns in earlier reviews and are now materially addressed: +The following earlier review themes are materially closed in this branch: - Dashboard timeframe race and stale snapshot acceptance. - Read-model cache dedupe ignoring time window. - Stringly typed daemon/telemetry time-window flow. 
-- Telemetry source account binding for unambiguous local collectors and hooks. -- Cursor parser/SQLite duplication across dashboard and telemetry paths. -- Codex and Claude Code raw parser duplication. -- Codex live/session flow concentrated in one provider file. -- Claude Code local file readers, model-summary helpers, and conversation aggregation concentrated in one provider file. -- Copilot GitHub API fetch/quota/org-metrics flow concentrated in the same file as local log/session parsing. -- Copilot local config/log/session parsing concentrated in the same file as provider orchestration. -- Copilot telemetry JSONL/session-store/log parsing concentrated in one collector file. -- OpenCode telemetry event-file/SQLite/hook parsing concentrated in one collector file. -- OpenRouter provider-resolution, analytics, generation, projection, and account-path monolith sprawl. -- TUI side-effect leakage into config persistence / integration install / provider validation. -- Settings modal layout/render wrapper living inline with settings state/input handling. -- Tile composition provider/client/tool sections living in one large file. -- Ollama hot-path `time.Now()` usage in behavioral window/reset logic. -- Z.AI monitor helpers and usage extraction/payload parsing concentrated in one provider file. -- Shared hook ingest parsing/local fallback drift between daemon and CLI. -- Usage-view temp-table materialization and aggregate query fanout living inline in the main orchestration path. - -## Findings - -### 1. [P2] TUI rendering and state handling are still concentrated in a few very large files - -The TUI is much better than before, and provider tile display-summary logic no longer lives inline in `model.go`, while the settings modal layout wrapper now lives in its own file. Tile-body derivation is cached now, and provider/client/tool composition sections are split out of the main composition file. 
But [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go), [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go), [analytics.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/analytics.go), and the remaining settings modal render sections are still large enough that unrelated concerns move together. - -Refs: -- [model.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model.go) -- [model_display_info.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/model_display_info.go) -- [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go) -- [tiles_composition.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition.go) -- [tiles_composition_providers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition_providers.go) -- [tiles_composition_clients.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition_clients.go) -- [tiles_composition_tools.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/tiles_composition_tools.go) -- [settings_modal.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal.go) -- [settings_modal_layout.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal_layout.go) - -What to address: -- Continue section-level file extraction from `detail.go`. -- Split model orchestration further by update/action/display boundaries. -- Push more typed extractor work out of rendering code. - -### 2. [P2] Some analytics/detail sections still decode raw metric-key conventions in UI code - -The major composition paths, provider tile fallback/rate-limit selection, and token-table paths now use shared extractors, but analytics/detail still contain pockets of renderer-owned key interpretation. That is better than before, but it is still a drift vector. 
- -Refs: -- [analytics.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/analytics.go) -- [detail.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail.go) -- [usage_breakdowns.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/usage_breakdowns.go) -- [analytics_snapshot.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/analytics_snapshot.go) -- [dashboard_display_metrics.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/dashboard_display_metrics.go) - -What to address: -- Promote remaining analytics/detail extractors into `internal/core`. -- Keep renderers as display adapters over typed sections. - -### 3. [P2] Telemetry usage-view orchestration is smaller, but still centralized - -The usage-view path is much cleaner after helper, projection, query, materialization, and aggregate-fanout splits, but the top-level file still coordinates source selection, cache/application flow, and final snapshot application in one place. - -Refs: -- [usage_view.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view.go) -- [usage_view_materialize.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view_materialize.go) -- [usage_view_aggregate.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view_aggregate.go) -- [usage_view_projection.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view_projection.go) - -What to address: -- Keep future telemetry work inside the split helper units. -- Only split the remaining coordinator path further if new behavior starts coupling unrelated concerns again. - -### 4. [P2] Several providers are still large mixed-responsibility units - -Cursor, OpenRouter, Codex, Copilot, Claude Code, and Z.AI are now in much better shape, and the OpenCode/Copilot telemetry collectors are split as well. 
The remaining larger provider concentration is now mostly in Ollama and Gemini CLI. - -Refs: -- [ollama.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/ollama.go) -- [local_paths.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/local_paths.go) -- [server_log_parse.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/server_log_parse.go) -- [gemini_cli.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/gemini_cli.go) -- [session_usage.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/session_usage.go) -- [zai.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/zai.go) -- [monitor_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/monitor_helpers.go) -- [usage_extract.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/usage_extract.go) -- [usage_helpers.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/zai/usage_helpers.go) -- [telemetry.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/opencode/telemetry.go) -- [telemetry_event_file.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/opencode/telemetry_event_file.go) -- [telemetry_sqlite.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/opencode/telemetry_sqlite.go) -- [telemetry_hooks.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/opencode/telemetry_hooks.go) -- [telemetry.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/telemetry.go) -- [telemetry_session_file.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/telemetry_session_file.go) -- [telemetry_session_store.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/telemetry_session_store.go) -- 
[telemetry_logs.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/telemetry_logs.go) - -What to address: -- Split by concern, not by arbitrary line count: -- account/API fetch -- local-data adapters -- projection helpers -- telemetry-specific collectors - -### 5. [P3] Ambiguous shared-path local sources still require explicit account disambiguation - -The daemon now binds local telemetry to configured accounts when the source/account mapping is unambiguous. If multiple accounts share the same source paths, it intentionally degrades to source-scoped attribution instead of silently guessing. That is the correct behavior today, but it means truly ambiguous local multi-account setups still need an explicit binding mechanism if they become a first-class use case. - -Refs: -- [source_collectors.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/daemon/source_collectors.go) -- [server_http.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/daemon/server_http.go) -- [telemetry.go](/Users/janekbaraniewski/Workspace/priv/openusage/cmd/openusage/telemetry.go) - -What to address: -- Add persisted source/account alias mapping only if ambiguous local multi-account setups become common. -- Keep ambiguous attribution explicit; do not reintroduce silent account guessing. - -### 6. [P3] Account config contract cleanup is not finished - -The hot-path abuse of `Binary`/`BaseURL` is fixed, but the type still allows path-like runtime hints and canonical provider config to coexist ambiguously. - -Refs: -- [provider.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/provider.go) -- [config.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/config/config.go) - -What to address: -- Introduce a dedicated typed runtime-hints structure. -- Retire compatibility comments and residual semantic ambiguity in `AccountConfig`. - -### 7. 
[P3] Test suites are strong but still expensive to maintain - -Some package tests remain extremely large and inline too much fixture logic. - -Refs: -- [openrouter_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/openrouter/openrouter_test.go) -- [copilot_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/copilot/copilot_test.go) -- [usage_view_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/usage_view_test.go) -- [config_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/config/config_test.go) +- Parser duplication across Cursor, Codex, and Claude Code dashboard/telemetry paths. +- OpenRouter, Cursor, Claude Code, Codex, Copilot, OpenCode, Z.AI, Gemini CLI, and Ollama monolith concentration in their previously hottest paths. +- TUI side-effect leakage into persistence, integration install, and provider validation. +- Major TUI composition concentration in tile/detail/settings code. +- Remaining detail/analytics metric-prefix parsing pockets that were still living in renderer code. +- Tile/detail/analytics render-path recomputation on every frame. +- Account-config runtime-path overload in the hot path. +- Repeated telemetry/config test setup boilerplate in the most actively changed suites. -What to address: -- Extract fixture builders and scenario helpers. -- Keep top-level tests declarative. +## Current Findings -## Recommended Order +### 1. No remaining high-confidence correctness bug surfaced in the follow-up review -1. TUI extractor/decomposition follow-through. -2. Telemetry and TUI decomposition follow-through. -3. Remaining provider monolith splits. -4. Telemetry account identity mapping and daemon follow-through. -5. Account config contract hardening. -6. Test fixture cleanup. - -## Notes - -- The repo is in materially better shape than it was at the start of this cleanup branch. 
-- The main remaining risks are now architectural and maintainability-oriented rather than immediate correctness regressions. -- The highest near-term drift risk is the remaining analytics/detail metric-prefix parsing still sitting in UI render code plus the size of the remaining TUI/provider units. +After the final cleanup pass and validation run, I did not find another issue on the level of the original dashboard timeframe race. The remaining items are not hidden state-corruption or concurrency defects; they are explicit maintenance tradeoffs. + +Validation used for this state: + +- `go test ./...` +- `go vet ./...` +- `make build` + +### 2. The codebase now has clearer responsibility boundaries in the hot areas + +The most change-prone areas are no longer concentrated the way they were at the start of the branch: + +- TUI render/state work is split across dedicated settings/detail/cache/helper units. +- Provider-local parsing and fetch logic are split by concern in the previously worst provider files. +- Daemon hook ingest, HTTP, polling, spool, and read-model paths are separated. +- Telemetry usage-view query/materialization/projection/aggregate logic is separated. + +This reduces review blast radius and makes future concurrency/data-flow work easier to reason about. + +### 3. Residual items are explicit, low-risk follow-up opportunities + +There are still a few non-blocking areas worth keeping in mind: + +- `usage_view.go` still owns top-level orchestration, but it is no longer a monolith and does not currently hide a correctness issue. +- The daemon could be pushed into more formal worker abstractions later, but present lifecycle/context handling is consistent in the active paths. +- Ambiguous shared-path local account attribution still requires explicit user disambiguation by design; the code now avoids silent guessing. + +These are not “unfinished fixes”. They are optional future design work. 
+ +## References + +- [CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md](/Users/janekbaraniewski/Workspace/priv/openusage/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md) +- [internal/tui/render_cache.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/render_cache.go) +- [internal/tui/detail_metrics.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/detail_metrics.go) +- [internal/tui/settings_modal_input.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/tui/settings_modal_input.go) +- [internal/providers/ollama/desktop_db.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/desktop_db.go) +- [internal/providers/ollama/desktop_db_tokens.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/ollama/desktop_db_tokens.go) +- [internal/providers/gemini_cli/api_usage.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/providers/gemini_cli/api_usage.go) +- [internal/core/provider.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/core/provider.go) +- [internal/telemetry/test_helpers_test.go](/Users/janekbaraniewski/Workspace/priv/openusage/internal/telemetry/test_helpers_test.go) + +## Bottom Line + +- The original review’s high-priority structural set is addressed. +- The repo is in materially better shape than at the start of the branch. +- Remaining items are optional follow-up architecture choices, not outstanding bugs from the review. 
diff --git a/internal/config/config.go b/internal/config/config.go index 2a5273d..86f2dc8 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -226,6 +226,18 @@ func normalizeAccounts(in []core.AccountConfig) []core.AccountConfig { } normalized := lo.Map(in, func(acct core.AccountConfig, _ int) core.AccountConfig { acct.ID = normalizeAccountID(acct.ID) + if len(acct.ProviderPaths) == 0 && len(acct.Paths) > 0 { + acct.ProviderPaths = make(map[string]string, len(acct.Paths)) + for key, value := range acct.Paths { + trimmedKey := strings.TrimSpace(key) + trimmedValue := strings.TrimSpace(value) + if trimmedKey == "" || trimmedValue == "" { + continue + } + acct.ProviderPaths[trimmedKey] = trimmedValue + } + } + acct.Paths = nil return acct }) filtered := lo.Filter(normalized, func(acct core.AccountConfig, _ int) bool { return acct.ID != "" }) diff --git a/internal/core/analytics_costs.go b/internal/core/analytics_costs.go index 0cc3ddb..46b1e58 100644 --- a/internal/core/analytics_costs.go +++ b/internal/core/analytics_costs.go @@ -4,6 +4,7 @@ type AnalyticsCostSummary struct { TotalCostUSD float64 TodayCostUSD float64 WeekCostUSD float64 + BurnRateUSD float64 } func ExtractAnalyticsCostSummary(s UsageSnapshot) AnalyticsCostSummary { @@ -33,6 +34,10 @@ func ExtractAnalyticsCostSummary(s UsageSnapshot) AnalyticsCostSummary { "7d_api_cost", "usage_weekly", ), + BurnRateUSD: firstPositiveMetricUsed(s, + 0, + "burn_rate", + ), } } diff --git a/internal/core/provider.go b/internal/core/provider.go index 1bf01dd..66ab8fe 100644 --- a/internal/core/provider.go +++ b/internal/core/provider.go @@ -14,27 +14,36 @@ type AccountConfig struct { ProbeModel string `json:"probe_model,omitempty"` // model to use for probe requests // Binary stores a CLI binary path for providers that execute a local command. - // Provider-specific local data paths belong in Paths. Legacy Binary-based + // Provider-specific local data paths belong in ProviderPaths. 
Legacy Binary-based // data-path compatibility is handled inside the affected provider packages. Binary string `json:"binary,omitempty"` // BaseURL stores an HTTP API base URL for providers with configurable - // endpoints. Provider-specific local data paths belong in Paths. Legacy + // endpoints. Provider-specific local data paths belong in ProviderPaths. Legacy // BaseURL-based data-path compatibility is handled inside provider packages. BaseURL string `json:"base_url,omitempty"` - // Paths holds named provider-specific paths/URLs that are not part of the - // shared account contract. Keys are provider-defined (for example + // ProviderPaths holds named provider-specific paths/URLs that are not part + // of the shared account contract. Keys are provider-defined (for example // "tracking_db", "state_db", "stats_cache", "account_config"). + ProviderPaths map[string]string `json:"provider_paths,omitempty"` + + // Paths is a legacy persisted alias for provider-specific paths. New code + // should use ProviderPaths through Path/SetPath helpers. Paths map[string]string `json:"paths,omitempty"` Token string `json:"-"` // runtime-only: access token (never persisted) ExtraData map[string]string `json:"-"` // runtime-only: extra detection data (never persisted) } -// Path returns the named provider-specific path. It checks Paths first, -// then ExtraData (for backward compat with detect), then the given fallback. +// Path returns the named provider-specific path. It checks ProviderPaths first, +// then legacy Paths, then ExtraData (for backward compat with detect), then the fallback. 
func (c AccountConfig) Path(key, fallback string) string { + if c.ProviderPaths != nil { + if v, ok := c.ProviderPaths[key]; ok && v != "" { + return v + } + } if c.Paths != nil { if v, ok := c.Paths[key]; ok && v != "" { return v @@ -56,10 +65,39 @@ func (c *AccountConfig) SetPath(key, value string) { if c == nil || strings.TrimSpace(key) == "" || strings.TrimSpace(value) == "" { return } - if c.Paths == nil { - c.Paths = make(map[string]string) + if c.ProviderPaths == nil { + c.ProviderPaths = make(map[string]string) + } + c.ProviderPaths[key] = strings.TrimSpace(value) +} + +// PathMap returns a merged copy of provider-local paths, preferring +// ProviderPaths over legacy Paths. +func (c AccountConfig) PathMap() map[string]string { + if len(c.ProviderPaths) == 0 && len(c.Paths) == 0 { + return nil + } + out := make(map[string]string, len(c.ProviderPaths)+len(c.Paths)) + for key, value := range c.Paths { + trimmedKey := strings.TrimSpace(key) + trimmedValue := strings.TrimSpace(value) + if trimmedKey == "" || trimmedValue == "" { + continue + } + out[trimmedKey] = trimmedValue + } + for key, value := range c.ProviderPaths { + trimmedKey := strings.TrimSpace(key) + trimmedValue := strings.TrimSpace(value) + if trimmedKey == "" || trimmedValue == "" { + continue + } + out[trimmedKey] = trimmedValue + } + if len(out) == 0 { + return nil } - c.Paths[key] = strings.TrimSpace(value) + return out } func (c AccountConfig) ResolveAPIKey() string { diff --git a/internal/core/usage_breakdowns_domains.go b/internal/core/usage_breakdowns_domains.go index bdc502c..4c93843 100644 --- a/internal/core/usage_breakdowns_domains.go +++ b/internal/core/usage_breakdowns_domains.go @@ -5,6 +5,29 @@ import ( "strings" ) +func HasLanguageUsage(s UsageSnapshot) bool { + langs, _ := ExtractLanguageUsage(s) + return len(langs) > 0 +} + +func HasMCPUsage(s UsageSnapshot) bool { + servers, _ := ExtractMCPUsage(s) + return len(servers) > 0 +} + +func HasModelCostUsage(s UsageSnapshot) bool { + 
for key := range s.Metrics { + if IsModelCostMetricKey(key) { + return true + } + } + return false +} + +func IncludeDetailMetricKey(key string) bool { + return !strings.HasPrefix(strings.TrimSpace(key), "mcp_") +} + func ExtractProjectUsage(s UsageSnapshot) ([]ProjectUsageEntry, map[string]bool) { byProject := make(map[string]*ProjectUsageEntry) usedKeys := make(map[string]bool) diff --git a/internal/daemon/source_collectors.go b/internal/daemon/source_collectors.go index 016c732..77d2978 100644 --- a/internal/daemon/source_collectors.go +++ b/internal/daemon/source_collectors.go @@ -215,7 +215,7 @@ func collectOptionsForAccount(source shared.TelemetrySource, acct core.AccountCo for key, value := range opts.Paths { opts.Paths[key] = strings.TrimSpace(acct.Path(key, value)) } - for key, value := range acct.Paths { + for key, value := range acct.PathMap() { trimmedKey := strings.TrimSpace(key) trimmedValue := strings.TrimSpace(value) if trimmedKey == "" || trimmedValue == "" { diff --git a/internal/detect/claude_code.go b/internal/detect/claude_code.go index 1b5333c..9265f8d 100644 --- a/internal/detect/claude_code.go +++ b/internal/detect/claude_code.go @@ -34,15 +34,18 @@ func detectClaudeCode(result *Result) { if hasStats || hasAccount { log.Printf("[detect] Claude Code data found (stats=%v, account=%v)", hasStats, hasAccount) - addAccount(result, core.AccountConfig{ + acct := core.AccountConfig{ ID: "claude-code", Provider: "claude_code", Auth: "local", - Paths: map[string]string{ - "stats_cache": statsFile, - "account_config": accountFile, - }, - }) + } + if hasStats { + acct.SetPath("stats_cache", statsFile) + } + if hasAccount { + acct.SetPath("account_config", accountFile) + } + addAccount(result, acct) } else { log.Printf("[detect] Claude Code found but no stats data at expected locations") } diff --git a/internal/detect/cursor.go b/internal/detect/cursor.go index 9e84878..cad6aec 100644 --- a/internal/detect/cursor.go +++ b/internal/detect/cursor.go @@ 
-48,16 +48,15 @@ func detectCursor(result *Result) { ID: "cursor-ide", Provider: "cursor", Auth: "local", - Paths: make(map[string]string), ExtraData: make(map[string]string), } if hasTracking { - acct.Paths["tracking_db"] = trackingDB + acct.SetPath("tracking_db", trackingDB) acct.ExtraData["tracking_db"] = trackingDB } if hasState { - acct.Paths["state_db"] = stateDB + acct.SetPath("state_db", stateDB) acct.ExtraData["state_db"] = stateDB } diff --git a/internal/providers/gemini_cli/api_usage.go b/internal/providers/gemini_cli/api_usage.go new file mode 100644 index 0000000..6a094be --- /dev/null +++ b/internal/providers/gemini_cli/api_usage.go @@ -0,0 +1,639 @@ +package gemini_cli + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" + "github.com/samber/lo" +) + +func (p *Provider) fetchUsageFromAPI(ctx context.Context, snap *core.UsageSnapshot, creds oauthCreds, acct core.AccountConfig) error { + client := p.Client() + accessToken, err := refreshAccessToken(ctx, creds.RefreshToken, client) + if err != nil { + snap.Status = core.StatusAuth + snap.Message = "OAuth token refresh failed — run `gemini` to re-authenticate" + return fmt.Errorf("token refresh: %w", err) + } + snap.Raw["oauth_status"] = "valid (refreshed)" + + projectID := "" + if v := os.Getenv("GOOGLE_CLOUD_PROJECT"); v != "" { + projectID = v + } else if v := os.Getenv("GOOGLE_CLOUD_PROJECT_ID"); v != "" { + projectID = v + } + if projectID == "" && acct.ExtraData != nil { + projectID = acct.ExtraData["project_id"] + } + + loadResp, err := loadCodeAssistDetails(ctx, accessToken, projectID, client) + if err != nil { + return fmt.Errorf("loadCodeAssist: %w", err) + } + if loadResp != nil { + applyLoadCodeAssistMetadata(snap, loadResp) + if projectID == "" { + projectID = 
loadResp.CloudAICompanionProject + } + } + + if projectID == "" { + return fmt.Errorf("could not determine project ID") + } + snap.Raw["project_id"] = projectID + + quota, method, err := retrieveUserQuota(ctx, accessToken, projectID, client) + if err != nil { + return fmt.Errorf("retrieveUserQuota: %w", err) + } + + if len(quota.Buckets) == 0 { + snap.Raw["quota_api"] = fmt.Sprintf("ok (0 buckets, %s)", method) + snap.Raw["quota_api_method"] = method + return nil + } + + snap.Raw["quota_api"] = fmt.Sprintf("ok (%d buckets, %s)", len(quota.Buckets), method) + snap.Raw["quota_api_method"] = method + snap.Raw["quota_bucket_count"] = fmt.Sprintf("%d", len(quota.Buckets)) + + result := applyQuotaBuckets(snap, quota.Buckets) + applyQuotaStatus(snap, result.worstFraction) + + return nil +} + +func refreshAccessToken(ctx context.Context, refreshToken string, client *http.Client) (string, error) { + return refreshAccessTokenWithEndpoint(ctx, refreshToken, tokenEndpoint, client) +} + +func refreshAccessTokenWithEndpoint(ctx context.Context, refreshToken, endpoint string, client *http.Client) (string, error) { + if client == nil { + client = &http.Client{Timeout: 30 * time.Second} + } + data := url.Values{ + "client_id": {oauthClientID}, + "client_secret": {oauthClientSecret}, + "refresh_token": {refreshToken}, + "grant_type": {"refresh_token"}, + } + + req, err := http.NewRequestWithContext(ctx, "POST", endpoint, strings.NewReader(data.Encode())) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("token refresh HTTP %d: %s", resp.StatusCode, string(body)) + } + + var tokenResp tokenRefreshResponse + if err := json.Unmarshal(body, &tokenResp); err != nil { + return "", fmt.Errorf("parse token response: %w", err) + } + if 
tokenResp.AccessToken == "" { + return "", fmt.Errorf("empty access_token in refresh response") + } + + return tokenResp.AccessToken, nil +} + +func loadCodeAssistDetails(ctx context.Context, accessToken, existingProjectID string, client *http.Client) (*loadCodeAssistResponse, error) { + return loadCodeAssistDetailsWithEndpoint(ctx, accessToken, existingProjectID, codeAssistEndpoint, client) +} + +func loadCodeAssistDetailsWithEndpoint(ctx context.Context, accessToken, existingProjectID, baseURL string, client *http.Client) (*loadCodeAssistResponse, error) { + reqBody := loadCodeAssistRequest{ + CloudAICompanionProject: existingProjectID, + Metadata: clientMetadata{ + IDEType: "IDE_UNSPECIFIED", + Platform: "PLATFORM_UNSPECIFIED", + PluginType: "GEMINI", + Project: existingProjectID, + }, + } + + respBody, err := codeAssistPostWithEndpoint(ctx, accessToken, "loadCodeAssist", reqBody, baseURL, client) + if err != nil { + return nil, err + } + + var resp loadCodeAssistResponse + if err := json.Unmarshal(respBody, &resp); err != nil { + return nil, fmt.Errorf("parse loadCodeAssist response: %w", err) + } + + return &resp, nil +} + +func retrieveUserQuota(ctx context.Context, accessToken, projectID string, client *http.Client) (*retrieveUserQuotaResponse, string, error) { + return retrieveUserQuotaWithEndpoint(ctx, accessToken, projectID, codeAssistEndpoint, client) +} + +func retrieveUserQuotaWithEndpoint(ctx context.Context, accessToken, projectID, baseURL string, client *http.Client) (*retrieveUserQuotaResponse, string, error) { + reqBody := retrieveUserQuotaRequest{ + Project: projectID, + } + + respBody, err := codeAssistPostWithEndpoint(ctx, accessToken, "retrieveUserQuota", reqBody, baseURL, client) + if err != nil { + return nil, "", err + } + + var resp retrieveUserQuotaResponse + if err := json.Unmarshal(respBody, &resp); err != nil { + return nil, "", fmt.Errorf("parse retrieveUserQuota response: %w", err) + } + + return &resp, "retrieveUserQuota", nil +} + 
+func codeAssistPostWithEndpoint(ctx context.Context, accessToken, method string, body interface{}, baseURL string, client *http.Client) ([]byte, error) { + if client == nil { + client = &http.Client{Timeout: 30 * time.Second} + } + apiURL := fmt.Sprintf("%s/%s:%s", baseURL, codeAssistAPIVersion, method) + + jsonBody, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", apiURL, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+accessToken) + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + respBody, _ := io.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s HTTP %d: %s", method, resp.StatusCode, truncate(string(respBody), 200)) + } + + return respBody, nil +} + +func formatWindow(d time.Duration) string { + if d <= 0 { + return "expired" + } + hours := int(d.Hours()) + minutes := int(d.Minutes()) % 60 + + if hours >= 24 { + days := hours / 24 + if days == 1 { + return "~1 day" + } + return fmt.Sprintf("~%dd", days) + } + if hours > 0 && minutes > 0 { + return fmt.Sprintf("%dh%dm", hours, minutes) + } + if hours > 0 { + return fmt.Sprintf("%dh", hours) + } + return fmt.Sprintf("%dm", minutes) +} + +func truncate(s string, maxLen int) string { return shared.Truncate(s, maxLen) } + +type quotaAggregationResult struct { + bucketCount int + modelCount int + worstFraction float64 +} + +type quotaAggregate struct { + modelID string + tokenType string + remainingFraction float64 + resetAt time.Time + hasReset bool +} + +func applyLoadCodeAssistMetadata(snap *core.UsageSnapshot, resp *loadCodeAssistResponse) { + if resp == nil { + return + } + + snap.Raw["gcp_managed"] = fmt.Sprintf("%t", resp.GCPManaged) + if resp.UpgradeSubscriptionURI != "" { + snap.Raw["upgrade_uri"] = 
resp.UpgradeSubscriptionURI + } + if resp.UpgradeSubscriptionType != "" { + snap.Raw["upgrade_type"] = resp.UpgradeSubscriptionType + } + + if resp.CurrentTier != nil { + if resp.CurrentTier.ID != "" { + snap.Raw["tier_id"] = resp.CurrentTier.ID + } + if resp.CurrentTier.Name != "" { + snap.Raw["tier_name"] = resp.CurrentTier.Name + } + if resp.CurrentTier.Description != "" { + snap.Raw["tier_description"] = truncate(strings.TrimSpace(resp.CurrentTier.Description), 200) + } + snap.Raw["tier_uses_gcp_tos"] = fmt.Sprintf("%t", resp.CurrentTier.UsesGCPTOS) + snap.Raw["tier_user_project"] = fmt.Sprintf("%t", resp.CurrentTier.UserDefinedCloudAICompanionProject) + } + + allowedTiers := float64(len(resp.AllowedTiers)) + ineligibleTiers := float64(len(resp.IneligibleTiers)) + snap.Metrics["allowed_tiers"] = core.Metric{Used: &allowedTiers, Unit: "tiers", Window: "current"} + snap.Metrics["ineligible_tiers"] = core.Metric{Used: &ineligibleTiers, Unit: "tiers", Window: "current"} + + if len(resp.AllowedTiers) > 0 { + names := make([]string, 0, len(resp.AllowedTiers)) + for _, tier := range resp.AllowedTiers { + if tier.Name != "" { + names = append(names, tier.Name) + } else if tier.ID != "" { + names = append(names, tier.ID) + } + } + if len(names) > 0 { + snap.Raw["allowed_tier_names"] = strings.Join(names, ", ") + } + } + + if len(resp.IneligibleTiers) > 0 { + reasons := make([]string, 0, len(resp.IneligibleTiers)) + for _, tier := range resp.IneligibleTiers { + if tier.ReasonMessage != "" { + reasons = append(reasons, tier.ReasonMessage) + } else if tier.ReasonCode != "" { + reasons = append(reasons, tier.ReasonCode) + } + } + if len(reasons) > 0 { + snap.Raw["ineligible_reasons"] = strings.Join(reasons, " | ") + } + } +} + +func applyQuotaBuckets(snap *core.UsageSnapshot, buckets []bucketInfo) quotaAggregationResult { + result := quotaAggregationResult{bucketCount: len(buckets), worstFraction: 1.0} + if len(buckets) == 0 { + return result + } + + aggregates := 
make(map[string]quotaAggregate) + for _, bucket := range buckets { + fraction, ok := bucketRemainingFraction(bucket) + if !ok { + continue + } + if fraction < 0 { + fraction = 0 + } + if fraction > 1 { + fraction = 1 + } + + modelID := normalizeQuotaModelID(bucket.ModelID) + tokenType := strings.ToLower(strings.TrimSpace(bucket.TokenType)) + if tokenType == "" { + tokenType = "requests" + } + + var resetAt time.Time + hasReset := false + if bucket.ResetTime != "" { + if parsed, err := time.Parse(time.RFC3339, bucket.ResetTime); err == nil { + resetAt = parsed + hasReset = true + } + } + + key := modelID + "|" + tokenType + current, exists := aggregates[key] + if !exists || fraction < current.remainingFraction { + aggregates[key] = quotaAggregate{ + modelID: modelID, + tokenType: tokenType, + remainingFraction: fraction, + resetAt: resetAt, + hasReset: hasReset, + } + continue + } + if exists && fraction == current.remainingFraction && hasReset && (!current.hasReset || resetAt.Before(current.resetAt)) { + current.resetAt = resetAt + current.hasReset = true + aggregates[key] = current + } + } + + if len(aggregates) == 0 { + return result + } + + keys := lo.Keys(aggregates) + sort.Strings(keys) + + modelWorst := make(map[string]float64) + var summary []string + + worstFraction := 1.0 + var worstMetric core.Metric + worstFound := false + var worstReset time.Time + worstHasReset := false + + proFraction := 1.0 + var proMetric core.Metric + proFound := false + var proReset time.Time + proHasReset := false + + flashFraction := 1.0 + var flashMetric core.Metric + flashFound := false + var flashReset time.Time + flashHasReset := false + + for _, key := range keys { + agg := aggregates[key] + window := "daily" + if agg.hasReset { + window = formatWindow(time.Until(agg.resetAt)) + } + metric := quotaMetricFromFraction(agg.remainingFraction, window) + + metricKey := "quota_model_" + sanitizeMetricName(agg.modelID) + "_" + sanitizeMetricName(agg.tokenType) + 
snap.Metrics[metricKey] = metric + if agg.hasReset { + snap.Resets[metricKey+"_reset"] = agg.resetAt + } + + usedPct := 100 - agg.remainingFraction*100 + summary = append(summary, fmt.Sprintf("%s %.1f%% used", agg.modelID, usedPct)) + + if prev, ok := modelWorst[agg.modelID]; !ok || agg.remainingFraction < prev { + modelWorst[agg.modelID] = agg.remainingFraction + } + + if !worstFound || agg.remainingFraction < worstFraction { + worstFraction = agg.remainingFraction + worstMetric = metric + worstFound = true + worstReset = agg.resetAt + worstHasReset = agg.hasReset + } + + modelLower := strings.ToLower(agg.modelID) + if strings.Contains(modelLower, "pro") && (!proFound || agg.remainingFraction < proFraction) { + proFraction = agg.remainingFraction + proMetric = metric + proFound = true + proReset = agg.resetAt + proHasReset = agg.hasReset + } + if strings.Contains(modelLower, "flash") && (!flashFound || agg.remainingFraction < flashFraction) { + flashFraction = agg.remainingFraction + flashMetric = metric + flashFound = true + flashReset = agg.resetAt + flashHasReset = agg.hasReset + } + } + + if len(summary) > maxBreakdownRaw { + summary = summary[:maxBreakdownRaw] + } + if len(summary) > 0 { + snap.Raw["quota_models"] = strings.Join(summary, ", ") + } + + if worstFound { + snap.Metrics["quota"] = worstMetric + if worstHasReset { + snap.Resets["quota_reset"] = worstReset + } + result.worstFraction = worstFraction + } + if proFound { + snap.Metrics["quota_pro"] = proMetric + if proHasReset { + snap.Resets["quota_pro_reset"] = proReset + } + } + if flashFound { + snap.Metrics["quota_flash"] = flashMetric + if flashHasReset { + snap.Resets["quota_flash_reset"] = flashReset + } + } + + lowCount := 0 + exhaustedCount := 0 + for _, fraction := range modelWorst { + if fraction <= 0 { + exhaustedCount++ + } + if fraction < quotaNearLimitFraction { + lowCount++ + } + } + modelCount := len(modelWorst) + result.modelCount = modelCount + snap.Raw["quota_models_tracked"] = 
fmt.Sprintf("%d", modelCount) + + modelCountF := float64(modelCount) + lowCountF := float64(lowCount) + exhaustedCountF := float64(exhaustedCount) + snap.Metrics["quota_models_tracked"] = core.Metric{Used: &modelCountF, Unit: "models", Window: "daily"} + snap.Metrics["quota_models_low"] = core.Metric{Used: &lowCountF, Unit: "models", Window: "daily"} + snap.Metrics["quota_models_exhausted"] = core.Metric{Used: &exhaustedCountF, Unit: "models", Window: "daily"} + + return result +} + +func quotaMetricFromFraction(remainingFraction float64, window string) core.Metric { + limit := 100.0 + remaining := remainingFraction * 100 + used := 100 - remaining + return core.Metric{ + Limit: &limit, + Remaining: &remaining, + Used: &used, + Unit: "%", + Window: window, + } +} + +func normalizeQuotaModelID(modelID string) string { + modelID = strings.TrimSpace(modelID) + if modelID == "" { + return "all_models" + } + modelID = strings.TrimPrefix(modelID, "models/") + modelID = strings.TrimSuffix(modelID, "_vertex") + return modelID +} + +func bucketRemainingFraction(bucket bucketInfo) (float64, bool) { + if bucket.RemainingFraction != nil { + return *bucket.RemainingFraction, true + } + if bucket.RemainingAmount == "" { + return 0, false + } + return parseRemainingAmountFraction(bucket.RemainingAmount) +} + +func parseRemainingAmountFraction(raw string) (float64, bool) { + s := strings.TrimSpace(strings.ToLower(raw)) + if s == "" { + return 0, false + } + + if strings.HasSuffix(s, "%") { + value, err := strconv.ParseFloat(strings.TrimSuffix(s, "%"), 64) + if err != nil { + return 0, false + } + return value / 100, true + } + + if strings.Contains(s, "/") { + parts := strings.SplitN(s, "/", 2) + if len(parts) != 2 { + return 0, false + } + numerator, err1 := strconv.ParseFloat(strings.TrimSpace(parts[0]), 64) + denominator, err2 := strconv.ParseFloat(strings.TrimSpace(parts[1]), 64) + if err1 != nil || err2 != nil || denominator <= 0 { + return 0, false + } + return numerator / 
denominator, true + } + + value, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0, false + } + if value > 1 { + return value / 100, true + } + return value, true +} + +func applyQuotaStatus(snap *core.UsageSnapshot, worstFraction float64) { + if worstFraction < 0 { + return + } + + desired := core.StatusOK + if worstFraction <= 0 { + desired = core.StatusLimited + } else if worstFraction < quotaNearLimitFraction { + desired = core.StatusNearLimit + } + + if snap.Status == core.StatusAuth || snap.Status == core.StatusError { + return + } + + severity := map[core.Status]int{ + core.StatusOK: 0, + core.StatusNearLimit: 1, + core.StatusLimited: 2, + } + if severity[desired] > severity[snap.Status] { + snap.Status = desired + } +} + +func applyGeminiMCPMetadata(snap *core.UsageSnapshot, settings geminiSettings, enablementPath string) { + configured := make(map[string]bool) + for name := range settings.MCPServers { + name = strings.TrimSpace(name) + if name == "" { + continue + } + configured[name] = true + } + + enabled := make(map[string]bool) + disabled := make(map[string]bool) + if data, err := os.ReadFile(enablementPath); err == nil { + var state map[string]geminiMCPEnablement + if json.Unmarshal(data, &state) == nil { + for name, cfg := range state { + name = strings.TrimSpace(name) + if name == "" { + continue + } + configured[name] = true + if cfg.Enabled { + enabled[name] = true + delete(disabled, name) + continue + } + if !enabled[name] { + disabled[name] = true + } + } + } + } + + configuredNames := mapKeysSorted(configured) + enabledNames := mapKeysSorted(enabled) + disabledNames := mapKeysSorted(disabled) + + if len(configuredNames) == 0 { + return + } + + setUsedMetric(snap, "mcp_servers_configured", float64(len(configuredNames)), "servers", defaultUsageWindowLabel) + if len(enabledNames) > 0 { + setUsedMetric(snap, "mcp_servers_enabled", float64(len(enabledNames)), "servers", defaultUsageWindowLabel) + } + if len(disabledNames) > 0 { + 
setUsedMetric(snap, "mcp_servers_disabled", float64(len(disabledNames)), "servers", defaultUsageWindowLabel) + } + if len(enabledNames)+len(disabledNames) > 0 { + setUsedMetric(snap, "mcp_servers_tracked", float64(len(enabledNames)+len(disabledNames)), "servers", defaultUsageWindowLabel) + } + + if summary := formatGeminiNameList(configuredNames, maxBreakdownRaw); summary != "" { + snap.Raw["mcp_servers"] = summary + } + if summary := formatGeminiNameList(enabledNames, maxBreakdownRaw); summary != "" { + snap.Raw["mcp_servers_enabled"] = summary + } + if summary := formatGeminiNameList(disabledNames, maxBreakdownRaw); summary != "" { + snap.Raw["mcp_servers_disabled"] = summary + } +} diff --git a/internal/providers/gemini_cli/gemini_cli.go b/internal/providers/gemini_cli/gemini_cli.go index 821229b..fbbdb93 100644 --- a/internal/providers/gemini_cli/gemini_cli.go +++ b/internal/providers/gemini_cli/gemini_cli.go @@ -1,26 +1,18 @@ package gemini_cli import ( - "bytes" "context" "encoding/json" "fmt" - "io" "log" - "net/http" - "net/url" "os" "os/exec" "path/filepath" - "sort" - "strconv" "strings" "time" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" - "github.com/janekbaraniewski/openusage/internal/providers/shared" - "github.com/samber/lo" ) const ( @@ -416,622 +408,3 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa return snap, nil } - -func (p *Provider) fetchUsageFromAPI(ctx context.Context, snap *core.UsageSnapshot, creds oauthCreds, acct core.AccountConfig) error { - client := p.Client() - accessToken, err := refreshAccessToken(ctx, creds.RefreshToken, client) - if err != nil { - snap.Status = core.StatusAuth - snap.Message = "OAuth token refresh failed — run `gemini` to re-authenticate" - return fmt.Errorf("token refresh: %w", err) - } - snap.Raw["oauth_status"] = "valid (refreshed)" - - projectID := "" - if v := 
os.Getenv("GOOGLE_CLOUD_PROJECT"); v != "" { - projectID = v - } else if v := os.Getenv("GOOGLE_CLOUD_PROJECT_ID"); v != "" { - projectID = v - } - if projectID == "" && acct.ExtraData != nil { - projectID = acct.ExtraData["project_id"] - } - - loadResp, err := loadCodeAssistDetails(ctx, accessToken, projectID, client) - if err != nil { - return fmt.Errorf("loadCodeAssist: %w", err) - } - if loadResp != nil { - applyLoadCodeAssistMetadata(snap, loadResp) - if projectID == "" { - projectID = loadResp.CloudAICompanionProject - } - } - - if projectID == "" { - return fmt.Errorf("could not determine project ID") - } - snap.Raw["project_id"] = projectID - - quota, method, err := retrieveUserQuota(ctx, accessToken, projectID, client) - if err != nil { - return fmt.Errorf("retrieveUserQuota: %w", err) - } - - if len(quota.Buckets) == 0 { - snap.Raw["quota_api"] = fmt.Sprintf("ok (0 buckets, %s)", method) - snap.Raw["quota_api_method"] = method - return nil - } - - snap.Raw["quota_api"] = fmt.Sprintf("ok (%d buckets, %s)", len(quota.Buckets), method) - snap.Raw["quota_api_method"] = method - snap.Raw["quota_bucket_count"] = fmt.Sprintf("%d", len(quota.Buckets)) - - result := applyQuotaBuckets(snap, quota.Buckets) - applyQuotaStatus(snap, result.worstFraction) - - return nil -} - -func refreshAccessToken(ctx context.Context, refreshToken string, client *http.Client) (string, error) { - return refreshAccessTokenWithEndpoint(ctx, refreshToken, tokenEndpoint, client) -} - -func refreshAccessTokenWithEndpoint(ctx context.Context, refreshToken, endpoint string, client *http.Client) (string, error) { - if client == nil { - client = &http.Client{Timeout: 30 * time.Second} - } - data := url.Values{ - "client_id": {oauthClientID}, - "client_secret": {oauthClientSecret}, - "refresh_token": {refreshToken}, - "grant_type": {"refresh_token"}, - } - - req, err := http.NewRequestWithContext(ctx, "POST", endpoint, strings.NewReader(data.Encode())) - if err != nil { - return "", err - } - 
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - body, _ := io.ReadAll(resp.Body) - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("token refresh HTTP %d: %s", resp.StatusCode, string(body)) - } - - var tokenResp tokenRefreshResponse - if err := json.Unmarshal(body, &tokenResp); err != nil { - return "", fmt.Errorf("parse token response: %w", err) - } - if tokenResp.AccessToken == "" { - return "", fmt.Errorf("empty access_token in refresh response") - } - - return tokenResp.AccessToken, nil -} - -func loadCodeAssistDetails(ctx context.Context, accessToken, existingProjectID string, client *http.Client) (*loadCodeAssistResponse, error) { - return loadCodeAssistDetailsWithEndpoint(ctx, accessToken, existingProjectID, codeAssistEndpoint, client) -} - -func loadCodeAssistDetailsWithEndpoint(ctx context.Context, accessToken, existingProjectID, baseURL string, client *http.Client) (*loadCodeAssistResponse, error) { - reqBody := loadCodeAssistRequest{ - CloudAICompanionProject: existingProjectID, - Metadata: clientMetadata{ - IDEType: "IDE_UNSPECIFIED", - Platform: "PLATFORM_UNSPECIFIED", - PluginType: "GEMINI", - Project: existingProjectID, - }, - } - - respBody, err := codeAssistPostWithEndpoint(ctx, accessToken, "loadCodeAssist", reqBody, baseURL, client) - if err != nil { - return nil, err - } - - var resp loadCodeAssistResponse - if err := json.Unmarshal(respBody, &resp); err != nil { - return nil, fmt.Errorf("parse loadCodeAssist response: %w", err) - } - - return &resp, nil -} - -func retrieveUserQuota(ctx context.Context, accessToken, projectID string, client *http.Client) (*retrieveUserQuotaResponse, string, error) { - return retrieveUserQuotaWithEndpoint(ctx, accessToken, projectID, codeAssistEndpoint, client) -} - -func retrieveUserQuotaWithEndpoint(ctx context.Context, accessToken, projectID, baseURL string, client 
*http.Client) (*retrieveUserQuotaResponse, string, error) { - reqBody := retrieveUserQuotaRequest{ - Project: projectID, - } - - respBody, err := codeAssistPostWithEndpoint(ctx, accessToken, "retrieveUserQuota", reqBody, baseURL, client) - if err != nil { - return nil, "", err - } - - var resp retrieveUserQuotaResponse - if err := json.Unmarshal(respBody, &resp); err != nil { - return nil, "", fmt.Errorf("parse retrieveUserQuota response: %w", err) - } - - return &resp, "retrieveUserQuota", nil -} - -func codeAssistPostWithEndpoint(ctx context.Context, accessToken, method string, body interface{}, baseURL string, client *http.Client) ([]byte, error) { - if client == nil { - client = &http.Client{Timeout: 30 * time.Second} - } - apiURL := fmt.Sprintf("%s/%s:%s", baseURL, codeAssistAPIVersion, method) - - jsonBody, err := json.Marshal(body) - if err != nil { - return nil, fmt.Errorf("marshal request: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, "POST", apiURL, bytes.NewReader(jsonBody)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", "Bearer "+accessToken) - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - respBody, _ := io.ReadAll(resp.Body) - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s HTTP %d: %s", method, resp.StatusCode, truncate(string(respBody), 200)) - } - - return respBody, nil -} - -func formatWindow(d time.Duration) string { - if d <= 0 { - return "expired" - } - hours := int(d.Hours()) - minutes := int(d.Minutes()) % 60 - - if hours >= 24 { - days := hours / 24 - if days == 1 { - return "~1 day" - } - return fmt.Sprintf("~%dd", days) - } - if hours > 0 && minutes > 0 { - return fmt.Sprintf("%dh%dm", hours, minutes) - } - if hours > 0 { - return fmt.Sprintf("%dh", hours) - } - return fmt.Sprintf("%dm", minutes) -} - -func truncate(s string, maxLen int) string { return shared.Truncate(s, 
maxLen) } - -type quotaAggregationResult struct { - bucketCount int - modelCount int - worstFraction float64 -} - -type quotaAggregate struct { - modelID string - tokenType string - remainingFraction float64 - resetAt time.Time - hasReset bool -} - -func applyLoadCodeAssistMetadata(snap *core.UsageSnapshot, resp *loadCodeAssistResponse) { - if resp == nil { - return - } - - snap.Raw["gcp_managed"] = fmt.Sprintf("%t", resp.GCPManaged) - if resp.UpgradeSubscriptionURI != "" { - snap.Raw["upgrade_uri"] = resp.UpgradeSubscriptionURI - } - if resp.UpgradeSubscriptionType != "" { - snap.Raw["upgrade_type"] = resp.UpgradeSubscriptionType - } - - if resp.CurrentTier != nil { - if resp.CurrentTier.ID != "" { - snap.Raw["tier_id"] = resp.CurrentTier.ID - } - if resp.CurrentTier.Name != "" { - snap.Raw["tier_name"] = resp.CurrentTier.Name - } - if resp.CurrentTier.Description != "" { - snap.Raw["tier_description"] = truncate(strings.TrimSpace(resp.CurrentTier.Description), 200) - } - snap.Raw["tier_uses_gcp_tos"] = fmt.Sprintf("%t", resp.CurrentTier.UsesGCPTOS) - snap.Raw["tier_user_project"] = fmt.Sprintf("%t", resp.CurrentTier.UserDefinedCloudAICompanionProject) - } - - allowedTiers := float64(len(resp.AllowedTiers)) - ineligibleTiers := float64(len(resp.IneligibleTiers)) - snap.Metrics["allowed_tiers"] = core.Metric{Used: &allowedTiers, Unit: "tiers", Window: "current"} - snap.Metrics["ineligible_tiers"] = core.Metric{Used: &ineligibleTiers, Unit: "tiers", Window: "current"} - - if len(resp.AllowedTiers) > 0 { - names := make([]string, 0, len(resp.AllowedTiers)) - for _, tier := range resp.AllowedTiers { - if tier.Name != "" { - names = append(names, tier.Name) - } else if tier.ID != "" { - names = append(names, tier.ID) - } - } - if len(names) > 0 { - snap.Raw["allowed_tier_names"] = strings.Join(names, ", ") - } - } - - if len(resp.IneligibleTiers) > 0 { - reasons := make([]string, 0, len(resp.IneligibleTiers)) - for _, tier := range resp.IneligibleTiers { - if 
tier.ReasonMessage != "" { - reasons = append(reasons, tier.ReasonMessage) - } else if tier.ReasonCode != "" { - reasons = append(reasons, tier.ReasonCode) - } - } - if len(reasons) > 0 { - snap.Raw["ineligible_reasons"] = strings.Join(reasons, " | ") - } - } -} - -func applyQuotaBuckets(snap *core.UsageSnapshot, buckets []bucketInfo) quotaAggregationResult { - result := quotaAggregationResult{bucketCount: len(buckets), worstFraction: 1.0} - if len(buckets) == 0 { - return result - } - - aggregates := make(map[string]quotaAggregate) - for _, bucket := range buckets { - fraction, ok := bucketRemainingFraction(bucket) - if !ok { - continue - } - if fraction < 0 { - fraction = 0 - } - if fraction > 1 { - fraction = 1 - } - - modelID := normalizeQuotaModelID(bucket.ModelID) - tokenType := strings.ToLower(strings.TrimSpace(bucket.TokenType)) - if tokenType == "" { - tokenType = "requests" - } - - var resetAt time.Time - hasReset := false - if bucket.ResetTime != "" { - if parsed, err := time.Parse(time.RFC3339, bucket.ResetTime); err == nil { - resetAt = parsed - hasReset = true - } - } - - key := modelID + "|" + tokenType - current, exists := aggregates[key] - if !exists || fraction < current.remainingFraction { - aggregates[key] = quotaAggregate{ - modelID: modelID, - tokenType: tokenType, - remainingFraction: fraction, - resetAt: resetAt, - hasReset: hasReset, - } - continue - } - if exists && fraction == current.remainingFraction && hasReset && (!current.hasReset || resetAt.Before(current.resetAt)) { - current.resetAt = resetAt - current.hasReset = true - aggregates[key] = current - } - } - - if len(aggregates) == 0 { - return result - } - - keys := lo.Keys(aggregates) - sort.Strings(keys) - - modelWorst := make(map[string]float64) - var summary []string - - worstFraction := 1.0 - var worstMetric core.Metric - worstFound := false - var worstReset time.Time - worstHasReset := false - - proFraction := 1.0 - var proMetric core.Metric - proFound := false - var proReset 
time.Time - proHasReset := false - - flashFraction := 1.0 - var flashMetric core.Metric - flashFound := false - var flashReset time.Time - flashHasReset := false - - for _, key := range keys { - agg := aggregates[key] - window := "daily" - if agg.hasReset { - window = formatWindow(time.Until(agg.resetAt)) - } - metric := quotaMetricFromFraction(agg.remainingFraction, window) - - metricKey := "quota_model_" + sanitizeMetricName(agg.modelID) + "_" + sanitizeMetricName(agg.tokenType) - snap.Metrics[metricKey] = metric - if agg.hasReset { - snap.Resets[metricKey+"_reset"] = agg.resetAt - } - - usedPct := 100 - agg.remainingFraction*100 - summary = append(summary, fmt.Sprintf("%s %.1f%% used", agg.modelID, usedPct)) - - if prev, ok := modelWorst[agg.modelID]; !ok || agg.remainingFraction < prev { - modelWorst[agg.modelID] = agg.remainingFraction - } - - if !worstFound || agg.remainingFraction < worstFraction { - worstFraction = agg.remainingFraction - worstMetric = metric - worstFound = true - worstReset = agg.resetAt - worstHasReset = agg.hasReset - } - - modelLower := strings.ToLower(agg.modelID) - if strings.Contains(modelLower, "pro") && (!proFound || agg.remainingFraction < proFraction) { - proFraction = agg.remainingFraction - proMetric = metric - proFound = true - proReset = agg.resetAt - proHasReset = agg.hasReset - } - if strings.Contains(modelLower, "flash") && (!flashFound || agg.remainingFraction < flashFraction) { - flashFraction = agg.remainingFraction - flashMetric = metric - flashFound = true - flashReset = agg.resetAt - flashHasReset = agg.hasReset - } - } - - if len(summary) > maxBreakdownRaw { - summary = summary[:maxBreakdownRaw] - } - if len(summary) > 0 { - snap.Raw["quota_models"] = strings.Join(summary, ", ") - } - - if worstFound { - snap.Metrics["quota"] = worstMetric - if worstHasReset { - snap.Resets["quota_reset"] = worstReset - } - result.worstFraction = worstFraction - } - if proFound { - snap.Metrics["quota_pro"] = proMetric - if 
proHasReset { - snap.Resets["quota_pro_reset"] = proReset - } - } - if flashFound { - snap.Metrics["quota_flash"] = flashMetric - if flashHasReset { - snap.Resets["quota_flash_reset"] = flashReset - } - } - - lowCount := 0 - exhaustedCount := 0 - for _, fraction := range modelWorst { - if fraction <= 0 { - exhaustedCount++ - } - if fraction < quotaNearLimitFraction { - lowCount++ - } - } - modelCount := len(modelWorst) - result.modelCount = modelCount - snap.Raw["quota_models_tracked"] = fmt.Sprintf("%d", modelCount) - - modelCountF := float64(modelCount) - lowCountF := float64(lowCount) - exhaustedCountF := float64(exhaustedCount) - snap.Metrics["quota_models_tracked"] = core.Metric{Used: &modelCountF, Unit: "models", Window: "daily"} - snap.Metrics["quota_models_low"] = core.Metric{Used: &lowCountF, Unit: "models", Window: "daily"} - snap.Metrics["quota_models_exhausted"] = core.Metric{Used: &exhaustedCountF, Unit: "models", Window: "daily"} - - return result -} - -func quotaMetricFromFraction(remainingFraction float64, window string) core.Metric { - limit := 100.0 - remaining := remainingFraction * 100 - used := 100 - remaining - return core.Metric{ - Limit: &limit, - Remaining: &remaining, - Used: &used, - Unit: "%", - Window: window, - } -} - -func normalizeQuotaModelID(modelID string) string { - modelID = strings.TrimSpace(modelID) - if modelID == "" { - return "all_models" - } - modelID = strings.TrimPrefix(modelID, "models/") - modelID = strings.TrimSuffix(modelID, "_vertex") - return modelID -} - -func bucketRemainingFraction(bucket bucketInfo) (float64, bool) { - if bucket.RemainingFraction != nil { - return *bucket.RemainingFraction, true - } - if bucket.RemainingAmount == "" { - return 0, false - } - return parseRemainingAmountFraction(bucket.RemainingAmount) -} - -func parseRemainingAmountFraction(raw string) (float64, bool) { - s := strings.TrimSpace(strings.ToLower(raw)) - if s == "" { - return 0, false - } - - if strings.HasSuffix(s, "%") { - value, 
err := strconv.ParseFloat(strings.TrimSuffix(s, "%"), 64) - if err != nil { - return 0, false - } - return value / 100, true - } - - if strings.Contains(s, "/") { - parts := strings.SplitN(s, "/", 2) - if len(parts) != 2 { - return 0, false - } - numerator, err1 := strconv.ParseFloat(strings.TrimSpace(parts[0]), 64) - denominator, err2 := strconv.ParseFloat(strings.TrimSpace(parts[1]), 64) - if err1 != nil || err2 != nil || denominator <= 0 { - return 0, false - } - return numerator / denominator, true - } - - value, err := strconv.ParseFloat(s, 64) - if err != nil { - return 0, false - } - if value > 1 { - return value / 100, true - } - return value, true -} - -func applyQuotaStatus(snap *core.UsageSnapshot, worstFraction float64) { - if worstFraction < 0 { - return - } - - desired := core.StatusOK - if worstFraction <= 0 { - desired = core.StatusLimited - } else if worstFraction < quotaNearLimitFraction { - desired = core.StatusNearLimit - } - - if snap.Status == core.StatusAuth || snap.Status == core.StatusError { - return - } - - severity := map[core.Status]int{ - core.StatusOK: 0, - core.StatusNearLimit: 1, - core.StatusLimited: 2, - } - if severity[desired] > severity[snap.Status] { - snap.Status = desired - } -} - -func applyGeminiMCPMetadata(snap *core.UsageSnapshot, settings geminiSettings, enablementPath string) { - configured := make(map[string]bool) - for name := range settings.MCPServers { - name = strings.TrimSpace(name) - if name == "" { - continue - } - configured[name] = true - } - - enabled := make(map[string]bool) - disabled := make(map[string]bool) - if data, err := os.ReadFile(enablementPath); err == nil { - var state map[string]geminiMCPEnablement - if json.Unmarshal(data, &state) == nil { - for name, cfg := range state { - name = strings.TrimSpace(name) - if name == "" { - continue - } - configured[name] = true - if cfg.Enabled { - enabled[name] = true - delete(disabled, name) - continue - } - if !enabled[name] { - disabled[name] = true - } - 
} - } - } - - configuredNames := mapKeysSorted(configured) - enabledNames := mapKeysSorted(enabled) - disabledNames := mapKeysSorted(disabled) - - if len(configuredNames) == 0 { - return - } - - setUsedMetric(snap, "mcp_servers_configured", float64(len(configuredNames)), "servers", defaultUsageWindowLabel) - if len(enabledNames) > 0 { - setUsedMetric(snap, "mcp_servers_enabled", float64(len(enabledNames)), "servers", defaultUsageWindowLabel) - } - if len(disabledNames) > 0 { - setUsedMetric(snap, "mcp_servers_disabled", float64(len(disabledNames)), "servers", defaultUsageWindowLabel) - } - if len(enabledNames)+len(disabledNames) > 0 { - setUsedMetric(snap, "mcp_servers_tracked", float64(len(enabledNames)+len(disabledNames)), "servers", defaultUsageWindowLabel) - } - - if summary := formatGeminiNameList(configuredNames, maxBreakdownRaw); summary != "" { - snap.Raw["mcp_servers"] = summary - } - if summary := formatGeminiNameList(enabledNames, maxBreakdownRaw); summary != "" { - snap.Raw["mcp_servers_enabled"] = summary - } - if summary := formatGeminiNameList(disabledNames, maxBreakdownRaw); summary != "" { - snap.Raw["mcp_servers_disabled"] = summary - } -} diff --git a/internal/providers/ollama/cloud_api.go b/internal/providers/ollama/cloud_api.go new file mode 100644 index 0000000..78ddbd0 --- /dev/null +++ b/internal/providers/ollama/cloud_api.go @@ -0,0 +1,326 @@ +package ollama + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/parsers" +) + +func (p *Provider) fetchCloudAPI(ctx context.Context, acct core.AccountConfig, apiKey string, snap *core.UsageSnapshot) (hasData, authFailed, limited bool, err error) { + cloudBaseURL := resolveCloudBaseURL(acct) + + var me map[string]any + status, headers, reqErr := doJSONRequest(ctx, http.MethodPost, cloudEndpointURL(cloudBaseURL, "/api/me"), apiKey, &me, p.Client()) + if 
reqErr != nil { + return false, false, false, fmt.Errorf("ollama: cloud account request failed: %w", reqErr) + } + + for k, v := range parsers.RedactHeaders(headers, "authorization") { + if strings.EqualFold(k, "X-Request-Id") { + snap.Raw["cloud_me_"+normalizeHeaderKey(k)] = v + } + } + + switch status { + case http.StatusOK: + snap.SetAttribute("auth_type", "api_key") + if applyCloudUserPayload(me, snap, p.now()) { + hasData = true + } + case http.StatusUnauthorized, http.StatusForbidden: + authFailed = true + case http.StatusTooManyRequests: + limited = true + default: + snap.SetDiagnostic("cloud_me_status", fmt.Sprintf("HTTP %d", status)) + } + + var tags tagsResponse + tagsStatus, _, tagsErr := doJSONRequest(ctx, http.MethodGet, cloudEndpointURL(cloudBaseURL, "/api/tags"), apiKey, &tags, p.Client()) + if tagsErr != nil { + if !hasData { + return hasData, authFailed, limited, fmt.Errorf("ollama: cloud tags request failed: %w", tagsErr) + } + snap.SetDiagnostic("cloud_tags_error", tagsErr.Error()) + return hasData, authFailed, limited, nil + } + + switch tagsStatus { + case http.StatusOK: + setValueMetric(snap, "cloud_catalog_models", float64(len(tags.Models)), "models", "current") + hasData = true + case http.StatusUnauthorized, http.StatusForbidden: + authFailed = true + case http.StatusTooManyRequests: + limited = true + default: + snap.SetDiagnostic("cloud_tags_status", fmt.Sprintf("HTTP %d", tagsStatus)) + } + + if _, ok := snap.Metrics["usage_five_hour"]; !ok { + if parsed, parseErr := fetchCloudUsageFromSettingsPage(ctx, cloudBaseURL, apiKey, acct, snap, p.Client()); parseErr != nil { + snap.SetDiagnostic("cloud_usage_settings_error", parseErr.Error()) + } else if parsed { + hasData = true + } + } + + return hasData, authFailed, limited, nil +} + +func applyCloudUserPayload(payload map[string]any, snap *core.UsageSnapshot, now time.Time) bool { + if len(payload) == 0 { + return false + } + + var hasData bool + + if id := anyStringCaseInsensitive(payload, 
"id", "ID"); id != "" { + snap.SetAttribute("account_id", id) + hasData = true + } + if email := anyStringCaseInsensitive(payload, "email", "Email"); email != "" { + snap.SetAttribute("account_email", email) + hasData = true + } + if name := anyStringCaseInsensitive(payload, "name", "Name"); name != "" { + snap.SetAttribute("account_name", name) + hasData = true + } + if plan := anyStringCaseInsensitive(payload, "plan", "Plan"); plan != "" { + snap.SetAttribute("plan_name", plan) + hasData = true + } + + if customerID := anyNullStringCaseInsensitive(payload, "customerid", "customer_id", "CustomerID"); customerID != "" { + snap.SetAttribute("customer_id", customerID) + } + if subscriptionID := anyNullStringCaseInsensitive(payload, "subscriptionid", "subscription_id", "SubscriptionID"); subscriptionID != "" { + snap.SetAttribute("subscription_id", subscriptionID) + } + if workOSUserID := anyNullStringCaseInsensitive(payload, "workosuserid", "workos_user_id", "WorkOSUserID"); workOSUserID != "" { + snap.SetAttribute("workos_user_id", workOSUserID) + } + + if billingStart, ok := anyNullTimeCaseInsensitive(payload, "subscriptionperiodstart", "subscription_period_start", "SubscriptionPeriodStart"); ok { + snap.SetAttribute("billing_cycle_start", billingStart.Format(time.RFC3339)) + } + if billingEnd, ok := anyNullTimeCaseInsensitive(payload, "subscriptionperiodend", "subscription_period_end", "SubscriptionPeriodEnd"); ok { + snap.SetAttribute("billing_cycle_end", billingEnd.Format(time.RFC3339)) + } + + if extractCloudUsageWindows(payload, snap, now) { + hasData = true + } + + return hasData +} + +func extractCloudUsageWindows(payload map[string]any, snap *core.UsageSnapshot, now time.Time) bool { + var found bool + + sessionKeys := []string{ + "session_usage", "sessionusage", "usage_5h", "usagefivehour", "five_hour_usage", "fivehourusage", + } + if metric, resetAt, ok := findUsageWindow(payload, sessionKeys, "5h", now); ok { + snap.Metrics["usage_five_hour"] = metric + 
if !resetAt.IsZero() { + snap.Resets["usage_five_hour"] = resetAt + snap.SetAttribute("block_end", resetAt.Format(time.RFC3339)) + if metric.Window == "5h" { + start := resetAt.Add(-5 * time.Hour) + snap.SetAttribute("block_start", start.Format(time.RFC3339)) + } + } + found = true + } + + dayKeys := []string{ + "weekly_usage", "weeklyusage", "usage_1d", "usageoneday", "one_day_usage", "daily_usage", "dailyusage", + } + if metric, resetAt, ok := findUsageWindow(payload, dayKeys, "1d", now); ok { + snap.Metrics["usage_weekly"] = core.Metric{ + Limit: metric.Limit, + Remaining: metric.Remaining, + Used: metric.Used, + Unit: metric.Unit, + Window: "1w", + } + snap.Metrics["usage_one_day"] = metric + if !resetAt.IsZero() { + snap.Resets["usage_weekly"] = resetAt + snap.Resets["usage_one_day"] = resetAt + } + found = true + } + + return found +} + +func findUsageWindow(payload map[string]any, keys []string, fallbackWindow string, now time.Time) (core.Metric, time.Time, bool) { + sources := []map[string]any{ + payload, + anyMapCaseInsensitive(payload, "usage"), + anyMapCaseInsensitive(payload, "cloud_usage"), + anyMapCaseInsensitive(payload, "quota"), + } + + for _, src := range sources { + if len(src) == 0 { + continue + } + for _, key := range keys { + v, ok := anyValueCaseInsensitive(src, key) + if !ok { + continue + } + if metric, resetAt, ok := parseUsageWindowValue(v, fallbackWindow, now); ok { + return metric, resetAt, true + } + } + } + + return core.Metric{}, time.Time{}, false +} + +func parseUsageWindowValue(v any, fallbackWindow string, now time.Time) (core.Metric, time.Time, bool) { + if pct, ok := anyFloat(v); ok { + return core.Metric{ + Used: core.Float64Ptr(pct), + Unit: "%", + Window: fallbackWindow, + }, time.Time{}, true + } + + switch raw := v.(type) { + case string: + s := strings.TrimSpace(strings.TrimSuffix(raw, "%")) + if f, err := strconv.ParseFloat(s, 64); err == nil { + return core.Metric{ + Used: core.Float64Ptr(f), + Unit: "%", + Window: 
fallbackWindow, + }, time.Time{}, true + } + case map[string]any: + var metric core.Metric + metric.Window = fallbackWindow + metric.Unit = anyStringCaseInsensitive(raw, "unit") + if metric.Unit == "" { + metric.Unit = "%" + } + + if window := anyStringCaseInsensitive(raw, "window"); window != "" { + metric.Window = strings.TrimSpace(window) + } + + if used, ok := anyFloatCaseInsensitive(raw, "used", "usage", "value"); ok { + metric.Used = core.Float64Ptr(used) + } + if limit, ok := anyFloatCaseInsensitive(raw, "limit", "max"); ok { + metric.Limit = core.Float64Ptr(limit) + } + if remaining, ok := anyFloatCaseInsensitive(raw, "remaining", "left"); ok { + metric.Remaining = core.Float64Ptr(remaining) + } + if pct, ok := anyFloatCaseInsensitive(raw, "percent", "pct", "used_percent", "usage_percent"); ok { + metric.Unit = "%" + metric.Used = core.Float64Ptr(pct) + metric.Limit = nil + metric.Remaining = nil + } + + var resetAt time.Time + if resetRaw := anyStringCaseInsensitive(raw, "reset_at", "resets_at", "reset_time", "reset"); resetRaw != "" { + if t, ok := parseAnyTime(resetRaw); ok { + resetAt = t + } + } + if resetAt.IsZero() { + if seconds, ok := anyFloatCaseInsensitive(raw, "reset_in", "reset_in_seconds", "resets_in", "seconds_to_reset"); ok && seconds > 0 { + resetAt = now.Add(time.Duration(seconds * float64(time.Second))) + } + } + + if metric.Used != nil || metric.Limit != nil || metric.Remaining != nil { + return metric, resetAt, true + } + } + + return core.Metric{}, time.Time{}, false +} + +func finalizeUsageWindows(snap *core.UsageSnapshot, now time.Time) { + now = now.In(time.Local) + blockStart, blockEnd := currentFiveHourBlock(now) + + if _, ok := snap.Metrics["usage_five_hour"]; ok { + if _, ok := snap.Resets["usage_five_hour"]; !ok { + snap.Resets["usage_five_hour"] = blockEnd + } + if _, ok := snap.Attributes["block_start"]; !ok { + snap.SetAttribute("block_start", blockStart.Format(time.RFC3339)) + } + if _, ok := snap.Attributes["block_end"]; 
!ok { + snap.SetAttribute("block_end", blockEnd.Format(time.RFC3339)) + } + } + + hundred := 100.0 + for _, key := range []string{"usage_five_hour", "usage_weekly", "usage_one_day"} { + if m, ok := snap.Metrics[key]; ok && m.Unit == "%" && m.Limit == nil { + m.Limit = core.Float64Ptr(hundred) + if m.Used != nil && m.Remaining == nil { + rem := hundred - *m.Used + m.Remaining = core.Float64Ptr(rem) + } + snap.Metrics[key] = m + } + } +} + +func currentFiveHourBlock(now time.Time) (time.Time, time.Time) { + startHour := (now.Hour() / 5) * 5 + start := time.Date(now.Year(), now.Month(), now.Day(), startHour, 0, 0, 0, now.Location()) + end := start.Add(5 * time.Hour) + return start, end +} + +func resolveCloudBaseURL(acct core.AccountConfig) string { + normalize := func(raw string) string { + raw = strings.TrimSpace(strings.TrimRight(raw, "/")) + if raw == "" { + return "" + } + u, err := url.Parse(raw) + if err != nil { + return raw + } + switch strings.TrimSpace(strings.ToLower(u.Path)) { + case "", "/": + u.Path = "" + case "/api", "/api/v1": + u.Path = "" + } + u.RawQuery = "" + u.Fragment = "" + return strings.TrimRight(u.String(), "/") + } + + if acct.ExtraData != nil { + if v := strings.TrimSpace(acct.ExtraData["cloud_base_url"]); v != "" { + return normalize(v) + } + } + if strings.HasPrefix(strings.ToLower(acct.BaseURL), "https://") && strings.Contains(strings.ToLower(acct.BaseURL), "ollama.com") { + return normalize(acct.BaseURL) + } + return normalize(defaultCloudBaseURL) +} diff --git a/internal/providers/ollama/desktop_db.go b/internal/providers/ollama/desktop_db.go new file mode 100644 index 0000000..cd1f8b4 --- /dev/null +++ b/internal/providers/ollama/desktop_db.go @@ -0,0 +1,166 @@ +package ollama + +import ( + "context" + "database/sql" + "fmt" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (p *Provider) fetchDesktopDB(ctx context.Context, acct core.AccountConfig, snap *core.UsageSnapshot) (bool, error) { + dbPath := 
resolveDesktopDBPath(acct) + if dbPath == "" || !fileExists(dbPath) { + return false, nil + } + + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + return false, fmt.Errorf("ollama: opening desktop db: %w", err) + } + defer db.Close() + + if err := db.PingContext(ctx); err != nil { + return false, fmt.Errorf("ollama: pinging desktop db: %w", err) + } + + snap.Raw["desktop_db_path"] = dbPath + + setCountMetric := func(key string, count int64, unit, window string) { + setValueMetric(snap, key, float64(count), unit, window) + } + + totalChats, err := queryCount(ctx, db, `SELECT COUNT(*) FROM chats`) + if err == nil { + setCountMetric("total_conversations", totalChats, "chats", "all-time") + } + + totalMessages, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages`) + if err == nil { + setCountMetric("total_messages", totalMessages, "messages", "all-time") + } + + totalUserMessages, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'user'`) + if err == nil { + setCountMetric("total_user_messages", totalUserMessages, "messages", "all-time") + } + + totalAssistantMessages, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'assistant'`) + if err == nil { + setCountMetric("total_assistant_messages", totalAssistantMessages, "messages", "all-time") + } + + totalToolCalls, err := queryCount(ctx, db, `SELECT COUNT(*) FROM tool_calls`) + if err == nil { + setCountMetric("total_tool_calls", totalToolCalls, "calls", "all-time") + } + + totalAttachments, err := queryCount(ctx, db, `SELECT COUNT(*) FROM attachments`) + if err == nil { + setCountMetric("total_attachments", totalAttachments, "attachments", "all-time") + } + + sessionsToday, err := queryCount(ctx, db, `SELECT COUNT(*) FROM chats WHERE date(created_at) = date('now', 'localtime')`) + if err == nil { + setCountMetric("sessions_today", sessionsToday, "sessions", "today") + } + + messagesToday, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE 
date(created_at) = date('now', 'localtime')`) + if err == nil { + setCountMetric("messages_today", messagesToday, "messages", "today") + } + + userMessagesToday, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'user' AND date(created_at) = date('now', 'localtime')`) + if err == nil { + setCountMetric("requests_today", userMessagesToday, "requests", "today") + } + + sessions5h, err := queryCount(ctx, db, `SELECT COUNT(*) FROM chats WHERE datetime(created_at) >= datetime('now', '-5 hours')`) + if err == nil { + setCountMetric("sessions_5h", sessions5h, "sessions", "5h") + } + + sessions1d, err := queryCount(ctx, db, `SELECT COUNT(*) FROM chats WHERE datetime(created_at) >= datetime('now', '-24 hours')`) + if err == nil { + setCountMetric("sessions_1d", sessions1d, "sessions", "1d") + } + + messages5h, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE datetime(created_at) >= datetime('now', '-5 hours')`) + if err == nil { + setCountMetric("messages_5h", messages5h, "messages", "5h") + } + + messages1d, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE datetime(created_at) >= datetime('now', '-24 hours')`) + if err == nil { + setCountMetric("messages_1d", messages1d, "messages", "1d") + } + + requests5h, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'user' AND datetime(created_at) >= datetime('now', '-5 hours')`) + if err == nil { + setCountMetric("requests_5h", requests5h, "requests", "5h") + } + + requests1d, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'user' AND datetime(created_at) >= datetime('now', '-24 hours')`) + if err == nil { + setCountMetric("requests_1d", requests1d, "requests", "1d") + } + + toolCallsToday, err := queryCount(ctx, db, `SELECT COUNT(*) + FROM tool_calls tc + JOIN messages m ON tc.message_id = m.id + WHERE date(m.created_at) = date('now', 'localtime')`) + if err == nil { + setCountMetric("tool_calls_today", toolCallsToday, "calls", "today") 
+ } + + toolCalls5h, err := queryCount(ctx, db, `SELECT COUNT(*) + FROM tool_calls tc + JOIN messages m ON tc.message_id = m.id + WHERE datetime(m.created_at) >= datetime('now', '-5 hours')`) + if err == nil { + setCountMetric("tool_calls_5h", toolCalls5h, "calls", "5h") + } + + toolCalls1d, err := queryCount(ctx, db, `SELECT COUNT(*) + FROM tool_calls tc + JOIN messages m ON tc.message_id = m.id + WHERE datetime(m.created_at) >= datetime('now', '-24 hours')`) + if err == nil { + setCountMetric("tool_calls_1d", toolCalls1d, "calls", "1d") + } + + attachmentsToday, err := queryCount(ctx, db, `SELECT COUNT(*) + FROM attachments a + JOIN messages m ON a.message_id = m.id + WHERE date(m.created_at) = date('now', 'localtime')`) + if err == nil { + setCountMetric("attachments_today", attachmentsToday, "attachments", "today") + } + + if err := populateModelUsageFromDB(ctx, db, snap); err != nil { + snap.SetDiagnostic("desktop_model_usage_error", err.Error()) + } + if err := populateEstimatedTokenUsageFromDB(ctx, db, snap, p.now()); err != nil { + snap.SetDiagnostic("desktop_token_estimate_error", err.Error()) + } + if err := populateSourceUsageFromDB(ctx, db, snap); err != nil { + snap.SetDiagnostic("desktop_source_usage_error", err.Error()) + } + if err := populateToolUsageFromDB(ctx, db, snap); err != nil { + snap.SetDiagnostic("desktop_tool_usage_error", err.Error()) + } + if err := populateDailySeriesFromDB(ctx, db, snap); err != nil { + snap.SetDiagnostic("desktop_daily_series_error", err.Error()) + } + if err := populateThinkingMetricsFromDB(ctx, db, snap); err != nil { + snap.SetDiagnostic("desktop_thinking_error", err.Error()) + } + if err := populateSettingsFromDB(ctx, db, snap); err != nil { + snap.SetDiagnostic("desktop_settings_error", err.Error()) + } + if err := populateCachedUserFromDB(ctx, db, snap); err != nil { + snap.SetDiagnostic("desktop_user_error", err.Error()) + } + + return true, nil +} diff --git 
a/internal/providers/ollama/desktop_db_breakdowns.go b/internal/providers/ollama/desktop_db_breakdowns.go new file mode 100644 index 0000000..f14787a --- /dev/null +++ b/internal/providers/ollama/desktop_db_breakdowns.go @@ -0,0 +1,377 @@ +package ollama + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func populateModelUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { + rows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) FROM messages WHERE model_name IS NOT NULL AND trim(model_name) != '' GROUP BY model_name ORDER BY COUNT(*) DESC`) + if err != nil { + return err + } + defer rows.Close() + + var top []string + for rows.Next() { + var rawModel string + var count float64 + if err := rows.Scan(&rawModel, &count); err != nil { + return err + } + model := normalizeModelName(rawModel) + if model == "" { + continue + } + + metricKey := "model_" + sanitizeMetricPart(model) + "_requests" + setValueMetric(snap, metricKey, count, "requests", "all-time") + + rec := core.ModelUsageRecord{ + RawModelID: model, + RawSource: "sqlite", + Window: "all-time", + Requests: core.Float64Ptr(count), + } + rec.SetDimension("provider", "ollama") + snap.AppendModelUsage(rec) + + if len(top) < 6 { + top = append(top, fmt.Sprintf("%s=%.0f", model, count)) + } + } + if err := rows.Err(); err != nil { + return err + } + + if len(top) > 0 { + snap.Raw["models_usage_top"] = strings.Join(top, ", ") + } + + todayRows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) + FROM messages + WHERE model_name IS NOT NULL AND trim(model_name) != '' + AND date(created_at) = date('now', 'localtime') + GROUP BY model_name`) + if err == nil { + defer todayRows.Close() + for todayRows.Next() { + var rawModel string + var count float64 + if err := todayRows.Scan(&rawModel, &count); err != nil { + return err + } + model := normalizeModelName(rawModel) + if model == "" { + continue + } + + 
metricKey := "model_" + sanitizeMetricPart(model) + "_requests_today" + setValueMetric(snap, metricKey, count, "requests", "today") + + rec := core.ModelUsageRecord{ + RawModelID: model, + RawSource: "sqlite", + Window: "today", + Requests: core.Float64Ptr(count), + } + rec.SetDimension("provider", "ollama") + snap.AppendModelUsage(rec) + } + if err := todayRows.Err(); err != nil { + return err + } + } + + perDayRows, err := db.QueryContext(ctx, `SELECT date(created_at), model_name, COUNT(*) + FROM messages + WHERE model_name IS NOT NULL AND trim(model_name) != '' + GROUP BY date(created_at), model_name`) + if err != nil { + return nil + } + defer perDayRows.Close() + + perModelDaily := make(map[string]map[string]float64) + for perDayRows.Next() { + var date string + var rawModel string + var count float64 + if err := perDayRows.Scan(&date, &rawModel, &count); err != nil { + return err + } + model := normalizeModelName(rawModel) + date = strings.TrimSpace(date) + if model == "" || date == "" { + continue + } + if perModelDaily[model] == nil { + perModelDaily[model] = make(map[string]float64) + } + perModelDaily[model][date] = count + } + if err := perDayRows.Err(); err != nil { + return err + } + + for model, byDate := range perModelDaily { + seriesKey := "requests_model_" + sanitizeMetricPart(model) + snap.DailySeries[seriesKey] = core.SortedTimePoints(byDate) + usageSeriesKey := "usage_model_" + sanitizeMetricPart(model) + snap.DailySeries[usageSeriesKey] = core.SortedTimePoints(byDate) + } + + return nil +} + +func populateSourceUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { + allTimeRows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) + FROM messages + WHERE model_name IS NOT NULL AND trim(model_name) != '' + GROUP BY model_name`) + if err != nil { + return err + } + defer allTimeRows.Close() + + allTimeBySource := make(map[string]float64) + for allTimeRows.Next() { + var rawModel string + var count float64 + if err := 
allTimeRows.Scan(&rawModel, &count); err != nil { + return err + } + model := normalizeModelName(rawModel) + source := sourceFromModelName(model) + allTimeBySource[source] += count + } + if err := allTimeRows.Err(); err != nil { + return err + } + + for source, count := range allTimeBySource { + if count <= 0 { + continue + } + sourceKey := sanitizeMetricPart(source) + setValueMetric(snap, "source_"+sourceKey+"_requests", count, "requests", "all-time") + } + + todayRows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) + FROM messages + WHERE model_name IS NOT NULL AND trim(model_name) != '' + AND date(created_at) = date('now', 'localtime') + GROUP BY model_name`) + if err == nil { + defer todayRows.Close() + todayBySource := make(map[string]float64) + for todayRows.Next() { + var rawModel string + var count float64 + if err := todayRows.Scan(&rawModel, &count); err != nil { + return err + } + model := normalizeModelName(rawModel) + source := sourceFromModelName(model) + todayBySource[source] += count + } + if err := todayRows.Err(); err != nil { + return err + } + + for source, count := range todayBySource { + if count <= 0 { + continue + } + sourceKey := sanitizeMetricPart(source) + setValueMetric(snap, "source_"+sourceKey+"_requests_today", count, "requests", "today") + } + } + + perDayRows, err := db.QueryContext(ctx, `SELECT date(created_at), model_name, COUNT(*) + FROM messages + WHERE model_name IS NOT NULL AND trim(model_name) != '' + GROUP BY date(created_at), model_name`) + if err != nil { + return nil + } + defer perDayRows.Close() + + perSourceDaily := make(map[string]map[string]float64) + for perDayRows.Next() { + var day string + var rawModel string + var count float64 + if err := perDayRows.Scan(&day, &rawModel, &count); err != nil { + return err + } + day = strings.TrimSpace(day) + if day == "" { + continue + } + model := normalizeModelName(rawModel) + source := sourceFromModelName(model) + sourceKey := sanitizeMetricPart(source) + if 
perSourceDaily[sourceKey] == nil { + perSourceDaily[sourceKey] = make(map[string]float64) + } + perSourceDaily[sourceKey][day] += count + } + if err := perDayRows.Err(); err != nil { + return err + } + + for sourceKey, byDay := range perSourceDaily { + if len(byDay) == 0 { + continue + } + snap.DailySeries["usage_source_"+sourceKey] = core.SortedTimePoints(byDay) + } + + return nil +} + +func populateToolUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { + hasFunctionName, err := tableHasColumn(ctx, db, "tool_calls", "function_name") + if err != nil || !hasFunctionName { + return nil + } + + rows, err := db.QueryContext(ctx, `SELECT function_name, COUNT(*) + FROM tool_calls + WHERE trim(function_name) != '' + GROUP BY function_name + ORDER BY COUNT(*) DESC`) + if err != nil { + return err + } + defer rows.Close() + + var top []string + for rows.Next() { + var toolName string + var count float64 + if err := rows.Scan(&toolName, &count); err != nil { + return err + } + toolName = strings.TrimSpace(toolName) + if toolName == "" { + continue + } + + setValueMetric(snap, "tool_"+sanitizeMetricPart(toolName), count, "calls", "all-time") + if len(top) < 6 { + top = append(top, fmt.Sprintf("%s=%.0f", toolName, count)) + } + } + if err := rows.Err(); err != nil { + return err + } + if len(top) > 0 { + snap.Raw["tool_usage"] = strings.Join(top, ", ") + } + + perDayRows, err := db.QueryContext(ctx, `SELECT date(m.created_at), tc.function_name, COUNT(*) + FROM tool_calls tc + JOIN messages m ON tc.message_id = m.id + WHERE trim(tc.function_name) != '' + GROUP BY date(m.created_at), tc.function_name`) + if err != nil { + return nil + } + defer perDayRows.Close() + + perToolDaily := make(map[string]map[string]float64) + for perDayRows.Next() { + var day string + var toolName string + var count float64 + if err := perDayRows.Scan(&day, &toolName, &count); err != nil { + return err + } + day = strings.TrimSpace(day) + toolKey := 
sanitizeMetricPart(toolName) + if day == "" || toolKey == "" { + continue + } + if perToolDaily[toolKey] == nil { + perToolDaily[toolKey] = make(map[string]float64) + } + perToolDaily[toolKey][day] += count + } + if err := perDayRows.Err(); err != nil { + return err + } + + for toolKey, byDay := range perToolDaily { + if len(byDay) == 0 { + continue + } + snap.DailySeries["usage_tool_"+toolKey] = core.SortedTimePoints(byDay) + } + + return nil +} + +func sourceFromModelName(model string) string { + normalized := normalizeModelName(model) + if normalized == "" { + return "unknown" + } + if strings.HasSuffix(normalized, ":cloud") || strings.Contains(normalized, "-cloud") { + return "cloud" + } + return "local" +} + +func populateDailySeriesFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { + dailyQueries := []struct { + key string + query string + }{ + {"messages", `SELECT date(created_at), COUNT(*) FROM messages GROUP BY date(created_at)`}, + {"sessions", `SELECT date(created_at), COUNT(*) FROM chats GROUP BY date(created_at)`}, + {"tool_calls", `SELECT date(m.created_at), COUNT(*) + FROM tool_calls tc + JOIN messages m ON tc.message_id = m.id + GROUP BY date(m.created_at)`}, + {"requests_user", `SELECT date(created_at), COUNT(*) FROM messages WHERE role = 'user' GROUP BY date(created_at)`}, + } + + for _, dq := range dailyQueries { + rows, err := db.QueryContext(ctx, dq.query) + if err != nil { + continue + } + + byDate := make(map[string]float64) + for rows.Next() { + var date string + var count float64 + if err := rows.Scan(&date, &count); err != nil { + rows.Close() + return err + } + if strings.TrimSpace(date) == "" { + continue + } + byDate[date] = count + } + rows.Close() + if len(byDate) > 0 { + points := core.SortedTimePoints(byDate) + snap.DailySeries[dq.key] = points + if dq.key == "requests_user" { + if _, exists := snap.DailySeries["requests"]; !exists { + snap.DailySeries["requests"] = points + } + } + } + } + + return nil +} 
diff --git a/internal/providers/ollama/desktop_db_settings.go b/internal/providers/ollama/desktop_db_settings.go new file mode 100644 index 0000000..a300807 --- /dev/null +++ b/internal/providers/ollama/desktop_db_settings.go @@ -0,0 +1,178 @@ +package ollama + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func queryCount(ctx context.Context, db *sql.DB, query string) (int64, error) { + var count int64 + if err := db.QueryRowContext(ctx, query).Scan(&count); err != nil { + return 0, err + } + return count, nil +} + +func tableHasColumn(ctx context.Context, db *sql.DB, table, column string) (bool, error) { + table = strings.TrimSpace(table) + column = strings.TrimSpace(column) + if table == "" || column == "" { + return false, nil + } + safeTable := strings.ReplaceAll(table, "'", "''") + query := fmt.Sprintf(`SELECT COUNT(*) FROM pragma_table_info('%s') WHERE name = ?`, safeTable) + var count int + if err := db.QueryRowContext(ctx, query, column).Scan(&count); err != nil { + return false, err + } + return count > 0, nil +} + +func populateThinkingMetricsFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { + hasStart, _ := tableHasColumn(ctx, db, "messages", "thinking_time_start") + hasEnd, _ := tableHasColumn(ctx, db, "messages", "thinking_time_end") + if !hasStart || !hasEnd { + return nil + } + + rows, err := db.QueryContext(ctx, ` + SELECT model_name, + COUNT(*) as think_count, + SUM(CAST((julianday(thinking_time_end) - julianday(thinking_time_start)) * 86400 AS REAL)) as total_think_seconds, + AVG(CAST((julianday(thinking_time_end) - julianday(thinking_time_start)) * 86400 AS REAL)) as avg_think_seconds + FROM messages + WHERE thinking_time_start IS NOT NULL AND thinking_time_end IS NOT NULL + AND thinking_time_start != '' AND thinking_time_end != '' + GROUP BY model_name`) + if err != nil { + return err + } + defer rows.Close() + + var totalThinkRequests 
int64 + var totalThinkSeconds float64 + var totalAvgCount int + + for rows.Next() { + var rawModel sql.NullString + var thinkCount int64 + var totalSec sql.NullFloat64 + var avgSec sql.NullFloat64 + + if err := rows.Scan(&rawModel, &thinkCount, &totalSec, &avgSec); err != nil { + return err + } + + totalThinkRequests += thinkCount + if totalSec.Valid { + totalThinkSeconds += totalSec.Float64 + } + totalAvgCount++ + + if rawModel.Valid && strings.TrimSpace(rawModel.String) != "" { + model := normalizeModelName(rawModel.String) + if model != "" { + prefix := "model_" + sanitizeMetricPart(model) + if totalSec.Valid { + setValueMetric(snap, prefix+"_thinking_seconds", totalSec.Float64, "seconds", "all-time") + } + } + } + } + if err := rows.Err(); err != nil { + return err + } + + if totalThinkRequests > 0 { + setValueMetric(snap, "thinking_requests", float64(totalThinkRequests), "requests", "all-time") + setValueMetric(snap, "total_thinking_seconds", totalThinkSeconds, "seconds", "all-time") + if totalAvgCount > 0 { + setValueMetric(snap, "avg_thinking_seconds", totalThinkSeconds/float64(totalThinkRequests), "seconds", "all-time") + } + } + + return nil +} + +func populateSettingsFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { + var selectedModel sql.NullString + var contextLength sql.NullInt64 + err := db.QueryRowContext(ctx, `SELECT selected_model, context_length FROM settings LIMIT 1`).Scan(&selectedModel, &contextLength) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil + } + return err + } + + if selectedModel.Valid && strings.TrimSpace(selectedModel.String) != "" { + snap.SetAttribute("selected_model", selectedModel.String) + } + if contextLength.Valid && contextLength.Int64 > 0 { + setValueMetric(snap, "configured_context_length", float64(contextLength.Int64), "tokens", "current") + } + + type settingsCol struct { + column string + attr string + } + extraCols := []settingsCol{ + {"websearch_enabled", 
"websearch_enabled"}, + {"turbo_enabled", "turbo_enabled"}, + {"agent", "agent_mode"}, + {"tools", "tools_enabled"}, + {"think_enabled", "think_enabled"}, + {"airplane_mode", "airplane_mode"}, + {"device_id", "device_id"}, + } + for _, col := range extraCols { + has, _ := tableHasColumn(ctx, db, "settings", col.column) + if !has { + continue + } + var val sql.NullString + query := fmt.Sprintf(`SELECT CAST(%s AS TEXT) FROM settings LIMIT 1`, col.column) + if err := db.QueryRowContext(ctx, query).Scan(&val); err != nil { + continue + } + if val.Valid && strings.TrimSpace(val.String) != "" { + snap.SetAttribute(col.attr, val.String) + } + } + + return nil +} + +func populateCachedUserFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { + var name sql.NullString + var email sql.NullString + var plan sql.NullString + var cachedAt sql.NullString + + err := db.QueryRowContext(ctx, `SELECT name, email, plan, cached_at FROM users ORDER BY cached_at DESC LIMIT 1`).Scan(&name, &email, &plan, &cachedAt) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil + } + return err + } + + if name.Valid && strings.TrimSpace(name.String) != "" { + snap.SetAttribute("account_name", name.String) + } + if email.Valid && strings.TrimSpace(email.String) != "" { + snap.SetAttribute("account_email", email.String) + } + if plan.Valid && strings.TrimSpace(plan.String) != "" { + snap.SetAttribute("plan_name", plan.String) + } + if cachedAt.Valid && strings.TrimSpace(cachedAt.String) != "" { + snap.SetAttribute("account_cached_at", cachedAt.String) + } + return nil +} diff --git a/internal/providers/ollama/desktop_db_tokens.go b/internal/providers/ollama/desktop_db_tokens.go new file mode 100644 index 0000000..e18d89d --- /dev/null +++ b/internal/providers/ollama/desktop_db_tokens.go @@ -0,0 +1,303 @@ +package ollama + +import ( + "context" + "database/sql" + "fmt" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func 
populateEstimatedTokenUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot, now time.Time) error {
	// Estimates token usage from message character counts (chars/4 heuristic)
	// because the desktop DB stores no real token counts. Messages are read
	// ordered by chat and time so user-message characters can be attributed
	// as "input" to the next assistant reply in the same chat.
	hasThinking, err := tableHasColumn(ctx, db, "messages", "thinking")
	if err != nil {
		return err
	}

	// Older schemas have no thinking column; substitute an empty literal.
	thinkingExpr := `''`
	if hasThinking {
		thinkingExpr = `COALESCE(thinking, '')`
	}

	query := fmt.Sprintf(`SELECT chat_id, id, role, model_name, COALESCE(content, ''), %s, COALESCE(created_at, '')
		FROM messages
		ORDER BY chat_id, datetime(created_at), id`, thinkingExpr)
	rows, err := db.QueryContext(ctx, query)
	if err != nil {
		return err
	}
	defer rows.Close()

	type tokenAgg struct {
		input    float64
		output   float64
		requests float64
	}
	ensureAgg := func(m map[string]*tokenAgg, key string) *tokenAgg {
		if m[key] == nil {
			m[key] = &tokenAgg{}
		}
		return m[key]
	}
	ensureDaily := func(m map[string]map[string]float64, key string) map[string]float64 {
		if m[key] == nil {
			m[key] = make(map[string]float64)
		}
		return m[key]
	}

	modelAgg := make(map[string]*tokenAgg)
	sourceAgg := make(map[string]*tokenAgg)
	dailyTokens := make(map[string]float64)
	dailyRequests := make(map[string]float64)
	modelDailyTokens := make(map[string]map[string]float64)
	sourceDailyTokens := make(map[string]map[string]float64)
	sourceDailyRequests := make(map[string]map[string]float64)
	sessionsBySource := make(map[string]float64)

	// Rolling-window cutoffs are computed in local time to match the
	// day-bucket formatting below.
	now = now.In(time.Local)
	start5h := now.Add(-5 * time.Hour)
	start1d := now.Add(-24 * time.Hour)
	start7d := now.Add(-7 * 24 * time.Hour)

	var tokens5h float64
	var tokens1d float64
	var tokens7d float64
	var tokensToday float64

	// Per-chat state: input chars accumulate until the next assistant
	// message consumes them; chatSources tracks which sources appeared in
	// the current chat so sessions-per-source can be counted once per chat.
	currentChat := ""
	pendingInputChars := 0
	chatSources := make(map[string]bool)
	flushChat := func() {
		for source := range chatSources {
			sessionsBySource[source]++
		}
		clear(chatSources)
		pendingInputChars = 0
	}

	for rows.Next() {
		var chatID string
		var id int64
		var role sql.NullString
		var modelName sql.NullString
		var content sql.NullString
		var thinking sql.NullString
		var createdAt sql.NullString

		if err := rows.Scan(&chatID, &id, &role, &modelName, &content, &thinking, &createdAt); err != nil {
			return err
		}

		if currentChat == "" {
			currentChat = chatID
		}
		if chatID != currentChat {
			flushChat()
			currentChat = chatID
		}

		roleVal := strings.ToLower(strings.TrimSpace(role.String))
		contentLen := len(content.String)
		thinkingLen := len(thinking.String)

		ts := time.Time{}
		if createdAt.Valid && strings.TrimSpace(createdAt.String) != "" {
			if parsed, ok := parseDesktopDBTime(createdAt.String); ok {
				ts = parsed.In(time.Local)
			}
		}
		// Day bucket: prefer the parsed timestamp; fall back to the raw
		// string's leading YYYY-MM-DD when parsing failed.
		day := ""
		if !ts.IsZero() {
			day = ts.Format("2006-01-02")
		} else if createdAt.Valid && len(createdAt.String) >= 10 {
			day = createdAt.String[:10]
		}

		if roleVal == "user" {
			pendingInputChars += contentLen + thinkingLen
			continue
		}
		// Only assistant messages emit usage; system/tool rows are skipped.
		if roleVal != "assistant" {
			continue
		}

		model := strings.TrimSpace(modelName.String)
		model = normalizeModelName(model)
		if model == "" {
			continue
		}
		modelKey := sanitizeMetricPart(model)
		source := sourceFromModelName(model)
		sourceKey := sanitizeMetricPart(source)

		inputTokens := estimateTokensFromChars(pendingInputChars)
		outputTokens := estimateTokensFromChars(contentLen + thinkingLen)
		totalTokens := inputTokens + outputTokens
		pendingInputChars = 0

		modelTotals := ensureAgg(modelAgg, model)
		modelTotals.input += inputTokens
		modelTotals.output += outputTokens
		modelTotals.requests++

		sourceTotals := ensureAgg(sourceAgg, sourceKey)
		sourceTotals.input += inputTokens
		sourceTotals.output += outputTokens
		sourceTotals.requests++
		chatSources[sourceKey] = true

		if day != "" {
			dailyTokens[day] += totalTokens
			dailyRequests[day]++
			ensureDaily(modelDailyTokens, modelKey)[day] += totalTokens
			ensureDaily(sourceDailyTokens, sourceKey)[day] += totalTokens
			ensureDaily(sourceDailyRequests, sourceKey)[day]++
			if day == now.Format("2006-01-02") {
				tokensToday += totalTokens
			}
		}

		if !ts.IsZero() {
			if ts.After(start5h) {
				tokens5h += totalTokens
			}
			if ts.After(start1d) {
				tokens1d += totalTokens
			}
			if ts.After(start7d) {
				tokens7d += totalTokens
			}
		}
	}
	if err := rows.Err(); err != nil {
		return err
	}
	// Flush the final chat's session/source bookkeeping.
	if currentChat != "" {
		flushChat()
	}

	type modelTotal struct {
		name string
		tok  float64
	}
	var topModels []modelTotal
	for model, totals := range modelAgg {
		modelKey := sanitizeMetricPart(model)
		setValueMetric(snap, "model_"+modelKey+"_input_tokens", totals.input, "tokens", "all-time")
		setValueMetric(snap, "model_"+modelKey+"_output_tokens", totals.output, "tokens", "all-time")
		setValueMetric(snap, "model_"+modelKey+"_total_tokens", totals.input+totals.output, "tokens", "all-time")

		rec := core.ModelUsageRecord{
			RawModelID:   model,
			RawSource:    "sqlite_estimate",
			Window:       "all-time",
			InputTokens:  core.Float64Ptr(totals.input),
			OutputTokens: core.Float64Ptr(totals.output),
			TotalTokens:  core.Float64Ptr(totals.input + totals.output),
			Requests:     core.Float64Ptr(totals.requests),
		}
		rec.SetDimension("provider", "ollama")
		rec.SetDimension("estimation", "chars_div_4")
		snap.AppendModelUsage(rec)

		topModels = append(topModels, modelTotal{name: model, tok: totals.input + totals.output})
	}
	// Sort by tokens desc, name asc as a deterministic tiebreak.
	sort.Slice(topModels, func(i, j int) bool {
		if topModels[i].tok == topModels[j].tok {
			return topModels[i].name < topModels[j].name
		}
		return topModels[i].tok > topModels[j].tok
	})
	if len(topModels) > 0 {
		top := make([]string, 0, min(len(topModels), 6))
		for i := 0; i < len(topModels) && i < 6; i++ {
			top = append(top, fmt.Sprintf("%s=%.0f", topModels[i].name, topModels[i].tok))
		}
		snap.Raw["model_tokens_estimated_top"] = strings.Join(top, ", ")
	}

	for sourceKey, totals := range sourceAgg {
		totalTokens := totals.input + totals.output
		setValueMetric(snap, "client_"+sourceKey+"_input_tokens", totals.input, "tokens", "all-time")
		setValueMetric(snap, "client_"+sourceKey+"_output_tokens", totals.output, "tokens", "all-time")
		setValueMetric(snap, "client_"+sourceKey+"_total_tokens", totalTokens, "tokens", "all-time")
		setValueMetric(snap, "client_"+sourceKey+"_requests", totals.requests, "requests", "all-time")
		if sessions := sessionsBySource[sourceKey]; sessions > 0 {
			setValueMetric(snap, "client_"+sourceKey+"_sessions", sessions, "sessions", "all-time")
		}

		setValueMetric(snap, "provider_"+sourceKey+"_input_tokens", totals.input, "tokens", "all-time")
		setValueMetric(snap, "provider_"+sourceKey+"_output_tokens", totals.output, "tokens", "all-time")
		setValueMetric(snap, "provider_"+sourceKey+"_requests", totals.requests, "requests", "all-time")
	}

	for sourceKey, byDay := range sourceDailyTokens {
		if len(byDay) == 0 {
			continue
		}
		snap.DailySeries["tokens_client_"+sourceKey] = core.SortedTimePoints(byDay)
	}
	for sourceKey, byDay := range sourceDailyRequests {
		if len(byDay) == 0 {
			continue
		}
		snap.DailySeries["usage_client_"+sourceKey] = core.SortedTimePoints(byDay)
	}
	for modelKey, byDay := range modelDailyTokens {
		if len(byDay) == 0 {
			continue
		}
		snap.DailySeries["tokens_model_"+modelKey] = core.SortedTimePoints(byDay)
	}
	if len(dailyTokens) > 0 {
		snap.DailySeries["analytics_tokens"] = core.SortedTimePoints(dailyTokens)
	}
	if len(dailyRequests) > 0 {
		snap.DailySeries["analytics_requests"] = core.SortedTimePoints(dailyRequests)
	}

	if tokensToday > 0 {
		setValueMetric(snap, "tokens_today", tokensToday, "tokens", "today")
	}
	if tokens5h > 0 {
		setValueMetric(snap, "tokens_5h", tokens5h, "tokens", "5h")
	}
	if tokens1d > 0 {
		setValueMetric(snap, "tokens_1d", tokens1d, "tokens", "1d")
	}
	if tokens7d > 0 {
		// NOTE(review): key "7d_tokens" is inconsistent with "tokens_5h"
		// and "tokens_1d" above — confirm downstream consumers before
		// renaming to "tokens_7d".
		setValueMetric(snap, "7d_tokens", tokens7d, "tokens", "7d")
	}

	snap.SetAttribute("token_estimation", "chars_div_4")
	return nil
}

// estimateTokensFromChars approximates a token count as ceil(chars/4),
// the coarse characters-per-token heuristic used throughout this file.
func estimateTokensFromChars(chars int) float64 {
	if chars <= 0 {
		return 0
	}
	return float64((chars + 3) / 4)
}

// parseDesktopDBTime parses the timestamp layouts the desktop app's SQLite
// DB is known to write, interpreting them in local time; it falls back to
// parseAnyTime for anything else.
func parseDesktopDBTime(raw string) (time.Time, bool) {
	raw = strings.TrimSpace(raw)
	if raw == "" {
		return time.Time{}, false
	}
	for _, layout := range []string{
		"2006-01-02 15:04:05.999999999",
		"2006-01-02 15:04:05",
		"2006-01-02T15:04:05.999999999",
		"2006-01-02T15:04:05",
	} {
		if ts, err := time.ParseInLocation(layout, raw, time.Local); err == nil {
			return ts, true
		}
	}
	return parseAnyTime(raw)
}
diff --git a/internal/providers/ollama/local_api.go b/internal/providers/ollama/local_api.go
new file mode 100644
index 0000000..b94913c
--- /dev/null
+++ b/internal/providers/ollama/local_api.go
@@ -0,0 +1,349 @@
package ollama

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/janekbaraniewski/openusage/internal/core"
	"github.com/janekbaraniewski/openusage/internal/parsers"
)

// fetchLocalAPI probes the local Ollama HTTP API endpoints in sequence and
// reports whether any of them yielded data. status/version failures abort;
// later endpoints return partial hasData alongside the error.
func (p *Provider) fetchLocalAPI(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) {
	var hasData bool

	statusOK, err := p.fetchLocalStatus(ctx, baseURL, snap)
	if err != nil {
		return false, err
	}
	hasData = hasData || statusOK

	versionOK, err := p.fetchLocalVersion(ctx, baseURL, snap)
	if err != nil {
		return false, err
	}
	hasData = hasData || versionOK

	meOK, err := p.fetchLocalMe(ctx, baseURL, snap)
	if err != nil {
		return hasData, err
	}
	hasData = hasData || meOK

	models, tagsOK, err := p.fetchLocalTags(ctx, baseURL, snap)
	if err != nil {
		return hasData, err
	}
	hasData = hasData || tagsOK

	if len(models) > 0 {
		// Detail fetch is best-effort; record the error as a diagnostic.
		if err := p.fetchModelDetails(ctx, baseURL, models, snap); err != nil {
			snap.SetDiagnostic("model_details_error", err.Error())
		}
	}

	psOK, err := p.fetchLocalPS(ctx, baseURL, snap)
	if err != nil {
		return hasData, err
	}
	hasData = hasData || psOK

	return hasData, nil
}

func (p *Provider) fetchLocalVersion(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool,
error) {
	// fetchLocalVersion records the daemon version plus selected redacted
	// build headers from /api/version.
	var resp versionResponse
	code, headers, err := doJSONRequest(ctx, http.MethodGet, baseURL+"/api/version", "", &resp, p.Client())
	if err != nil {
		return false, fmt.Errorf("ollama: local version request failed: %w", err)
	}
	for k, v := range parsers.RedactHeaders(headers) {
		if strings.EqualFold(k, "X-Request-Id") || strings.EqualFold(k, "X-Build-Time") || strings.EqualFold(k, "X-Build-Commit") {
			snap.Raw["local_version_"+normalizeHeaderKey(k)] = v
		}
	}
	if code != http.StatusOK {
		return false, fmt.Errorf("ollama: local version endpoint returned HTTP %d", code)
	}
	if resp.Version != "" {
		snap.SetAttribute("cli_version", resp.Version)
		return true, nil
	}
	return false, nil
}

// fetchLocalStatus reads cloud-related flags from /api/status. The endpoint
// is optional, so request failures and non-200 codes are treated as
// "no data", never as errors.
func (p *Provider) fetchLocalStatus(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) {
	var resp map[string]any
	code, _, err := doJSONRequest(ctx, http.MethodGet, baseURL+"/api/status", "", &resp, p.Client())
	if err != nil {
		return false, nil
	}
	if code == http.StatusNotFound || code == http.StatusMethodNotAllowed {
		return false, nil
	}
	if code != http.StatusOK {
		return false, nil
	}

	cloud := anyMapCaseInsensitive(resp, "cloud")
	if len(cloud) == 0 {
		return false, nil
	}

	var hasData bool
	if disabled, ok := anyBoolCaseInsensitive(cloud, "disabled"); ok {
		snap.SetAttribute("cloud_disabled", strconv.FormatBool(disabled))
		hasData = true
	}
	if source := anyStringCaseInsensitive(cloud, "source"); source != "" {
		snap.SetAttribute("cloud_source", source)
		hasData = true
	}
	return hasData, nil
}

// fetchLocalMe queries /api/me for signed-in cloud account details. A 401/403
// still yields the sign-in URL when present; unsupported endpoints (404/405)
// are not an error.
func (p *Provider) fetchLocalMe(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) {
	var resp map[string]any
	code, _, err := doJSONRequest(ctx, http.MethodPost, baseURL+"/api/me", "", &resp, p.Client())
	if err != nil {
		return false, nil
	}

	switch code {
	case http.StatusOK:
		return applyCloudUserPayload(resp, snap, p.now()), nil
	case http.StatusUnauthorized, http.StatusForbidden:
		if signinURL := anyStringCaseInsensitive(resp, "signin_url", "sign_in_url"); signinURL != "" {
			snap.SetAttribute("signin_url", signinURL)
			return true, nil
		}
		return false, nil
	case http.StatusNotFound, http.StatusMethodNotAllowed:
		return false, nil
	default:
		snap.SetDiagnostic("local_me_status", fmt.Sprintf("HTTP %d", code))
		return false, nil
	}
}

// fetchLocalTags lists installed models from /api/tags and records local vs
// cloud model counts and storage sizes. It returns the model list for the
// follow-up detail fetch.
func (p *Provider) fetchLocalTags(ctx context.Context, baseURL string, snap *core.UsageSnapshot) ([]tagModel, bool, error) {
	var resp tagsResponse
	code, headers, err := doJSONRequest(ctx, http.MethodGet, baseURL+"/api/tags", "", &resp, p.Client())
	if err != nil {
		return nil, false, fmt.Errorf("ollama: local tags request failed: %w", err)
	}
	for k, v := range parsers.RedactHeaders(headers) {
		if strings.EqualFold(k, "X-Request-Id") {
			snap.Raw["local_tags_"+normalizeHeaderKey(k)] = v
		}
	}
	if code != http.StatusOK {
		return nil, false, fmt.Errorf("ollama: local tags endpoint returned HTTP %d", code)
	}

	totalModels := float64(len(resp.Models))
	setValueMetric(snap, "models_total", totalModels, "models", "current")

	var localCount, cloudCount int
	var localBytes, cloudBytes int64
	for _, model := range resp.Models {
		if isCloudModel(model) {
			cloudCount++
			if model.Size > 0 {
				cloudBytes += model.Size
			}
			continue
		}

		localCount++
		if model.Size > 0 {
			localBytes += model.Size
		}
	}

	setValueMetric(snap, "models_local", float64(localCount), "models", "current")
	setValueMetric(snap, "models_cloud", float64(cloudCount), "models", "current")
	setValueMetric(snap, "model_storage_bytes", float64(localBytes), "bytes", "current")
	setValueMetric(snap, "cloud_model_stub_bytes", float64(cloudBytes), "bytes", "current")

	if len(resp.Models) > 0 {
		snap.Raw["models_top"] = summarizeModels(resp.Models, 6)
	}

	return resp.Models, true, nil
}

// fetchLocalPS records currently loaded models from /api/ps: counts, memory
// and VRAM footprints, and the largest active context window.
func (p *Provider) fetchLocalPS(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) {
	var resp processResponse
	code, _, err := doJSONRequest(ctx, http.MethodGet, baseURL+"/api/ps", "", &resp, p.Client())
	if err != nil {
		return false, fmt.Errorf("ollama: local process list request failed: %w", err)
	}
	if code != http.StatusOK {
		return false, fmt.Errorf("ollama: local process list endpoint returned HTTP %d", code)
	}

	setValueMetric(snap, "loaded_models", float64(len(resp.Models)), "models", "current")

	var loadedBytes int64
	var loadedVRAM int64
	maxContext := 0
	for _, m := range resp.Models {
		loadedBytes += m.Size
		loadedVRAM += m.SizeVRAM
		if m.ContextLength > maxContext {
			maxContext = m.ContextLength
		}
	}

	setValueMetric(snap, "loaded_model_bytes", float64(loadedBytes), "bytes", "current")
	setValueMetric(snap, "loaded_vram_bytes", float64(loadedVRAM), "bytes", "current")
	if maxContext > 0 {
		setValueMetric(snap, "context_window", float64(maxContext), "tokens", "current")
	}

	if len(resp.Models) > 0 {
		loadedNames := make([]string, 0, len(resp.Models))
		for _, m := range resp.Models {
			name := normalizeModelName(m.Name)
			if name == "" {
				continue
			}
			loadedNames = append(loadedNames, name)
		}
		if len(loadedNames) > 0 {
			snap.Raw["loaded_models"] = strings.Join(loadedNames, ", ")
		}
	}

	return true, nil
}

// fetchModelDetails calls /api/show for each model and records capability
// flags, quantization, context length, and parameter-size aggregates.
// Per-model request failures are skipped (best-effort).
//
// Fix vs. previous revision: the capabilities loop variable was named `cap`,
// shadowing the builtin; renamed to `capability`.
func (p *Provider) fetchModelDetails(ctx context.Context, baseURL string, models []tagModel, snap *core.UsageSnapshot) error {
	var toolsCount, visionCount, thinkingCount int
	var maxCtx int64
	var totalParams float64

	for _, model := range models {
		name := normalizeModelName(model.Name)
		if name == "" {
			continue
		}

		var show showResponse
		code, err := doJSONPostRequest(ctx, baseURL+"/api/show", map[string]string{"name": model.Name}, &show, p.Client())
		if err != nil || code != http.StatusOK {
			continue
		}

		prefix := "model_" + sanitizeMetricPart(name)

		capSet := make(map[string]bool, len(show.Capabilities))
		for _, capability := range show.Capabilities {
			capSet[strings.TrimSpace(strings.ToLower(capability))] = true
		}
		if capSet["tools"] {
			toolsCount++
			snap.SetAttribute(prefix+"_capability_tools", "true")
		}
		if capSet["vision"] {
			visionCount++
			snap.SetAttribute(prefix+"_capability_vision", "true")
		}
		if capSet["thinking"] {
			thinkingCount++
			snap.SetAttribute(prefix+"_capability_thinking", "true")
		}

		if show.Details.QuantizationLevel != "" {
			snap.SetAttribute(prefix+"_quantization", show.Details.QuantizationLevel)
		}

		// Context length lives under a model-family-prefixed key in
		// model_info, e.g. "llama.context_length".
		if ctxVal, ok := extractContextLength(show.ModelInfo); ok && ctxVal > 0 {
			setValueMetric(snap, prefix+"_context_length", float64(ctxVal), "tokens", "current")
			if ctxVal > maxCtx {
				maxCtx = ctxVal
			}
		}

		if ps := parseParameterSize(show.Details.ParameterSize); ps > 0 {
			totalParams += ps
		}

		rec := core.ModelUsageRecord{
			RawModelID: name,
			RawSource:  "api_show",
			Window:     "current",
		}
		rec.SetDimension("provider", "ollama")
		if capSet["tools"] {
			rec.SetDimension("capability_tools", "true")
		}
		if capSet["vision"] {
			rec.SetDimension("capability_vision", "true")
		}
		if capSet["thinking"] {
			rec.SetDimension("capability_thinking", "true")
		}
		snap.AppendModelUsage(rec)
	}

	setValueMetric(snap, "models_with_tools", float64(toolsCount), "models", "current")
	setValueMetric(snap, "models_with_vision", float64(visionCount), "models", "current")
	setValueMetric(snap, "models_with_thinking", float64(thinkingCount), "models", "current")
	if maxCtx > 0 {
		setValueMetric(snap, "max_context_length", float64(maxCtx), "tokens", "current")
	}
	if totalParams > 0 {
		setValueMetric(snap, "total_parameters", totalParams, "params", "current")
	}

	return nil
}

// extractContextLength scans model_info for a "*.context_length" key and
// returns its integer value.
func extractContextLength(modelInfo map[string]any) (int64, bool) {
	if len(modelInfo) == 0 {
		return 0, false
	}
	for k, v := range modelInfo {
		if !strings.HasSuffix(strings.ToLower(k), ".context_length") {
			continue
		}
		switch val := 
v.(type) { + case float64: + return int64(val), true + case int64: + return val, true + case json.Number: + n, err := val.Int64() + if err == nil { + return n, true + } + } + } + return 0, false +} + +func parseParameterSize(s string) float64 { + s = strings.TrimSpace(strings.ToUpper(s)) + if s == "" { + return 0 + } + multiplier := 1.0 + if strings.HasSuffix(s, "B") { + s = strings.TrimSuffix(s, "B") + multiplier = 1e9 + } + if strings.HasSuffix(s, "M") { + s = strings.TrimSuffix(s, "M") + multiplier = 1e6 + } + val, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0 + } + return val * multiplier +} diff --git a/internal/providers/ollama/ollama.go b/internal/providers/ollama/ollama.go index ae147ae..7d67903 100644 --- a/internal/providers/ollama/ollama.go +++ b/internal/providers/ollama/ollama.go @@ -2,15 +2,10 @@ package ollama import ( "context" - "database/sql" "encoding/json" - "errors" "fmt" - "net/http" - "net/url" "os" "regexp" - "sort" "strconv" "strings" "time" @@ -18,7 +13,6 @@ import ( _ "github.com/mattn/go-sqlite3" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/janekbaraniewski/openusage/internal/parsers" "github.com/janekbaraniewski/openusage/internal/providers/providerbase" "github.com/janekbaraniewski/openusage/internal/providers/shared" ) @@ -194,509 +188,6 @@ func buildStatusMessage(snap core.UsageSnapshot) string { return strings.Join(parts, ", ") } -func (p *Provider) fetchLocalAPI(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) { - var hasData bool - - statusOK, err := p.fetchLocalStatus(ctx, baseURL, snap) - if err != nil { - return false, err - } - hasData = hasData || statusOK - - versionOK, err := p.fetchLocalVersion(ctx, baseURL, snap) - if err != nil { - return false, err - } - hasData = hasData || versionOK - - meOK, err := p.fetchLocalMe(ctx, baseURL, snap) - if err != nil { - return hasData, err - } - hasData = hasData || meOK - - models, tagsOK, err := p.fetchLocalTags(ctx, 
baseURL, snap) - if err != nil { - return hasData, err - } - hasData = hasData || tagsOK - - if len(models) > 0 { - if err := p.fetchModelDetails(ctx, baseURL, models, snap); err != nil { - snap.SetDiagnostic("model_details_error", err.Error()) - } - } - - psOK, err := p.fetchLocalPS(ctx, baseURL, snap) - if err != nil { - return hasData, err - } - hasData = hasData || psOK - - return hasData, nil -} - -func (p *Provider) fetchLocalVersion(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) { - var resp versionResponse - code, headers, err := doJSONRequest(ctx, http.MethodGet, baseURL+"/api/version", "", &resp, p.Client()) - if err != nil { - return false, fmt.Errorf("ollama: local version request failed: %w", err) - } - for k, v := range parsers.RedactHeaders(headers) { - if strings.EqualFold(k, "X-Request-Id") || strings.EqualFold(k, "X-Build-Time") || strings.EqualFold(k, "X-Build-Commit") { - snap.Raw["local_version_"+normalizeHeaderKey(k)] = v - } - } - if code != http.StatusOK { - return false, fmt.Errorf("ollama: local version endpoint returned HTTP %d", code) - } - if resp.Version != "" { - snap.SetAttribute("cli_version", resp.Version) - return true, nil - } - return false, nil -} - -func (p *Provider) fetchLocalStatus(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) { - var resp map[string]any - code, _, err := doJSONRequest(ctx, http.MethodGet, baseURL+"/api/status", "", &resp, p.Client()) - if err != nil { - return false, nil - } - if code == http.StatusNotFound || code == http.StatusMethodNotAllowed { - return false, nil - } - if code != http.StatusOK { - return false, nil - } - - cloud := anyMapCaseInsensitive(resp, "cloud") - if len(cloud) == 0 { - return false, nil - } - - var hasData bool - if disabled, ok := anyBoolCaseInsensitive(cloud, "disabled"); ok { - snap.SetAttribute("cloud_disabled", strconv.FormatBool(disabled)) - hasData = true - } - if source := anyStringCaseInsensitive(cloud, 
"source"); source != "" { - snap.SetAttribute("cloud_source", source) - hasData = true - } - return hasData, nil -} - -func (p *Provider) fetchLocalMe(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) { - var resp map[string]any - code, _, err := doJSONRequest(ctx, http.MethodPost, baseURL+"/api/me", "", &resp, p.Client()) - if err != nil { - return false, nil - } - - switch code { - case http.StatusOK: - return applyCloudUserPayload(resp, snap, p.now()), nil - case http.StatusUnauthorized, http.StatusForbidden: - if signinURL := anyStringCaseInsensitive(resp, "signin_url", "sign_in_url"); signinURL != "" { - snap.SetAttribute("signin_url", signinURL) - return true, nil - } - return false, nil - case http.StatusNotFound, http.StatusMethodNotAllowed: - return false, nil - default: - snap.SetDiagnostic("local_me_status", fmt.Sprintf("HTTP %d", code)) - return false, nil - } -} - -func (p *Provider) fetchLocalTags(ctx context.Context, baseURL string, snap *core.UsageSnapshot) ([]tagModel, bool, error) { - var resp tagsResponse - code, headers, err := doJSONRequest(ctx, http.MethodGet, baseURL+"/api/tags", "", &resp, p.Client()) - if err != nil { - return nil, false, fmt.Errorf("ollama: local tags request failed: %w", err) - } - for k, v := range parsers.RedactHeaders(headers) { - if strings.EqualFold(k, "X-Request-Id") { - snap.Raw["local_tags_"+normalizeHeaderKey(k)] = v - } - } - if code != http.StatusOK { - return nil, false, fmt.Errorf("ollama: local tags endpoint returned HTTP %d", code) - } - - totalModels := float64(len(resp.Models)) - setValueMetric(snap, "models_total", totalModels, "models", "current") - - var localCount, cloudCount int - var localBytes, cloudBytes int64 - for _, model := range resp.Models { - if isCloudModel(model) { - cloudCount++ - if model.Size > 0 { - cloudBytes += model.Size - } - continue - } - - localCount++ - if model.Size > 0 { - localBytes += model.Size - } - } - - setValueMetric(snap, "models_local", 
float64(localCount), "models", "current") - setValueMetric(snap, "models_cloud", float64(cloudCount), "models", "current") - setValueMetric(snap, "model_storage_bytes", float64(localBytes), "bytes", "current") - setValueMetric(snap, "cloud_model_stub_bytes", float64(cloudBytes), "bytes", "current") - - if len(resp.Models) > 0 { - snap.Raw["models_top"] = summarizeModels(resp.Models, 6) - } - - return resp.Models, true, nil -} - -func (p *Provider) fetchLocalPS(ctx context.Context, baseURL string, snap *core.UsageSnapshot) (bool, error) { - var resp processResponse - code, _, err := doJSONRequest(ctx, http.MethodGet, baseURL+"/api/ps", "", &resp, p.Client()) - if err != nil { - return false, fmt.Errorf("ollama: local process list request failed: %w", err) - } - if code != http.StatusOK { - return false, fmt.Errorf("ollama: local process list endpoint returned HTTP %d", code) - } - - setValueMetric(snap, "loaded_models", float64(len(resp.Models)), "models", "current") - - var loadedBytes int64 - var loadedVRAM int64 - maxContext := 0 - for _, m := range resp.Models { - loadedBytes += m.Size - loadedVRAM += m.SizeVRAM - if m.ContextLength > maxContext { - maxContext = m.ContextLength - } - } - - setValueMetric(snap, "loaded_model_bytes", float64(loadedBytes), "bytes", "current") - setValueMetric(snap, "loaded_vram_bytes", float64(loadedVRAM), "bytes", "current") - if maxContext > 0 { - setValueMetric(snap, "context_window", float64(maxContext), "tokens", "current") - } - - if len(resp.Models) > 0 { - loadedNames := make([]string, 0, len(resp.Models)) - for _, m := range resp.Models { - name := normalizeModelName(m.Name) - if name == "" { - continue - } - loadedNames = append(loadedNames, name) - } - if len(loadedNames) > 0 { - snap.Raw["loaded_models"] = strings.Join(loadedNames, ", ") - } - } - - return true, nil -} - -func (p *Provider) fetchModelDetails(ctx context.Context, baseURL string, models []tagModel, snap *core.UsageSnapshot) error { - var toolsCount, 
visionCount, thinkingCount int - var maxCtx int64 - var totalParams float64 - - for _, model := range models { - name := normalizeModelName(model.Name) - if name == "" { - continue - } - - var show showResponse - code, err := doJSONPostRequest(ctx, baseURL+"/api/show", map[string]string{"name": model.Name}, &show, p.Client()) - if err != nil || code != http.StatusOK { - continue - } - - prefix := "model_" + sanitizeMetricPart(name) - - capSet := make(map[string]bool, len(show.Capabilities)) - for _, cap := range show.Capabilities { - capSet[strings.TrimSpace(strings.ToLower(cap))] = true - } - if capSet["tools"] { - toolsCount++ - snap.SetAttribute(prefix+"_capability_tools", "true") - } - if capSet["vision"] { - visionCount++ - snap.SetAttribute(prefix+"_capability_vision", "true") - } - if capSet["thinking"] { - thinkingCount++ - snap.SetAttribute(prefix+"_capability_thinking", "true") - } - - if show.Details.QuantizationLevel != "" { - snap.SetAttribute(prefix+"_quantization", show.Details.QuantizationLevel) - } - - // Extract context length from model_info. - if ctxVal, ok := extractContextLength(show.ModelInfo); ok && ctxVal > 0 { - setValueMetric(snap, prefix+"_context_length", float64(ctxVal), "tokens", "current") - if ctxVal > maxCtx { - maxCtx = ctxVal - } - } - - // Parse parameter size for aggregation. - if ps := parseParameterSize(show.Details.ParameterSize); ps > 0 { - totalParams += ps - } - - // Add model usage record with capability dimensions. 
- rec := core.ModelUsageRecord{ - RawModelID: name, - RawSource: "api_show", - Window: "current", - } - rec.SetDimension("provider", "ollama") - if capSet["tools"] { - rec.SetDimension("capability_tools", "true") - } - if capSet["vision"] { - rec.SetDimension("capability_vision", "true") - } - if capSet["thinking"] { - rec.SetDimension("capability_thinking", "true") - } - snap.AppendModelUsage(rec) - } - - setValueMetric(snap, "models_with_tools", float64(toolsCount), "models", "current") - setValueMetric(snap, "models_with_vision", float64(visionCount), "models", "current") - setValueMetric(snap, "models_with_thinking", float64(thinkingCount), "models", "current") - if maxCtx > 0 { - setValueMetric(snap, "max_context_length", float64(maxCtx), "tokens", "current") - } - if totalParams > 0 { - setValueMetric(snap, "total_parameters", totalParams, "params", "current") - } - - return nil -} - -func extractContextLength(modelInfo map[string]any) (int64, bool) { - if len(modelInfo) == 0 { - return 0, false - } - for k, v := range modelInfo { - if !strings.HasSuffix(strings.ToLower(k), ".context_length") { - continue - } - switch val := v.(type) { - case float64: - return int64(val), true - case int64: - return val, true - case json.Number: - n, err := val.Int64() - if err == nil { - return n, true - } - } - } - return 0, false -} - -func parseParameterSize(s string) float64 { - s = strings.TrimSpace(strings.ToUpper(s)) - if s == "" { - return 0 - } - multiplier := 1.0 - if strings.HasSuffix(s, "B") { - s = strings.TrimSuffix(s, "B") - multiplier = 1e9 - } - if strings.HasSuffix(s, "M") { - s = strings.TrimSuffix(s, "M") - multiplier = 1e6 - } - val, err := strconv.ParseFloat(s, 64) - if err != nil { - return 0 - } - return val * multiplier -} - -func (p *Provider) fetchDesktopDB(ctx context.Context, acct core.AccountConfig, snap *core.UsageSnapshot) (bool, error) { - dbPath := resolveDesktopDBPath(acct) - if dbPath == "" || !fileExists(dbPath) { - return false, nil - } 
- - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return false, fmt.Errorf("ollama: opening desktop db: %w", err) - } - defer db.Close() - - if err := db.PingContext(ctx); err != nil { - return false, fmt.Errorf("ollama: pinging desktop db: %w", err) - } - - snap.Raw["desktop_db_path"] = dbPath - - setCountMetric := func(key string, count int64, unit, window string) { - setValueMetric(snap, key, float64(count), unit, window) - } - - totalChats, err := queryCount(ctx, db, `SELECT COUNT(*) FROM chats`) - if err == nil { - setCountMetric("total_conversations", totalChats, "chats", "all-time") - } - - totalMessages, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages`) - if err == nil { - setCountMetric("total_messages", totalMessages, "messages", "all-time") - } - - totalUserMessages, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'user'`) - if err == nil { - setCountMetric("total_user_messages", totalUserMessages, "messages", "all-time") - } - - totalAssistantMessages, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'assistant'`) - if err == nil { - setCountMetric("total_assistant_messages", totalAssistantMessages, "messages", "all-time") - } - - totalToolCalls, err := queryCount(ctx, db, `SELECT COUNT(*) FROM tool_calls`) - if err == nil { - setCountMetric("total_tool_calls", totalToolCalls, "calls", "all-time") - } - - totalAttachments, err := queryCount(ctx, db, `SELECT COUNT(*) FROM attachments`) - if err == nil { - setCountMetric("total_attachments", totalAttachments, "attachments", "all-time") - } - - sessionsToday, err := queryCount(ctx, db, `SELECT COUNT(*) FROM chats WHERE date(created_at, 'localtime') = date('now', 'localtime')`) - if err == nil { - setCountMetric("sessions_today", sessionsToday, "sessions", "today") - } - - messagesToday, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE date(created_at, 'localtime') = date('now', 'localtime')`) - if err == nil { - 
setCountMetric("messages_today", messagesToday, "messages", "today") - } - - userMessagesToday, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'user' AND date(created_at, 'localtime') = date('now', 'localtime')`) - if err == nil { - setCountMetric("requests_today", userMessagesToday, "requests", "today") - } - - sessions5h, err := queryCount(ctx, db, `SELECT COUNT(*) FROM chats WHERE datetime(created_at) >= datetime('now', '-5 hours')`) - if err == nil { - setCountMetric("sessions_5h", sessions5h, "sessions", "5h") - } - - sessions1d, err := queryCount(ctx, db, `SELECT COUNT(*) FROM chats WHERE datetime(created_at) >= datetime('now', '-24 hours')`) - if err == nil { - setCountMetric("sessions_1d", sessions1d, "sessions", "1d") - } - - messages5h, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE datetime(created_at) >= datetime('now', '-5 hours')`) - if err == nil { - setCountMetric("messages_5h", messages5h, "messages", "5h") - } - - messages1d, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE datetime(created_at) >= datetime('now', '-24 hours')`) - if err == nil { - setCountMetric("messages_1d", messages1d, "messages", "1d") - } - - requests5h, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'user' AND datetime(created_at) >= datetime('now', '-5 hours')`) - if err == nil { - setCountMetric("requests_5h", requests5h, "requests", "5h") - } - - requests1d, err := queryCount(ctx, db, `SELECT COUNT(*) FROM messages WHERE role = 'user' AND datetime(created_at) >= datetime('now', '-24 hours')`) - if err == nil { - setCountMetric("requests_1d", requests1d, "requests", "1d") - } - - toolCallsToday, err := queryCount(ctx, db, `SELECT COUNT(*) - FROM tool_calls tc - JOIN messages m ON tc.message_id = m.id - WHERE date(m.created_at, 'localtime') = date('now', 'localtime')`) - if err == nil { - setCountMetric("tool_calls_today", toolCallsToday, "calls", "today") - } - - toolCalls5h, err := 
queryCount(ctx, db, `SELECT COUNT(*) - FROM tool_calls tc - JOIN messages m ON tc.message_id = m.id - WHERE datetime(m.created_at) >= datetime('now', '-5 hours')`) - if err == nil { - setCountMetric("tool_calls_5h", toolCalls5h, "calls", "5h") - } - - toolCalls1d, err := queryCount(ctx, db, `SELECT COUNT(*) - FROM tool_calls tc - JOIN messages m ON tc.message_id = m.id - WHERE datetime(m.created_at) >= datetime('now', '-24 hours')`) - if err == nil { - setCountMetric("tool_calls_1d", toolCalls1d, "calls", "1d") - } - - attachmentsToday, err := queryCount(ctx, db, `SELECT COUNT(*) - FROM attachments a - JOIN messages m ON a.message_id = m.id - WHERE date(m.created_at, 'localtime') = date('now', 'localtime')`) - if err == nil { - setCountMetric("attachments_today", attachmentsToday, "attachments", "today") - } - - if err := populateModelUsageFromDB(ctx, db, snap); err != nil { - snap.SetDiagnostic("desktop_model_usage_error", err.Error()) - } - - if err := populateEstimatedTokenUsageFromDB(ctx, db, snap, p.now()); err != nil { - snap.SetDiagnostic("desktop_token_estimate_error", err.Error()) - } - - if err := populateSourceUsageFromDB(ctx, db, snap); err != nil { - snap.SetDiagnostic("desktop_source_usage_error", err.Error()) - } - - if err := populateToolUsageFromDB(ctx, db, snap); err != nil { - snap.SetDiagnostic("desktop_tool_usage_error", err.Error()) - } - - if err := populateDailySeriesFromDB(ctx, db, snap); err != nil { - snap.SetDiagnostic("desktop_daily_series_error", err.Error()) - } - - if err := populateThinkingMetricsFromDB(ctx, db, snap); err != nil { - snap.SetDiagnostic("desktop_thinking_error", err.Error()) - } - - if err := populateSettingsFromDB(ctx, db, snap); err != nil { - snap.SetDiagnostic("desktop_settings_error", err.Error()) - } - - if err := populateCachedUserFromDB(ctx, db, snap); err != nil { - snap.SetDiagnostic("desktop_user_error", err.Error()) - } - - return true, nil -} - func (p *Provider) fetchServerLogs(acct core.AccountConfig, 
snap *core.UsageSnapshot) (bool, error) { logFiles := resolveServerLogFiles(acct) if len(logFiles) == 0 { @@ -843,1130 +334,6 @@ func (p *Provider) fetchServerConfig(acct core.AccountConfig, snap *core.UsageSn return nil } -func (p *Provider) fetchCloudAPI(ctx context.Context, acct core.AccountConfig, apiKey string, snap *core.UsageSnapshot) (hasData, authFailed, limited bool, err error) { - cloudBaseURL := resolveCloudBaseURL(acct) - - var me map[string]any - status, headers, reqErr := doJSONRequest(ctx, http.MethodPost, cloudEndpointURL(cloudBaseURL, "/api/me"), apiKey, &me, p.Client()) - if reqErr != nil { - return false, false, false, fmt.Errorf("ollama: cloud account request failed: %w", reqErr) - } - - for k, v := range parsers.RedactHeaders(headers, "authorization") { - if strings.EqualFold(k, "X-Request-Id") { - snap.Raw["cloud_me_"+normalizeHeaderKey(k)] = v - } - } - - switch status { - case http.StatusOK: - snap.SetAttribute("auth_type", "api_key") - if applyCloudUserPayload(me, snap, p.now()) { - hasData = true - } - case http.StatusUnauthorized, http.StatusForbidden: - authFailed = true - case http.StatusTooManyRequests: - limited = true - default: - snap.SetDiagnostic("cloud_me_status", fmt.Sprintf("HTTP %d", status)) - } - - var tags tagsResponse - tagsStatus, _, tagsErr := doJSONRequest(ctx, http.MethodGet, cloudEndpointURL(cloudBaseURL, "/api/tags"), apiKey, &tags, p.Client()) - if tagsErr != nil { - if !hasData { - return hasData, authFailed, limited, fmt.Errorf("ollama: cloud tags request failed: %w", tagsErr) - } - snap.SetDiagnostic("cloud_tags_error", tagsErr.Error()) - return hasData, authFailed, limited, nil - } - - switch tagsStatus { - case http.StatusOK: - setValueMetric(snap, "cloud_catalog_models", float64(len(tags.Models)), "models", "current") - hasData = true - case http.StatusUnauthorized, http.StatusForbidden: - authFailed = true - case http.StatusTooManyRequests: - limited = true - default: - 
snap.SetDiagnostic("cloud_tags_status", fmt.Sprintf("HTTP %d", tagsStatus)) - } - - if _, ok := snap.Metrics["usage_five_hour"]; !ok { - if parsed, parseErr := fetchCloudUsageFromSettingsPage(ctx, cloudBaseURL, apiKey, acct, snap, p.Client()); parseErr != nil { - snap.SetDiagnostic("cloud_usage_settings_error", parseErr.Error()) - } else if parsed { - hasData = true - } - } - - return hasData, authFailed, limited, nil -} - -func applyCloudUserPayload(payload map[string]any, snap *core.UsageSnapshot, now time.Time) bool { - if len(payload) == 0 { - return false - } - - var hasData bool - - if id := anyStringCaseInsensitive(payload, "id", "ID"); id != "" { - snap.SetAttribute("account_id", id) - hasData = true - } - if email := anyStringCaseInsensitive(payload, "email", "Email"); email != "" { - snap.SetAttribute("account_email", email) - hasData = true - } - if name := anyStringCaseInsensitive(payload, "name", "Name"); name != "" { - snap.SetAttribute("account_name", name) - hasData = true - } - if plan := anyStringCaseInsensitive(payload, "plan", "Plan"); plan != "" { - snap.SetAttribute("plan_name", plan) - hasData = true - } - - if customerID := anyNullStringCaseInsensitive(payload, "customerid", "customer_id", "CustomerID"); customerID != "" { - snap.SetAttribute("customer_id", customerID) - } - if subscriptionID := anyNullStringCaseInsensitive(payload, "subscriptionid", "subscription_id", "SubscriptionID"); subscriptionID != "" { - snap.SetAttribute("subscription_id", subscriptionID) - } - if workOSUserID := anyNullStringCaseInsensitive(payload, "workosuserid", "workos_user_id", "WorkOSUserID"); workOSUserID != "" { - snap.SetAttribute("workos_user_id", workOSUserID) - } - - if billingStart, ok := anyNullTimeCaseInsensitive(payload, "subscriptionperiodstart", "subscription_period_start", "SubscriptionPeriodStart"); ok { - snap.SetAttribute("billing_cycle_start", billingStart.Format(time.RFC3339)) - } - if billingEnd, ok := anyNullTimeCaseInsensitive(payload, 
"subscriptionperiodend", "subscription_period_end", "SubscriptionPeriodEnd"); ok { - snap.SetAttribute("billing_cycle_end", billingEnd.Format(time.RFC3339)) - } - - if extractCloudUsageWindows(payload, snap, now) { - hasData = true - } - - return hasData -} - -func extractCloudUsageWindows(payload map[string]any, snap *core.UsageSnapshot, now time.Time) bool { - var found bool - - sessionKeys := []string{ - "session_usage", "sessionusage", "usage_5h", "usagefivehour", "five_hour_usage", "fivehourusage", - } - if metric, resetAt, ok := findUsageWindow(payload, sessionKeys, "5h", now); ok { - snap.Metrics["usage_five_hour"] = metric - if !resetAt.IsZero() { - snap.Resets["usage_five_hour"] = resetAt - snap.SetAttribute("block_end", resetAt.Format(time.RFC3339)) - if metric.Window == "5h" { - start := resetAt.Add(-5 * time.Hour) - snap.SetAttribute("block_start", start.Format(time.RFC3339)) - } - } - found = true - } - - dayKeys := []string{ - "weekly_usage", "weeklyusage", "usage_1d", "usageoneday", "one_day_usage", "daily_usage", "dailyusage", - } - if metric, resetAt, ok := findUsageWindow(payload, dayKeys, "1d", now); ok { - snap.Metrics["usage_weekly"] = core.Metric{ - Limit: metric.Limit, - Remaining: metric.Remaining, - Used: metric.Used, - Unit: metric.Unit, - Window: "1w", - } - // Backward-compatible alias for existing widgets/config. 
- snap.Metrics["usage_one_day"] = metric - if !resetAt.IsZero() { - snap.Resets["usage_weekly"] = resetAt - snap.Resets["usage_one_day"] = resetAt - } - found = true - } - - return found -} - -func findUsageWindow(payload map[string]any, keys []string, fallbackWindow string, now time.Time) (core.Metric, time.Time, bool) { - sources := []map[string]any{ - payload, - anyMapCaseInsensitive(payload, "usage"), - anyMapCaseInsensitive(payload, "cloud_usage"), - anyMapCaseInsensitive(payload, "quota"), - } - - for _, src := range sources { - if len(src) == 0 { - continue - } - for _, key := range keys { - v, ok := anyValueCaseInsensitive(src, key) - if !ok { - continue - } - if metric, resetAt, ok := parseUsageWindowValue(v, fallbackWindow, now); ok { - return metric, resetAt, true - } - } - } - - return core.Metric{}, time.Time{}, false -} - -func parseUsageWindowValue(v any, fallbackWindow string, now time.Time) (core.Metric, time.Time, bool) { - if pct, ok := anyFloat(v); ok { - return core.Metric{ - Used: core.Float64Ptr(pct), - Unit: "%", - Window: fallbackWindow, - }, time.Time{}, true - } - - switch raw := v.(type) { - case string: - s := strings.TrimSpace(strings.TrimSuffix(raw, "%")) - if f, err := strconv.ParseFloat(s, 64); err == nil { - return core.Metric{ - Used: core.Float64Ptr(f), - Unit: "%", - Window: fallbackWindow, - }, time.Time{}, true - } - case map[string]any: - var metric core.Metric - metric.Window = fallbackWindow - metric.Unit = anyStringCaseInsensitive(raw, "unit") - if metric.Unit == "" { - metric.Unit = "%" - } - - if window := anyStringCaseInsensitive(raw, "window"); window != "" { - metric.Window = strings.TrimSpace(window) - } - - if used, ok := anyFloatCaseInsensitive(raw, "used", "usage", "value"); ok { - metric.Used = core.Float64Ptr(used) - } - if limit, ok := anyFloatCaseInsensitive(raw, "limit", "max"); ok { - metric.Limit = core.Float64Ptr(limit) - } - if remaining, ok := anyFloatCaseInsensitive(raw, "remaining", "left"); ok { - 
metric.Remaining = core.Float64Ptr(remaining) - } - if pct, ok := anyFloatCaseInsensitive(raw, "percent", "pct", "used_percent", "usage_percent"); ok { - metric.Unit = "%" - metric.Used = core.Float64Ptr(pct) - metric.Limit = nil - metric.Remaining = nil - } - - var resetAt time.Time - if resetRaw := anyStringCaseInsensitive(raw, "reset_at", "resets_at", "reset_time", "reset"); resetRaw != "" { - if t, ok := parseAnyTime(resetRaw); ok { - resetAt = t - } - } - if resetAt.IsZero() { - if seconds, ok := anyFloatCaseInsensitive(raw, "reset_in", "reset_in_seconds", "resets_in", "seconds_to_reset"); ok && seconds > 0 { - resetAt = now.Add(time.Duration(seconds * float64(time.Second))) - } - } - - if metric.Used != nil || metric.Limit != nil || metric.Remaining != nil { - return metric, resetAt, true - } - } - - return core.Metric{}, time.Time{}, false -} - -func finalizeUsageWindows(snap *core.UsageSnapshot, now time.Time) { - now = now.In(time.Local) - blockStart, blockEnd := currentFiveHourBlock(now) - - // Keep usage windows strictly real-data-driven. - // If usage_five_hour exists but reset is missing, infer the current 5h block boundary. - if _, ok := snap.Metrics["usage_five_hour"]; ok { - if _, ok := snap.Resets["usage_five_hour"]; !ok { - snap.Resets["usage_five_hour"] = blockEnd - } - if _, ok := snap.Attributes["block_start"]; !ok { - snap.SetAttribute("block_start", blockStart.Format(time.RFC3339)) - } - if _, ok := snap.Attributes["block_end"]; !ok { - snap.SetAttribute("block_end", blockEnd.Format(time.RFC3339)) - } - } - - // Ensure percentage metrics have Limit=100 and Remaining for proper gauge rendering. 
- hundred := 100.0 - for _, key := range []string{"usage_five_hour", "usage_weekly", "usage_one_day"} { - if m, ok := snap.Metrics[key]; ok && m.Unit == "%" && m.Limit == nil { - m.Limit = core.Float64Ptr(hundred) - if m.Used != nil && m.Remaining == nil { - rem := hundred - *m.Used - m.Remaining = core.Float64Ptr(rem) - } - snap.Metrics[key] = m - } - } -} - -func currentFiveHourBlock(now time.Time) (time.Time, time.Time) { - startHour := (now.Hour() / 5) * 5 - start := time.Date(now.Year(), now.Month(), now.Day(), startHour, 0, 0, 0, now.Location()) - end := start.Add(5 * time.Hour) - return start, end -} - -func resolveCloudBaseURL(acct core.AccountConfig) string { - normalize := func(raw string) string { - raw = strings.TrimSpace(strings.TrimRight(raw, "/")) - if raw == "" { - return "" - } - u, err := url.Parse(raw) - if err != nil { - return raw - } - switch strings.TrimSpace(strings.ToLower(u.Path)) { - case "", "/": - u.Path = "" - case "/api", "/api/v1": - u.Path = "" - } - u.RawQuery = "" - u.Fragment = "" - return strings.TrimRight(u.String(), "/") - } - - if acct.ExtraData != nil { - if v := strings.TrimSpace(acct.ExtraData["cloud_base_url"]); v != "" { - return normalize(v) - } - } - if strings.HasPrefix(strings.ToLower(acct.BaseURL), "https://") && strings.Contains(strings.ToLower(acct.BaseURL), "ollama.com") { - return normalize(acct.BaseURL) - } - return normalize(defaultCloudBaseURL) -} - -func queryCount(ctx context.Context, db *sql.DB, query string) (int64, error) { - var count int64 - if err := db.QueryRowContext(ctx, query).Scan(&count); err != nil { - return 0, err - } - return count, nil -} - -func tableHasColumn(ctx context.Context, db *sql.DB, table, column string) (bool, error) { - table = strings.TrimSpace(table) - column = strings.TrimSpace(column) - if table == "" || column == "" { - return false, nil - } - safeTable := strings.ReplaceAll(table, "'", "''") - query := fmt.Sprintf(`SELECT COUNT(*) FROM pragma_table_info('%s') WHERE name = 
?`, safeTable) - var count int - if err := db.QueryRowContext(ctx, query, column).Scan(&count); err != nil { - return false, err - } - return count > 0, nil -} - -func populateThinkingMetricsFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { - hasStart, _ := tableHasColumn(ctx, db, "messages", "thinking_time_start") - hasEnd, _ := tableHasColumn(ctx, db, "messages", "thinking_time_end") - if !hasStart || !hasEnd { - return nil - } - - rows, err := db.QueryContext(ctx, ` - SELECT model_name, - COUNT(*) as think_count, - SUM(CAST((julianday(thinking_time_end) - julianday(thinking_time_start)) * 86400 AS REAL)) as total_think_seconds, - AVG(CAST((julianday(thinking_time_end) - julianday(thinking_time_start)) * 86400 AS REAL)) as avg_think_seconds - FROM messages - WHERE thinking_time_start IS NOT NULL AND thinking_time_end IS NOT NULL - AND thinking_time_start != '' AND thinking_time_end != '' - GROUP BY model_name`) - if err != nil { - return err - } - defer rows.Close() - - var totalThinkRequests int64 - var totalThinkSeconds float64 - var totalAvgCount int - - for rows.Next() { - var rawModel sql.NullString - var thinkCount int64 - var totalSec sql.NullFloat64 - var avgSec sql.NullFloat64 - - if err := rows.Scan(&rawModel, &thinkCount, &totalSec, &avgSec); err != nil { - return err - } - - totalThinkRequests += thinkCount - if totalSec.Valid { - totalThinkSeconds += totalSec.Float64 - } - totalAvgCount++ - - if rawModel.Valid && strings.TrimSpace(rawModel.String) != "" { - model := normalizeModelName(rawModel.String) - if model != "" { - prefix := "model_" + sanitizeMetricPart(model) - if totalSec.Valid { - setValueMetric(snap, prefix+"_thinking_seconds", totalSec.Float64, "seconds", "all-time") - } - } - } - } - if err := rows.Err(); err != nil { - return err - } - - if totalThinkRequests > 0 { - setValueMetric(snap, "thinking_requests", float64(totalThinkRequests), "requests", "all-time") - setValueMetric(snap, "total_thinking_seconds", 
totalThinkSeconds, "seconds", "all-time") - if totalAvgCount > 0 { - setValueMetric(snap, "avg_thinking_seconds", totalThinkSeconds/float64(totalThinkRequests), "seconds", "all-time") - } - } - - return nil -} - -func populateSettingsFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { - var selectedModel sql.NullString - var contextLength sql.NullInt64 - err := db.QueryRowContext(ctx, `SELECT selected_model, context_length FROM settings LIMIT 1`).Scan(&selectedModel, &contextLength) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil - } - return err - } - - if selectedModel.Valid && strings.TrimSpace(selectedModel.String) != "" { - snap.SetAttribute("selected_model", selectedModel.String) - } - if contextLength.Valid && contextLength.Int64 > 0 { - setValueMetric(snap, "configured_context_length", float64(contextLength.Int64), "tokens", "current") - } - - // Read additional settings columns if they exist. - type settingsCol struct { - column string - attr string - } - extraCols := []settingsCol{ - {"websearch_enabled", "websearch_enabled"}, - {"turbo_enabled", "turbo_enabled"}, - {"agent", "agent_mode"}, - {"tools", "tools_enabled"}, - {"think_enabled", "think_enabled"}, - {"airplane_mode", "airplane_mode"}, - {"device_id", "device_id"}, - } - for _, col := range extraCols { - has, _ := tableHasColumn(ctx, db, "settings", col.column) - if !has { - continue - } - var val sql.NullString - query := fmt.Sprintf(`SELECT CAST(%s AS TEXT) FROM settings LIMIT 1`, col.column) - if err := db.QueryRowContext(ctx, query).Scan(&val); err != nil { - continue - } - if val.Valid && strings.TrimSpace(val.String) != "" { - snap.SetAttribute(col.attr, val.String) - } - } - - return nil -} - -func populateCachedUserFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { - var name sql.NullString - var email sql.NullString - var plan sql.NullString - var cachedAt sql.NullString - - err := db.QueryRowContext(ctx, `SELECT name, 
email, plan, cached_at FROM users ORDER BY cached_at DESC LIMIT 1`).Scan(&name, &email, &plan, &cachedAt) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil - } - return err - } - - if name.Valid && strings.TrimSpace(name.String) != "" { - snap.SetAttribute("account_name", name.String) - } - if email.Valid && strings.TrimSpace(email.String) != "" { - snap.SetAttribute("account_email", email.String) - } - if plan.Valid && strings.TrimSpace(plan.String) != "" { - snap.SetAttribute("plan_name", plan.String) - } - if cachedAt.Valid && strings.TrimSpace(cachedAt.String) != "" { - snap.SetAttribute("account_cached_at", cachedAt.String) - } - return nil -} - -func populateModelUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { - rows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) FROM messages WHERE model_name IS NOT NULL AND trim(model_name) != '' GROUP BY model_name ORDER BY COUNT(*) DESC`) - if err != nil { - return err - } - defer rows.Close() - - var top []string - for rows.Next() { - var rawModel string - var count float64 - if err := rows.Scan(&rawModel, &count); err != nil { - return err - } - model := normalizeModelName(rawModel) - if model == "" { - continue - } - - metricKey := "model_" + sanitizeMetricPart(model) + "_requests" - setValueMetric(snap, metricKey, count, "requests", "all-time") - - rec := core.ModelUsageRecord{ - RawModelID: model, - RawSource: "sqlite", - Window: "all-time", - Requests: core.Float64Ptr(count), - } - rec.SetDimension("provider", "ollama") - snap.AppendModelUsage(rec) - - if len(top) < 6 { - top = append(top, fmt.Sprintf("%s=%.0f", model, count)) - } - } - if err := rows.Err(); err != nil { - return err - } - - if len(top) > 0 { - snap.Raw["models_usage_top"] = strings.Join(top, ", ") - } - - todayRows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) - FROM messages - WHERE model_name IS NOT NULL AND trim(model_name) != '' - AND date(created_at, 'localtime') = 
date('now', 'localtime') - GROUP BY model_name`) - if err == nil { - defer todayRows.Close() - for todayRows.Next() { - var rawModel string - var count float64 - if err := todayRows.Scan(&rawModel, &count); err != nil { - return err - } - model := normalizeModelName(rawModel) - if model == "" { - continue - } - - metricKey := "model_" + sanitizeMetricPart(model) + "_requests_today" - setValueMetric(snap, metricKey, count, "requests", "today") - - rec := core.ModelUsageRecord{ - RawModelID: model, - RawSource: "sqlite", - Window: "today", - Requests: core.Float64Ptr(count), - } - rec.SetDimension("provider", "ollama") - snap.AppendModelUsage(rec) - } - if err := todayRows.Err(); err != nil { - return err - } - } - - perDayRows, err := db.QueryContext(ctx, `SELECT date(created_at), model_name, COUNT(*) - FROM messages - WHERE model_name IS NOT NULL AND trim(model_name) != '' - GROUP BY date(created_at), model_name`) - if err != nil { - return nil - } - defer perDayRows.Close() - - perModelDaily := make(map[string]map[string]float64) - for perDayRows.Next() { - var date string - var rawModel string - var count float64 - if err := perDayRows.Scan(&date, &rawModel, &count); err != nil { - return err - } - model := normalizeModelName(rawModel) - date = strings.TrimSpace(date) - if model == "" || date == "" { - continue - } - if perModelDaily[model] == nil { - perModelDaily[model] = make(map[string]float64) - } - perModelDaily[model][date] = count - } - if err := perDayRows.Err(); err != nil { - return err - } - - for model, byDate := range perModelDaily { - seriesKey := "requests_model_" + sanitizeMetricPart(model) - snap.DailySeries[seriesKey] = core.SortedTimePoints(byDate) - usageSeriesKey := "usage_model_" + sanitizeMetricPart(model) - snap.DailySeries[usageSeriesKey] = core.SortedTimePoints(byDate) - } - - return nil -} - -func populateEstimatedTokenUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot, now time.Time) error { - hasThinking, err := 
tableHasColumn(ctx, db, "messages", "thinking") - if err != nil { - return err - } - - thinkingExpr := `''` - if hasThinking { - thinkingExpr = `COALESCE(thinking, '')` - } - - query := fmt.Sprintf(`SELECT chat_id, id, role, model_name, COALESCE(content, ''), %s, COALESCE(created_at, '') - FROM messages - ORDER BY chat_id, datetime(created_at), id`, thinkingExpr) - rows, err := db.QueryContext(ctx, query) - if err != nil { - return err - } - defer rows.Close() - - type tokenAgg struct { - input float64 - output float64 - requests float64 - } - ensureAgg := func(m map[string]*tokenAgg, key string) *tokenAgg { - if m[key] == nil { - m[key] = &tokenAgg{} - } - return m[key] - } - ensureDaily := func(m map[string]map[string]float64, key string) map[string]float64 { - if m[key] == nil { - m[key] = make(map[string]float64) - } - return m[key] - } - - modelAgg := make(map[string]*tokenAgg) - sourceAgg := make(map[string]*tokenAgg) - dailyTokens := make(map[string]float64) - dailyRequests := make(map[string]float64) - modelDailyTokens := make(map[string]map[string]float64) - sourceDailyTokens := make(map[string]map[string]float64) - sourceDailyRequests := make(map[string]map[string]float64) - sessionsBySource := make(map[string]float64) - - now = now.In(time.Local) - start5h := now.Add(-5 * time.Hour) - start1d := now.Add(-24 * time.Hour) - start7d := now.Add(-7 * 24 * time.Hour) - - var tokens5h float64 - var tokens1d float64 - var tokens7d float64 - var tokensToday float64 - - currentChat := "" - pendingInputChars := 0 - chatSources := make(map[string]bool) - flushChat := func() { - for source := range chatSources { - sessionsBySource[source]++ - } - clear(chatSources) - pendingInputChars = 0 - } - - for rows.Next() { - var chatID string - var id int64 - var role sql.NullString - var modelName sql.NullString - var content sql.NullString - var thinking sql.NullString - var createdAt sql.NullString - - if err := rows.Scan(&chatID, &id, &role, &modelName, &content, 
&thinking, &createdAt); err != nil { - return err - } - - if currentChat == "" { - currentChat = chatID - } - if chatID != currentChat { - flushChat() - currentChat = chatID - } - - roleVal := strings.ToLower(strings.TrimSpace(role.String)) - contentLen := len(content.String) - thinkingLen := len(thinking.String) - - ts := time.Time{} - if createdAt.Valid && strings.TrimSpace(createdAt.String) != "" { - if parsed, ok := parseAnyTime(createdAt.String); ok { - ts = parsed.In(time.Local) - } - } - day := "" - if !ts.IsZero() { - day = ts.Format("2006-01-02") - } else if createdAt.Valid && len(createdAt.String) >= 10 { - day = createdAt.String[:10] - } - - if roleVal == "user" { - pendingInputChars += contentLen + thinkingLen - continue - } - if roleVal != "assistant" { - continue - } - - model := strings.TrimSpace(modelName.String) - model = normalizeModelName(model) - if model == "" { - continue - } - modelKey := sanitizeMetricPart(model) - source := sourceFromModelName(model) - sourceKey := sanitizeMetricPart(source) - - inputTokens := estimateTokensFromChars(pendingInputChars) - outputTokens := estimateTokensFromChars(contentLen + thinkingLen) - totalTokens := inputTokens + outputTokens - pendingInputChars = 0 - - modelTotals := ensureAgg(modelAgg, model) - modelTotals.input += inputTokens - modelTotals.output += outputTokens - modelTotals.requests++ - - sourceTotals := ensureAgg(sourceAgg, sourceKey) - sourceTotals.input += inputTokens - sourceTotals.output += outputTokens - sourceTotals.requests++ - chatSources[sourceKey] = true - - if day != "" { - dailyTokens[day] += totalTokens - dailyRequests[day]++ - ensureDaily(modelDailyTokens, modelKey)[day] += totalTokens - ensureDaily(sourceDailyTokens, sourceKey)[day] += totalTokens - ensureDaily(sourceDailyRequests, sourceKey)[day]++ - if day == now.Format("2006-01-02") { - tokensToday += totalTokens - } - } - - if !ts.IsZero() { - if ts.After(start5h) { - tokens5h += totalTokens - } - if ts.After(start1d) { - 
tokens1d += totalTokens - } - if ts.After(start7d) { - tokens7d += totalTokens - } - } - } - if err := rows.Err(); err != nil { - return err - } - if currentChat != "" { - flushChat() - } - - type modelTotal struct { - name string - tok float64 - } - var topModels []modelTotal - for model, totals := range modelAgg { - modelKey := sanitizeMetricPart(model) - setValueMetric(snap, "model_"+modelKey+"_input_tokens", totals.input, "tokens", "all-time") - setValueMetric(snap, "model_"+modelKey+"_output_tokens", totals.output, "tokens", "all-time") - setValueMetric(snap, "model_"+modelKey+"_total_tokens", totals.input+totals.output, "tokens", "all-time") - - rec := core.ModelUsageRecord{ - RawModelID: model, - RawSource: "sqlite_estimate", - Window: "all-time", - InputTokens: core.Float64Ptr(totals.input), - OutputTokens: core.Float64Ptr(totals.output), - TotalTokens: core.Float64Ptr(totals.input + totals.output), - Requests: core.Float64Ptr(totals.requests), - } - rec.SetDimension("provider", "ollama") - rec.SetDimension("estimation", "chars_div_4") - snap.AppendModelUsage(rec) - - topModels = append(topModels, modelTotal{name: model, tok: totals.input + totals.output}) - } - sort.Slice(topModels, func(i, j int) bool { - if topModels[i].tok == topModels[j].tok { - return topModels[i].name < topModels[j].name - } - return topModels[i].tok > topModels[j].tok - }) - if len(topModels) > 0 { - top := make([]string, 0, min(len(topModels), 6)) - for i := 0; i < len(topModels) && i < 6; i++ { - top = append(top, fmt.Sprintf("%s=%.0f", topModels[i].name, topModels[i].tok)) - } - snap.Raw["model_tokens_estimated_top"] = strings.Join(top, ", ") - } - - for sourceKey, totals := range sourceAgg { - totalTokens := totals.input + totals.output - setValueMetric(snap, "client_"+sourceKey+"_input_tokens", totals.input, "tokens", "all-time") - setValueMetric(snap, "client_"+sourceKey+"_output_tokens", totals.output, "tokens", "all-time") - setValueMetric(snap, 
"client_"+sourceKey+"_total_tokens", totalTokens, "tokens", "all-time") - setValueMetric(snap, "client_"+sourceKey+"_requests", totals.requests, "requests", "all-time") - if sessions := sessionsBySource[sourceKey]; sessions > 0 { - setValueMetric(snap, "client_"+sourceKey+"_sessions", sessions, "sessions", "all-time") - } - - setValueMetric(snap, "provider_"+sourceKey+"_input_tokens", totals.input, "tokens", "all-time") - setValueMetric(snap, "provider_"+sourceKey+"_output_tokens", totals.output, "tokens", "all-time") - setValueMetric(snap, "provider_"+sourceKey+"_requests", totals.requests, "requests", "all-time") - } - - for sourceKey, byDay := range sourceDailyTokens { - if len(byDay) == 0 { - continue - } - snap.DailySeries["tokens_client_"+sourceKey] = core.SortedTimePoints(byDay) - } - for sourceKey, byDay := range sourceDailyRequests { - if len(byDay) == 0 { - continue - } - snap.DailySeries["usage_client_"+sourceKey] = core.SortedTimePoints(byDay) - } - for modelKey, byDay := range modelDailyTokens { - if len(byDay) == 0 { - continue - } - snap.DailySeries["tokens_model_"+modelKey] = core.SortedTimePoints(byDay) - } - if len(dailyTokens) > 0 { - snap.DailySeries["analytics_tokens"] = core.SortedTimePoints(dailyTokens) - } - if len(dailyRequests) > 0 { - snap.DailySeries["analytics_requests"] = core.SortedTimePoints(dailyRequests) - } - - if tokensToday > 0 { - setValueMetric(snap, "tokens_today", tokensToday, "tokens", "today") - } - if tokens5h > 0 { - setValueMetric(snap, "tokens_5h", tokens5h, "tokens", "5h") - } - if tokens1d > 0 { - setValueMetric(snap, "tokens_1d", tokens1d, "tokens", "1d") - } - if tokens7d > 0 { - setValueMetric(snap, "7d_tokens", tokens7d, "tokens", "7d") - } - - snap.SetAttribute("token_estimation", "chars_div_4") - return nil -} - -func estimateTokensFromChars(chars int) float64 { - if chars <= 0 { - return 0 - } - return float64((chars + 3) / 4) -} - -func populateSourceUsageFromDB(ctx context.Context, db *sql.DB, snap 
*core.UsageSnapshot) error { - allTimeRows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) - FROM messages - WHERE model_name IS NOT NULL AND trim(model_name) != '' - GROUP BY model_name`) - if err != nil { - return err - } - defer allTimeRows.Close() - - allTimeBySource := make(map[string]float64) - for allTimeRows.Next() { - var rawModel string - var count float64 - if err := allTimeRows.Scan(&rawModel, &count); err != nil { - return err - } - model := normalizeModelName(rawModel) - source := sourceFromModelName(model) - allTimeBySource[source] += count - } - if err := allTimeRows.Err(); err != nil { - return err - } - - for source, count := range allTimeBySource { - if count <= 0 { - continue - } - sourceKey := sanitizeMetricPart(source) - setValueMetric(snap, "source_"+sourceKey+"_requests", count, "requests", "all-time") - } - - todayRows, err := db.QueryContext(ctx, `SELECT model_name, COUNT(*) - FROM messages - WHERE model_name IS NOT NULL AND trim(model_name) != '' - AND date(created_at, 'localtime') = date('now', 'localtime') - GROUP BY model_name`) - if err == nil { - defer todayRows.Close() - todayBySource := make(map[string]float64) - for todayRows.Next() { - var rawModel string - var count float64 - if err := todayRows.Scan(&rawModel, &count); err != nil { - return err - } - model := normalizeModelName(rawModel) - source := sourceFromModelName(model) - todayBySource[source] += count - } - if err := todayRows.Err(); err != nil { - return err - } - - for source, count := range todayBySource { - if count <= 0 { - continue - } - sourceKey := sanitizeMetricPart(source) - setValueMetric(snap, "source_"+sourceKey+"_requests_today", count, "requests", "today") - } - } - - perDayRows, err := db.QueryContext(ctx, `SELECT date(created_at), model_name, COUNT(*) - FROM messages - WHERE model_name IS NOT NULL AND trim(model_name) != '' - GROUP BY date(created_at), model_name`) - if err != nil { - return nil - } - defer perDayRows.Close() - - perSourceDaily 
:= make(map[string]map[string]float64) - for perDayRows.Next() { - var day string - var rawModel string - var count float64 - if err := perDayRows.Scan(&day, &rawModel, &count); err != nil { - return err - } - day = strings.TrimSpace(day) - if day == "" { - continue - } - model := normalizeModelName(rawModel) - source := sourceFromModelName(model) - sourceKey := sanitizeMetricPart(source) - if perSourceDaily[sourceKey] == nil { - perSourceDaily[sourceKey] = make(map[string]float64) - } - perSourceDaily[sourceKey][day] += count - } - if err := perDayRows.Err(); err != nil { - return err - } - - for sourceKey, byDay := range perSourceDaily { - if len(byDay) == 0 { - continue - } - snap.DailySeries["usage_source_"+sourceKey] = core.SortedTimePoints(byDay) - } - - return nil -} - -func populateToolUsageFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { - hasFunctionName, err := tableHasColumn(ctx, db, "tool_calls", "function_name") - if err != nil || !hasFunctionName { - return nil - } - - rows, err := db.QueryContext(ctx, `SELECT function_name, COUNT(*) - FROM tool_calls - WHERE trim(function_name) != '' - GROUP BY function_name - ORDER BY COUNT(*) DESC`) - if err != nil { - return err - } - defer rows.Close() - - var top []string - for rows.Next() { - var toolName string - var count float64 - if err := rows.Scan(&toolName, &count); err != nil { - return err - } - toolName = strings.TrimSpace(toolName) - if toolName == "" { - continue - } - - setValueMetric(snap, "tool_"+sanitizeMetricPart(toolName), count, "calls", "all-time") - if len(top) < 6 { - top = append(top, fmt.Sprintf("%s=%.0f", toolName, count)) - } - } - if err := rows.Err(); err != nil { - return err - } - if len(top) > 0 { - snap.Raw["tool_usage"] = strings.Join(top, ", ") - } - - perDayRows, err := db.QueryContext(ctx, `SELECT date(m.created_at), tc.function_name, COUNT(*) - FROM tool_calls tc - JOIN messages m ON tc.message_id = m.id - WHERE trim(tc.function_name) != '' - GROUP 
BY date(m.created_at), tc.function_name`) - if err != nil { - return nil - } - defer perDayRows.Close() - - perToolDaily := make(map[string]map[string]float64) - for perDayRows.Next() { - var day string - var toolName string - var count float64 - if err := perDayRows.Scan(&day, &toolName, &count); err != nil { - return err - } - day = strings.TrimSpace(day) - toolKey := sanitizeMetricPart(toolName) - if day == "" || toolKey == "" { - continue - } - if perToolDaily[toolKey] == nil { - perToolDaily[toolKey] = make(map[string]float64) - } - perToolDaily[toolKey][day] += count - } - if err := perDayRows.Err(); err != nil { - return err - } - - for toolKey, byDay := range perToolDaily { - if len(byDay) == 0 { - continue - } - snap.DailySeries["usage_tool_"+toolKey] = core.SortedTimePoints(byDay) - } - - return nil -} - -func sourceFromModelName(model string) string { - normalized := normalizeModelName(model) - if normalized == "" { - return "unknown" - } - if strings.HasSuffix(normalized, ":cloud") || strings.Contains(normalized, "-cloud") { - return "cloud" - } - return "local" -} - -func populateDailySeriesFromDB(ctx context.Context, db *sql.DB, snap *core.UsageSnapshot) error { - dailyQueries := []struct { - key string - query string - }{ - {"messages", `SELECT date(created_at), COUNT(*) FROM messages GROUP BY date(created_at)`}, - {"sessions", `SELECT date(created_at), COUNT(*) FROM chats GROUP BY date(created_at)`}, - {"tool_calls", `SELECT date(m.created_at), COUNT(*) - FROM tool_calls tc - JOIN messages m ON tc.message_id = m.id - GROUP BY date(m.created_at)`}, - {"requests_user", `SELECT date(created_at), COUNT(*) FROM messages WHERE role = 'user' GROUP BY date(created_at)`}, - } - - for _, dq := range dailyQueries { - rows, err := db.QueryContext(ctx, dq.query) - if err != nil { - continue - } - - byDate := make(map[string]float64) - for rows.Next() { - var date string - var count float64 - if err := rows.Scan(&date, &count); err != nil { - rows.Close() - 
return err - } - if strings.TrimSpace(date) == "" { - continue - } - byDate[date] = count - } - rows.Close() - if len(byDate) > 0 { - points := core.SortedTimePoints(byDate) - snap.DailySeries[dq.key] = points - if dq.key == "requests_user" { - if _, exists := snap.DailySeries["requests"]; !exists { - snap.DailySeries["requests"] = points - } - } - } - } - - return nil -} - type versionResponse struct { Version string `json:"version"` } diff --git a/internal/telemetry/test_helpers_test.go b/internal/telemetry/test_helpers_test.go new file mode 100644 index 0000000..4ca8bf4 --- /dev/null +++ b/internal/telemetry/test_helpers_test.go @@ -0,0 +1,49 @@ +package telemetry + +import ( + "context" + "database/sql" + "path/filepath" + "testing" +) + +func openUsageViewTestStore(t *testing.T) (string, *Store) { + t.Helper() + + dbPath := filepath.Join(t.TempDir(), "telemetry.db") + store, err := OpenStore(dbPath) + if err != nil { + t.Fatalf("open store: %v", err) + } + t.Cleanup(func() { + _ = store.Close() + }) + return dbPath, store +} + +func openUsageViewRawTestStore(t *testing.T) (string, *sql.DB, *Store) { + t.Helper() + + dbPath := filepath.Join(t.TempDir(), "telemetry.db") + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatalf("open db: %v", err) + } + t.Cleanup(func() { + _ = db.Close() + }) + + store := NewStore(db) + if err := store.Init(context.Background()); err != nil { + t.Fatalf("init store: %v", err) + } + return dbPath, db, store +} + +func mustIngestUsageEvent(t *testing.T, store *Store, req IngestRequest, contextLabel string) { + t.Helper() + + if _, err := store.Ingest(context.Background(), req); err != nil { + t.Fatalf("%s: %v", contextLabel, err) + } +} diff --git a/internal/telemetry/usage_view_test.go b/internal/telemetry/usage_view_test.go index ef8ad0b..bd6dab6 100644 --- a/internal/telemetry/usage_view_test.go +++ b/internal/telemetry/usage_view_test.go @@ -2,8 +2,6 @@ package telemetry import ( "context" - "database/sql" - 
"path/filepath" "strings" "testing" "time" @@ -16,15 +14,10 @@ import ( func float64Ptr(v float64) *float64 { return &v } func TestApplyCanonicalUsageView_MergesTelemetryWithoutReplacingRootMetrics(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) occurredAt := time.Date(2026, 2, 22, 12, 0, 0, 0, time.UTC) - _, err = store.Ingest(context.Background(), IngestRequest{ + mustIngestUsageEvent(t, store, IngestRequest{ SourceSystem: SourceSystem("opencode"), SourceChannel: SourceChannelHook, OccurredAt: occurredAt, @@ -42,12 +35,9 @@ func TestApplyCanonicalUsageView_MergesTelemetryWithoutReplacingRootMetrics(t *t CostUSD: float64Ptr(0.012), Requests: int64Ptr(1), }, - }) - if err != nil { - t.Fatalf("ingest message event: %v", err) - } + }, "ingest message event") - _, err = store.Ingest(context.Background(), IngestRequest{ + mustIngestUsageEvent(t, store, IngestRequest{ SourceSystem: SourceSystem("opencode"), SourceChannel: SourceChannelHook, OccurredAt: occurredAt.Add(1 * time.Second), @@ -62,10 +52,7 @@ func TestApplyCanonicalUsageView_MergesTelemetryWithoutReplacingRootMetrics(t *t TokenUsage: core.TokenUsage{ Requests: int64Ptr(1), }, - }) - if err != nil { - t.Fatalf("ingest tool event: %v", err) - } + }, "ingest tool event") balance := 7.92 snaps := map[string]core.UsageSnapshot{ @@ -105,20 +92,10 @@ func TestApplyCanonicalUsageView_MergesTelemetryWithoutReplacingRootMetrics(t *t } func TestApplyCanonicalUsageView_DedupsLegacyCrossAccountDuplicates(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - t.Fatalf("open db: %v", err) - } - defer db.Close() - - store := NewStore(db) - if err := store.Init(context.Background()); err != nil { - t.Fatalf("init store: %v", err) - } + dbPath, db, store := 
openUsageViewRawTestStore(t) occurredAt := time.Date(2026, 2, 22, 12, 0, 0, 0, time.UTC) - _, err = store.Ingest(context.Background(), IngestRequest{ + _, err := store.Ingest(context.Background(), IngestRequest{ SourceSystem: SourceSystem("opencode"), SourceChannel: SourceChannelHook, OccurredAt: occurredAt, @@ -225,15 +202,10 @@ func TestApplyCanonicalUsageView_DedupsLegacyCrossAccountDuplicates(t *testing.T } func TestApplyCanonicalUsageView_TelemetryOverridesModelAndDailyAnalytics(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) occurredAt := time.Date(2026, 2, 22, 12, 0, 0, 0, time.UTC) - _, err = store.Ingest(context.Background(), IngestRequest{ + if _, err := store.Ingest(context.Background(), IngestRequest{ SourceSystem: SourceSystem("opencode"), SourceChannel: SourceChannelHook, OccurredAt: occurredAt, @@ -251,8 +223,7 @@ func TestApplyCanonicalUsageView_TelemetryOverridesModelAndDailyAnalytics(t *tes CostUSD: float64Ptr(9.99), Requests: int64Ptr(1), }, - }) - if err != nil { + }); err != nil { t.Fatalf("ingest message event: %v", err) } @@ -298,12 +269,7 @@ func TestApplyCanonicalUsageView_TelemetryOverridesModelAndDailyAnalytics(t *tes } func TestApplyCanonicalUsageView_FallsBackToProviderScopeForAccountView(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) occurredAt := time.Date(2026, 2, 23, 7, 30, 0, 0, time.UTC) input := int64(77) @@ -356,12 +322,7 @@ func TestApplyCanonicalUsageView_FallsBackToProviderScopeForAccountView(t *testi } func TestApplyCanonicalUsageView_ClearsStalePrefixedAttributeAndDiagnosticKeys(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := 
OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) occurredAt := time.Date(2026, 2, 23, 9, 0, 0, 0, time.UTC) if _, err := store.Ingest(context.Background(), IngestRequest{ @@ -424,12 +385,7 @@ func TestApplyCanonicalUsageView_ClearsStalePrefixedAttributeAndDiagnosticKeys(t } func TestApplyCanonicalUsageView_TelemetryOverwritesNativeBreakdown(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) occurredAt := time.Date(2026, 2, 23, 10, 0, 0, 0, time.UTC) if _, err := store.Ingest(context.Background(), IngestRequest{ @@ -525,12 +481,7 @@ func TestApplyCanonicalUsageView_TelemetryOverwritesNativeBreakdown(t *testing.T } func TestApplyCanonicalUsageView_ProviderFallbackUsesProviderIDWhenUpstreamMissing(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) occurredAt := time.Date(2026, 2, 23, 10, 30, 0, 0, time.UTC) if _, err := store.Ingest(context.Background(), IngestRequest{ @@ -578,12 +529,7 @@ func TestApplyCanonicalUsageView_ProviderFallbackUsesProviderIDWhenUpstreamMissi } func TestApplyCanonicalUsageView_IncludesErroredToolCallsAndMCPBreakdown(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) occurredAt := time.Now().UTC().Add(-2 * time.Minute) if _, err := store.Ingest(context.Background(), IngestRequest{ @@ -677,12 +623,7 @@ func TestParseMCPToolName_CopilotLegacyWrapper(t *testing.T) { } func TestApplyCanonicalUsageView_SkipsProviderBurnMetricsForCodex(t 
*testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) occurredAt := time.Now().UTC() if _, err := store.Ingest(context.Background(), IngestRequest{ @@ -731,12 +672,7 @@ func TestApplyCanonicalUsageView_SkipsProviderBurnMetricsForCodex(t *testing.T) } func TestApplyCanonicalUsageView_DedupsCodexMessageUsageByTurnID(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) now := time.Now().UTC() if _, err := store.Ingest(context.Background(), IngestRequest{ @@ -811,12 +747,7 @@ func TestApplyCanonicalUsageView_DedupsCodexMessageUsageByTurnID(t *testing.T) { } func TestApplyCanonicalUsageView_UsesClientFromPayloadBeforeWorkspace(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) now := time.Now().UTC() if _, err := store.Ingest(context.Background(), IngestRequest{ @@ -893,12 +824,7 @@ func TestApplyCanonicalUsageView_UsesClientFromPayloadBeforeWorkspace(t *testing } func TestApplyCanonicalUsageView_EmitsProjectMetricsFromWorkspace(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "telemetry.db") - store, err := OpenStore(dbPath) - if err != nil { - t.Fatalf("open store: %v", err) - } - defer store.Close() + dbPath, store := openUsageViewTestStore(t) now := time.Now().UTC() if _, err := store.Ingest(context.Background(), IngestRequest{ diff --git a/internal/tui/analytics.go b/internal/tui/analytics.go index 7001e3f..4db628d 100644 --- a/internal/tui/analytics.go +++ b/internal/tui/analytics.go @@ -15,26 +15,17 @@ import ( ) func (m Model) 
renderAnalyticsContent(w, h int) string { - data := extractCostData(m.visibleSnapshots(), m.analyticsFilter.text) - sortProviders(data.providers, m.analyticsSortBy) - sortModels(data.models, m.analyticsSortBy) - summary := computeAnalyticsSummary(data) - var statusBuf strings.Builder renderStatusBar(&statusBuf, m.analyticsSortBy, m.analyticsFilter.text, w) statusStr := statusBuf.String() - hasData := data.totalCost > 0 || len(data.models) > 0 || len(data.budgets) > 0 || - len(data.usageGauges) > 0 || len(data.tokenActivity) > 0 || len(data.timeSeries) > 0 - + content, hasData := m.cachedAnalyticsPageContent(w) if !hasData { empty := "\n" + dimStyle.Render(" No cost or usage data available.") empty += "\n" + dimStyle.Render(" Analytics requires providers that report spend, tokens, or budgets.") return statusStr + empty } - content := renderAnalyticsSinglePage(data, summary, w) - lines := strings.Split(statusStr+content, "\n") for len(lines) < h { lines = append(lines, "") diff --git a/internal/tui/analytics_cache.go b/internal/tui/analytics_cache.go new file mode 100644 index 0000000..560a997 --- /dev/null +++ b/internal/tui/analytics_cache.go @@ -0,0 +1,47 @@ +package tui + +import ( + "strconv" + "strings" +) + +type analyticsRenderCacheEntry struct { + key string + hasData bool + content string +} + +func (m *Model) invalidateAnalyticsCache() { + m.analyticsCache = analyticsRenderCacheEntry{} +} + +func (m *Model) cachedAnalyticsPageContent(w int) (string, bool) { + key := strings.Join([]string{ + strconv.Itoa(w), + strconv.Itoa(m.analyticsSortBy), + m.analyticsFilter.text, + string(m.timeWindow), + }, "|") + if m.analyticsCache.key == key { + return m.analyticsCache.content, m.analyticsCache.hasData + } + + data := extractCostData(m.visibleSnapshots(), m.analyticsFilter.text) + sortProviders(data.providers, m.analyticsSortBy) + sortModels(data.models, m.analyticsSortBy) + summary := computeAnalyticsSummary(data) + hasData := data.totalCost > 0 || 
len(data.models) > 0 || len(data.budgets) > 0 || + len(data.usageGauges) > 0 || len(data.tokenActivity) > 0 || len(data.timeSeries) > 0 + + content := "" + if hasData { + content = renderAnalyticsSinglePage(data, summary, w) + } + + m.analyticsCache = analyticsRenderCacheEntry{ + key: key, + hasData: hasData, + content: content, + } + return content, hasData +} diff --git a/internal/tui/dashboard_views.go b/internal/tui/dashboard_views.go index c9e563c..09370fb 100644 --- a/internal/tui/dashboard_views.go +++ b/internal/tui/dashboard_views.go @@ -140,6 +140,8 @@ func (m *Model) setDashboardView(mode dashboardViewMode) { m.detailOffset = 0 m.detailTab = 0 m.tileOffset = 0 + m.invalidateTileBodyCache() + m.invalidateDetailCache() } func (m Model) nextDashboardView(step int) dashboardViewMode { diff --git a/internal/tui/detail.go b/internal/tui/detail.go index 3e26ca9..a4441dc 100644 --- a/internal/tui/detail.go +++ b/internal/tui/detail.go @@ -78,11 +78,8 @@ func RenderDetailContent(snap core.UsageSnapshot, w int, warnThresh, critThresh showTimers := tabName == "Timers" || showAll showInfo := tabName == "Info" || showAll - // Extract burn rate from metrics for spending section. 
- burnRate := float64(0) - if brm, ok := snap.Metrics["burn_rate"]; ok && brm.Used != nil { - burnRate = *brm.Used - } + costSummary := core.ExtractAnalyticsCostSummary(snap) + burnRate := costSummary.BurnRateUSD if len(snap.Metrics) > 0 { groups := groupMetrics(snap.Metrics, widget, details) @@ -346,60 +343,6 @@ func wrapTags(tags []string, maxWidth int) []string { return rows } -type metricGroup struct { - title string - entries []metricEntry - order int -} - -type metricEntry struct { - key string - label string - metric core.Metric -} - -func groupMetrics(metrics map[string]core.Metric, widget core.DashboardWidget, details core.DetailWidget) []metricGroup { - groups := make(map[string]*metricGroup) - - for key, m := range metrics { - // MCP metrics are rendered in their own dedicated section. - if strings.HasPrefix(key, "mcp_") { - continue - } - groupName, label, order := classifyMetric(key, m, widget, details) - g, ok := groups[groupName] - if !ok { - g = &metricGroup{title: groupName, order: order} - groups[groupName] = g - } - g.entries = append(g.entries, metricEntry{key: key, label: label, metric: m}) - } - - result := make([]metricGroup, 0, len(groups)) - for _, g := range groups { - sort.Slice(g.entries, func(i, j int) bool { - return g.entries[i].key < g.entries[j].key - }) - result = append(result, *g) - } - sort.Slice(result, func(i, j int) bool { - if result[i].order != result[j].order { - return result[i].order < result[j].order - } - return result[i].title < result[j].title - }) - - return result -} - -func classifyMetric(key string, m core.Metric, widget core.DashboardWidget, details core.DetailWidget) (group, label string, order int) { - return core.ClassifyDetailMetric(key, m, widget, details) -} - -func metricLabel(widget core.DashboardWidget, key string) string { - return core.MetricLabel(widget, key) -} - func titleCase(s string) string { if len(s) <= 1 { return s @@ -407,259 +350,6 @@ func titleCase(s string) string { return 
strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) } -func renderMetricGroup(sb *strings.Builder, snap core.UsageSnapshot, group metricGroup, widget core.DashboardWidget, details core.DetailWidget, w int, warnThresh, critThresh float64, series map[string][]core.TimePoint, burnRate float64) { - sb.WriteString("\n") - renderDetailSectionHeader(sb, group.title, w) - - // Zero-value suppression: filter out zero-value metrics when the provider opts in. - entries := group.entries - if widget.SuppressZeroNonUsageMetrics || len(widget.SuppressZeroMetricKeys) > 0 { - entries = filterNonZeroEntries(entries, widget) - } - - switch details.SectionStyle(group.title) { - case core.DetailSectionStyleUsage: - renderUsageSection(sb, entries, w, warnThresh, critThresh) - case core.DetailSectionStyleSpending: - renderSpendingSection(sb, entries, w, burnRate) - case core.DetailSectionStyleTokens: - renderTokensSection(sb, snap, entries, widget, w, series) - case core.DetailSectionStyleActivity: - renderActivitySection(sb, entries, widget, w, series) - case core.DetailSectionStyleLanguages: - renderListSection(sb, entries, w) - default: - renderListSection(sb, entries, w) - } -} - -func renderListSection(sb *strings.Builder, entries []metricEntry, w int) { - labelW := sectionLabelWidth(w) - for _, e := range entries { - val := formatMetricValue(e.metric) - sb.WriteString(fmt.Sprintf(" %s %s\n", - labelStyle.Width(labelW).Render(e.label), valueStyle.Render(val))) - } -} - -func renderUsageSection(sb *strings.Builder, entries []metricEntry, w int, warnThresh, critThresh float64) { - labelW := sectionLabelWidth(w) - - var usageEntries []metricEntry - var gaugeEntries []metricEntry - - for _, e := range entries { - m := e.metric - if m.Remaining != nil && m.Limit != nil && m.Unit != "%" && m.Unit != "USD" { - usageEntries = append(usageEntries, e) - } else { - gaugeEntries = append(gaugeEntries, e) - } - } - - for _, entry := range gaugeEntries { - renderGaugeEntry(sb, entry, labelW, w, 
warnThresh, critThresh) - } - - if len(usageEntries) > 0 { - if len(gaugeEntries) > 0 { - sb.WriteString("\n") - } - renderUsageTable(sb, usageEntries, w, warnThresh, critThresh) - } -} - -func renderSpendingSection(sb *strings.Builder, entries []metricEntry, w int, burnRate float64) { - labelW := sectionLabelWidth(w) - gaugeW := sectionGaugeWidth(w, labelW) - - var modelCosts []metricEntry - var otherCosts []metricEntry - - for _, e := range entries { - if isModelCostKey(e.key) { - modelCosts = append(modelCosts, e) - } else { - otherCosts = append(otherCosts, e) - } - } - - for _, e := range otherCosts { - if e.metric.Used != nil && e.metric.Limit != nil && *e.metric.Limit > 0 { - color := colorTeal - if *e.metric.Used >= *e.metric.Limit*0.8 { - color = colorRed - } else if *e.metric.Used >= *e.metric.Limit*0.5 { - color = colorYellow - } - line := RenderBudgetGauge(e.label, *e.metric.Used, *e.metric.Limit, gaugeW, labelW, color, burnRate) - sb.WriteString(line + "\n") - } else { - val := formatMetricValue(e.metric) - vs := metricValueStyle - if !strings.Contains(val, "$") && !strings.Contains(val, "USD") { - vs = valueStyle - } - sb.WriteString(fmt.Sprintf(" %s %s\n", - labelStyle.Width(labelW).Render(e.label), vs.Render(val))) - } - } - - if len(modelCosts) > 0 { - if len(otherCosts) > 0 { - sb.WriteString("\n") - } - renderModelCostsTable(sb, modelCosts, w) - } -} - -func renderActivitySection(sb *strings.Builder, entries []metricEntry, widget core.DashboardWidget, w int, series map[string][]core.TimePoint) { - labelW := sectionLabelWidth(w) - - for _, e := range entries { - val := formatMetricValue(e.metric) - sb.WriteString(fmt.Sprintf(" %s %s\n", - labelStyle.Width(labelW).Render(e.label), valueStyle.Render(val))) - } - - renderSectionSparklines(sb, widget, w, series, []string{ - "messages", "sessions", "tool_calls", - }) -} - -func renderTimersSection(sb *strings.Builder, resets map[string]time.Time, widget core.DashboardWidget, w int) { - labelW := 
sectionLabelWidth(w) - renderDetailSectionHeader(sb, "Timers", w) - - timerKeys := lo.Keys(resets) - sort.Strings(timerKeys) - - for _, k := range timerKeys { - t := resets[k] - label := metricLabel(widget, k) - remaining := time.Until(t) - dateStr := t.Format("Jan 02 15:04") - - var urgency string - if remaining <= 0 { - urgency = dimStyle.Render("○") - sb.WriteString(fmt.Sprintf(" %s %s %s (expired)\n", - urgency, - labelStyle.Width(labelW).Render(label), - dimStyle.Render(dateStr), - )) - } else { - switch { - case remaining < 15*time.Minute: - urgency = lipgloss.NewStyle().Foreground(colorCrit).Render("●") - case remaining < time.Hour: - urgency = lipgloss.NewStyle().Foreground(colorWarn).Render("●") - default: - urgency = lipgloss.NewStyle().Foreground(colorOK).Render("●") - } - sb.WriteString(fmt.Sprintf(" %s %s %s (in %s)\n", - urgency, - labelStyle.Width(labelW).Render(label), - valueStyle.Render(dateStr), - tealStyle.Render(formatDuration(remaining)), - )) - } - } -} - -func renderSectionSparklines(sb *strings.Builder, widget core.DashboardWidget, w int, series map[string][]core.TimePoint, candidates []string) { - if len(series) == 0 { - return - } - - sparkW := w - 8 - if sparkW < 12 { - sparkW = 12 - } - if sparkW > 60 { - sparkW = 60 - } - - colors := []lipgloss.Color{colorTeal, colorSapphire, colorGreen, colorPeach} - colorIdx := 0 - - for _, key := range candidates { - points, ok := series[key] - if !ok || len(points) < 2 { - continue - } - values := make([]float64, len(points)) - for i, p := range points { - values[i] = p.Value - } - c := colors[colorIdx%len(colors)] - colorIdx++ - spark := RenderSparkline(values, sparkW, c) - label := metricLabel(widget, key) - sb.WriteString(fmt.Sprintf(" %s %s\n", dimStyle.Render(label), spark)) - } - - rendered := make(map[string]bool) - for _, c := range candidates { - rendered[c] = true - } - - for _, candidate := range candidates { - prefix := candidate - if !strings.HasSuffix(prefix, "_") { - prefix += "_" - 
} - for key, points := range series { - if rendered[key] || len(points) < 2 { - continue - } - if strings.HasPrefix(key, prefix) { - rendered[key] = true - values := make([]float64, len(points)) - for i, p := range points { - values[i] = p.Value - } - c := colors[colorIdx%len(colors)] - colorIdx++ - spark := RenderSparkline(values, sparkW, c) - label := metricLabel(widget, key) - sb.WriteString(fmt.Sprintf(" %s %s\n", dimStyle.Render(label), spark)) - } - } - } -} - -// filterNonZeroEntries removes entries where all numeric values are nil or zero, -// respecting the widget's suppression configuration. -func filterNonZeroEntries(entries []metricEntry, widget core.DashboardWidget) []metricEntry { - suppressKeys := make(map[string]bool, len(widget.SuppressZeroMetricKeys)) - for _, k := range widget.SuppressZeroMetricKeys { - suppressKeys[k] = true - } - - var result []metricEntry - for _, e := range entries { - m := e.metric - isZero := (m.Used == nil || *m.Used == 0) && - (m.Remaining == nil || *m.Remaining == 0) && - (m.Limit == nil || *m.Limit == 0) - - if isZero { - if widget.SuppressZeroNonUsageMetrics { - // Skip if it's not a quota/usage metric (has no limit). - if m.Limit == nil { - continue - } - } - if suppressKeys[e.key] { - continue - } - } - result = append(result, e) - } - return result -} - // renderInfoSection renders Attributes, Diagnostics, and Raw as separate sub-sections. 
func renderInfoSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { labelW := sectionLabelWidth(w) @@ -714,234 +404,6 @@ func renderKeyValuePairs(sb *strings.Builder, data map[string]string, labelW, ma } } -func sectionLabelWidth(w int) int { - switch { - case w < 45: - return 14 - case w < 55: - return 18 - default: - return 22 - } -} - -func sectionGaugeWidth(w, labelW int) int { - gw := w - labelW - 14 - if gw < 8 { - gw = 8 - } - if gw > 28 { - gw = 28 - } - return gw -} - -func renderGaugeEntry(sb *strings.Builder, entry metricEntry, labelW, w int, warnThresh, critThresh float64) { - m := entry.metric - labelRendered := labelStyle.Width(labelW).Render(entry.label) - gaugeW := sectionGaugeWidth(w, labelW) - - if m.Unit == "%" && m.Used != nil { - gauge := RenderUsageGauge(*m.Used, gaugeW, warnThresh, critThresh) - sb.WriteString(fmt.Sprintf(" %s %s\n", labelRendered, gauge)) - if detail := formatUsageDetail(m); detail != "" { - sb.WriteString(fmt.Sprintf(" %s %s\n", - strings.Repeat(" ", labelW+2), dimStyle.Render(detail))) - } - return - } - - if pct := m.Percent(); pct >= 0 { - gauge := RenderGauge(pct, gaugeW, warnThresh, critThresh) - sb.WriteString(fmt.Sprintf(" %s %s\n", labelRendered, gauge)) - if detail := formatMetricDetail(m); detail != "" { - sb.WriteString(fmt.Sprintf(" %s %s\n", - strings.Repeat(" ", labelW+2), dimStyle.Render(detail))) - } - return - } - - val := formatMetricValue(m) - vs := valueStyle - if strings.Contains(val, "$") || strings.Contains(val, "USD") { - vs = metricValueStyle - } - sb.WriteString(fmt.Sprintf(" %s %s\n", labelRendered, vs.Render(val))) -} - -func isModelCostKey(key string) bool { - return core.IsModelCostMetricKey(key) -} - -func formatMetricValue(m core.Metric) string { - var value string - switch { - case m.Used != nil && m.Limit != nil: - value = fmt.Sprintf("%s / %s %s", - formatNumber(*m.Used), formatNumber(*m.Limit), m.Unit) - case m.Remaining != nil && m.Limit != nil: - 
value = fmt.Sprintf("%s / %s %s remaining", - formatNumber(*m.Remaining), formatNumber(*m.Limit), m.Unit) - case m.Used != nil: - value = fmt.Sprintf("%s %s", formatNumber(*m.Used), m.Unit) - case m.Remaining != nil: - value = fmt.Sprintf("%s %s remaining", formatNumber(*m.Remaining), m.Unit) - } - - if m.Window != "" && m.Window != "all_time" && m.Window != "current_period" { - value += " " + dimStyle.Render("["+m.Window+"]") - } - return value -} - -func renderModelCostsTable(sb *strings.Builder, entries []metricEntry, w int) { - type modelCost struct { - name string - cost float64 - window string - hasData bool - } - - var models []modelCost - var unmatched []metricEntry - - for _, e := range entries { - label := e.label - var modelName string - switch { - case strings.HasSuffix(label, "_cost"): - modelName = strings.TrimSuffix(label, "_cost") - case strings.HasSuffix(label, "_cost_usd"): - modelName = strings.TrimSuffix(label, "_cost_usd") - default: - unmatched = append(unmatched, e) - continue - } - - cost := float64(0) - if e.metric.Used != nil { - cost = *e.metric.Used - } - models = append(models, modelCost{ - name: prettifyModelName(modelName), - cost: cost, - window: e.metric.Window, - hasData: true, - }) - } - - sort.Slice(models, func(i, j int) bool { - return models[i].cost > models[j].cost - }) - - if len(models) > 0 { - nameW := 28 - if w < 55 { - nameW = 20 - } - - windowHint := "" - if len(models) > 0 && models[0].window != "" && - models[0].window != "all_time" && models[0].window != "current_period" { - windowHint = " " + dimStyle.Render("["+models[0].window+"]") - } - - sb.WriteString(fmt.Sprintf(" %-*s %10s%s\n", - nameW, dimStyle.Bold(true).Render("Model"), - dimStyle.Bold(true).Render("Cost"), - windowHint, - )) - - for _, mc := range models { - name := mc.name - if len(name) > nameW { - name = name[:nameW-1] + "…" - } - costStr := formatUSD(mc.cost) - costStyle := tealStyle - if mc.cost >= 10 { - costStyle = metricValueStyle - } - 
sb.WriteString(fmt.Sprintf(" %-*s %10s\n", - nameW, valueStyle.Render(name), - costStyle.Render(costStr), - )) - } - } - - for _, e := range unmatched { - val := formatMetricValue(e.metric) - sb.WriteString(fmt.Sprintf(" %s %s\n", - labelStyle.Width(22).Render(prettifyModelName(e.label)), - valueStyle.Render(val), - )) - } -} - -func renderUsageTable(sb *strings.Builder, entries []metricEntry, w int, warnThresh, critThresh float64) { - if len(entries) == 0 { - return - } - - sort.Slice(entries, func(i, j int) bool { - pi := entries[i].metric.Percent() - pj := entries[j].metric.Percent() - if pi < 0 { - pi = 200 - } - if pj < 0 { - pj = 200 - } - return pi < pj - }) - - nameW := 30 - gaugeW := 10 - if w < 65 { - nameW = 22 - gaugeW = 8 - } - if w < 50 { - nameW = 16 - gaugeW = 6 - } - - for _, entry := range entries { - m := entry.metric - name := entry.label - if len(name) > nameW { - name = name[:nameW-1] + "…" - } - - pct := m.Percent() - gauge := "" - pctStr := "" - if pct >= 0 { - gauge = RenderMiniGauge(pct, gaugeW) - var color lipgloss.Color - switch { - case pct <= critThresh*100: - color = colorCrit - case pct <= warnThresh*100: - color = colorWarn - default: - color = colorOK - } - pctStr = lipgloss.NewStyle().Foreground(color).Bold(true).Render(fmt.Sprintf("%5.1f%%", pct)) - } - - windowStr := "" - if m.Window != "" && m.Window != "all_time" && m.Window != "current_period" { - windowStr = dimStyle.Render(" [" + m.Window + "]") - } - - sb.WriteString(fmt.Sprintf(" %-*s %s %s%s\n", - nameW, labelStyle.Render(name), - gauge, pctStr, windowStr, - )) - } -} - func renderRawData(sb *strings.Builder, raw map[string]string, widget core.DashboardWidget, w int) { labelW := sectionLabelWidth(w) diff --git a/internal/tui/detail_analytics_sections.go b/internal/tui/detail_analytics_sections.go index 49b5b4b..5160084 100644 --- a/internal/tui/detail_analytics_sections.go +++ b/internal/tui/detail_analytics_sections.go @@ -75,8 +75,7 @@ func hasChartableSeries(series 
map[string][]core.TimePoint) bool { } func hasLanguageMetrics(snap core.UsageSnapshot) bool { - langs, _ := core.ExtractLanguageUsage(snap) - return len(langs) > 0 + return core.HasLanguageUsage(snap) } func renderLanguagesSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { @@ -151,8 +150,7 @@ func renderLanguagesSection(sb *strings.Builder, snap core.UsageSnapshot, w int) } func hasMCPMetrics(snap core.UsageSnapshot) bool { - servers, _ := core.ExtractMCPUsage(snap) - return len(servers) > 0 + return core.HasMCPUsage(snap) } func renderMCPSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { @@ -247,12 +245,7 @@ func renderMCPSection(sb *strings.Builder, snap core.UsageSnapshot, w int) { } func hasModelCostMetrics(snap core.UsageSnapshot) bool { - for key := range snap.Metrics { - if core.IsModelCostMetricKey(key) { - return true - } - } - return false + return core.HasModelCostUsage(snap) } func renderTrendsSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { diff --git a/internal/tui/detail_metrics.go b/internal/tui/detail_metrics.go new file mode 100644 index 0000000..08197af --- /dev/null +++ b/internal/tui/detail_metrics.go @@ -0,0 +1,536 @@ +package tui + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/samber/lo" +) + +type metricGroup struct { + title string + entries []metricEntry + order int +} + +type metricEntry struct { + key string + label string + metric core.Metric +} + +func groupMetrics(metrics map[string]core.Metric, widget core.DashboardWidget, details core.DetailWidget) []metricGroup { + groups := make(map[string]*metricGroup) + + for key, m := range metrics { + if !core.IncludeDetailMetricKey(key) { + continue + } + groupName, label, order := classifyMetric(key, m, widget, details) + g, ok := groups[groupName] + if !ok { + g = &metricGroup{title: groupName, order: order} + 
groups[groupName] = g + } + g.entries = append(g.entries, metricEntry{key: key, label: label, metric: m}) + } + + result := make([]metricGroup, 0, len(groups)) + for _, g := range groups { + sort.Slice(g.entries, func(i, j int) bool { + return g.entries[i].key < g.entries[j].key + }) + result = append(result, *g) + } + sort.Slice(result, func(i, j int) bool { + if result[i].order != result[j].order { + return result[i].order < result[j].order + } + return result[i].title < result[j].title + }) + + return result +} + +func classifyMetric(key string, m core.Metric, widget core.DashboardWidget, details core.DetailWidget) (group, label string, order int) { + return core.ClassifyDetailMetric(key, m, widget, details) +} + +func metricLabel(widget core.DashboardWidget, key string) string { + return core.MetricLabel(widget, key) +} + +func renderMetricGroup(sb *strings.Builder, snap core.UsageSnapshot, group metricGroup, widget core.DashboardWidget, details core.DetailWidget, w int, warnThresh, critThresh float64, series map[string][]core.TimePoint, burnRate float64) { + sb.WriteString("\n") + renderDetailSectionHeader(sb, group.title, w) + + entries := group.entries + if widget.SuppressZeroNonUsageMetrics || len(widget.SuppressZeroMetricKeys) > 0 { + entries = filterNonZeroEntries(entries, widget) + } + + switch details.SectionStyle(group.title) { + case core.DetailSectionStyleUsage: + renderUsageSection(sb, entries, w, warnThresh, critThresh) + case core.DetailSectionStyleSpending: + renderSpendingSection(sb, entries, w, burnRate) + case core.DetailSectionStyleTokens: + renderTokensSection(sb, snap, entries, widget, w, series) + case core.DetailSectionStyleActivity: + renderActivitySection(sb, entries, widget, w, series) + case core.DetailSectionStyleLanguages: + renderListSection(sb, entries, w) + default: + renderListSection(sb, entries, w) + } +} + +func renderListSection(sb *strings.Builder, entries []metricEntry, w int) { + labelW := sectionLabelWidth(w) + for _, e 
:= range entries { + val := formatMetricValue(e.metric) + sb.WriteString(fmt.Sprintf(" %s %s\n", + labelStyle.Width(labelW).Render(e.label), valueStyle.Render(val))) + } +} + +func renderUsageSection(sb *strings.Builder, entries []metricEntry, w int, warnThresh, critThresh float64) { + labelW := sectionLabelWidth(w) + + var usageEntries []metricEntry + var gaugeEntries []metricEntry + + for _, e := range entries { + m := e.metric + if m.Remaining != nil && m.Limit != nil && m.Unit != "%" && m.Unit != "USD" { + usageEntries = append(usageEntries, e) + } else { + gaugeEntries = append(gaugeEntries, e) + } + } + + for _, entry := range gaugeEntries { + renderGaugeEntry(sb, entry, labelW, w, warnThresh, critThresh) + } + + if len(usageEntries) > 0 { + if len(gaugeEntries) > 0 { + sb.WriteString("\n") + } + renderUsageTable(sb, usageEntries, w, warnThresh, critThresh) + } +} + +func renderSpendingSection(sb *strings.Builder, entries []metricEntry, w int, burnRate float64) { + labelW := sectionLabelWidth(w) + gaugeW := sectionGaugeWidth(w, labelW) + + var modelCosts []metricEntry + var otherCosts []metricEntry + + for _, e := range entries { + if isModelCostKey(e.key) { + modelCosts = append(modelCosts, e) + } else { + otherCosts = append(otherCosts, e) + } + } + + for _, e := range otherCosts { + if e.metric.Used != nil && e.metric.Limit != nil && *e.metric.Limit > 0 { + color := colorTeal + if *e.metric.Used >= *e.metric.Limit*0.8 { + color = colorRed + } else if *e.metric.Used >= *e.metric.Limit*0.5 { + color = colorYellow + } + line := RenderBudgetGauge(e.label, *e.metric.Used, *e.metric.Limit, gaugeW, labelW, color, burnRate) + sb.WriteString(line + "\n") + } else { + val := formatMetricValue(e.metric) + vs := metricValueStyle + if !strings.Contains(val, "$") && !strings.Contains(val, "USD") { + vs = valueStyle + } + sb.WriteString(fmt.Sprintf(" %s %s\n", + labelStyle.Width(labelW).Render(e.label), vs.Render(val))) + } + } + + if len(modelCosts) > 0 { + if 
len(otherCosts) > 0 { + sb.WriteString("\n") + } + renderModelCostsTable(sb, modelCosts, w) + } +} + +func renderActivitySection(sb *strings.Builder, entries []metricEntry, widget core.DashboardWidget, w int, series map[string][]core.TimePoint) { + labelW := sectionLabelWidth(w) + + for _, e := range entries { + val := formatMetricValue(e.metric) + sb.WriteString(fmt.Sprintf(" %s %s\n", + labelStyle.Width(labelW).Render(e.label), valueStyle.Render(val))) + } + + renderSectionSparklines(sb, widget, w, series, []string{ + "messages", "sessions", "tool_calls", + }) +} + +func renderTimersSection(sb *strings.Builder, resets map[string]time.Time, widget core.DashboardWidget, w int) { + labelW := sectionLabelWidth(w) + renderDetailSectionHeader(sb, "Timers", w) + + timerKeys := lo.Keys(resets) + sort.Strings(timerKeys) + + for _, k := range timerKeys { + t := resets[k] + label := metricLabel(widget, k) + remaining := time.Until(t) + dateStr := t.Format("Jan 02 15:04") + + var urgency string + if remaining <= 0 { + urgency = dimStyle.Render("○") + sb.WriteString(fmt.Sprintf(" %s %s %s (expired)\n", + urgency, + labelStyle.Width(labelW).Render(label), + dimStyle.Render(dateStr), + )) + } else { + switch { + case remaining < 15*time.Minute: + urgency = lipgloss.NewStyle().Foreground(colorCrit).Render("●") + case remaining < time.Hour: + urgency = lipgloss.NewStyle().Foreground(colorWarn).Render("●") + default: + urgency = lipgloss.NewStyle().Foreground(colorOK).Render("●") + } + sb.WriteString(fmt.Sprintf(" %s %s %s (in %s)\n", + urgency, + labelStyle.Width(labelW).Render(label), + valueStyle.Render(dateStr), + tealStyle.Render(formatDuration(remaining)), + )) + } + } +} + +func renderSectionSparklines(sb *strings.Builder, widget core.DashboardWidget, w int, series map[string][]core.TimePoint, candidates []string) { + if len(series) == 0 { + return + } + + sparkW := w - 8 + if sparkW < 12 { + sparkW = 12 + } + if sparkW > 60 { + sparkW = 60 + } + + colors := 
[]lipgloss.Color{colorTeal, colorSapphire, colorGreen, colorPeach} + colorIdx := 0 + + for _, key := range candidates { + points, ok := series[key] + if !ok || len(points) < 2 { + continue + } + values := make([]float64, len(points)) + for i, p := range points { + values[i] = p.Value + } + c := colors[colorIdx%len(colors)] + colorIdx++ + spark := RenderSparkline(values, sparkW, c) + label := metricLabel(widget, key) + sb.WriteString(fmt.Sprintf(" %s %s\n", dimStyle.Render(label), spark)) + } + + rendered := make(map[string]bool) + for _, c := range candidates { + rendered[c] = true + } + + for _, candidate := range candidates { + prefix := candidate + if !strings.HasSuffix(prefix, "_") { + prefix += "_" + } + for key, points := range series { + if rendered[key] || len(points) < 2 { + continue + } + if strings.HasPrefix(key, prefix) { + rendered[key] = true + values := make([]float64, len(points)) + for i, p := range points { + values[i] = p.Value + } + c := colors[colorIdx%len(colors)] + colorIdx++ + spark := RenderSparkline(values, sparkW, c) + label := metricLabel(widget, key) + sb.WriteString(fmt.Sprintf(" %s %s\n", dimStyle.Render(label), spark)) + } + } + } +} + +func filterNonZeroEntries(entries []metricEntry, widget core.DashboardWidget) []metricEntry { + suppressKeys := make(map[string]bool, len(widget.SuppressZeroMetricKeys)) + for _, k := range widget.SuppressZeroMetricKeys { + suppressKeys[k] = true + } + + var result []metricEntry + for _, e := range entries { + m := e.metric + isZero := (m.Used == nil || *m.Used == 0) && + (m.Remaining == nil || *m.Remaining == 0) && + (m.Limit == nil || *m.Limit == 0) + + if isZero { + if widget.SuppressZeroNonUsageMetrics && m.Limit == nil { + continue + } + if suppressKeys[e.key] { + continue + } + } + result = append(result, e) + } + return result +} + +func sectionLabelWidth(w int) int { + switch { + case w < 45: + return 14 + case w < 55: + return 18 + default: + return 22 + } +} + +func sectionGaugeWidth(w, 
labelW int) int { + gw := w - labelW - 14 + if gw < 8 { + gw = 8 + } + if gw > 28 { + gw = 28 + } + return gw +} + +func renderGaugeEntry(sb *strings.Builder, entry metricEntry, labelW, w int, warnThresh, critThresh float64) { + m := entry.metric + labelRendered := labelStyle.Width(labelW).Render(entry.label) + gaugeW := sectionGaugeWidth(w, labelW) + + if m.Unit == "%" && m.Used != nil { + gauge := RenderUsageGauge(*m.Used, gaugeW, warnThresh, critThresh) + sb.WriteString(fmt.Sprintf(" %s %s\n", labelRendered, gauge)) + if detail := formatUsageDetail(m); detail != "" { + sb.WriteString(fmt.Sprintf(" %s %s\n", + strings.Repeat(" ", labelW+2), dimStyle.Render(detail))) + } + return + } + + if pct := m.Percent(); pct >= 0 { + gauge := RenderGauge(pct, gaugeW, warnThresh, critThresh) + sb.WriteString(fmt.Sprintf(" %s %s\n", labelRendered, gauge)) + if detail := formatMetricDetail(m); detail != "" { + sb.WriteString(fmt.Sprintf(" %s %s\n", + strings.Repeat(" ", labelW+2), dimStyle.Render(detail))) + } + return + } + + val := formatMetricValue(m) + sb.WriteString(fmt.Sprintf(" %s %s\n", labelRendered, valueStyle.Render(val))) +} + +func isModelCostKey(key string) bool { + return core.IsModelCostMetricKey(key) +} + +func formatMetricValue(m core.Metric) string { + var value string + switch { + case m.Used != nil && m.Limit != nil: + value = fmt.Sprintf("%s / %s %s", + formatNumber(*m.Used), formatNumber(*m.Limit), m.Unit) + case m.Remaining != nil && m.Limit != nil: + value = fmt.Sprintf("%s / %s %s remaining", + formatNumber(*m.Remaining), formatNumber(*m.Limit), m.Unit) + case m.Used != nil: + value = fmt.Sprintf("%s %s", formatNumber(*m.Used), m.Unit) + case m.Remaining != nil: + value = fmt.Sprintf("%s %s remaining", formatNumber(*m.Remaining), m.Unit) + } + + if m.Window != "" && m.Window != "all_time" && m.Window != "current_period" { + value += " " + dimStyle.Render("["+m.Window+"]") + } + return value +} + +func renderModelCostsTable(sb *strings.Builder, entries 
[]metricEntry, w int) { + type modelCost struct { + name string + cost float64 + window string + hasData bool + } + + var models []modelCost + var unmatched []metricEntry + + for _, e := range entries { + label := e.label + var modelName string + switch { + case strings.HasSuffix(label, "_cost"): + modelName = strings.TrimSuffix(label, "_cost") + case strings.HasSuffix(label, "_cost_usd"): + modelName = strings.TrimSuffix(label, "_cost_usd") + default: + unmatched = append(unmatched, e) + continue + } + + cost := float64(0) + if e.metric.Used != nil { + cost = *e.metric.Used + } + models = append(models, modelCost{ + name: prettifyModelName(modelName), + cost: cost, + window: e.metric.Window, + hasData: true, + }) + } + + sort.Slice(models, func(i, j int) bool { + return models[i].cost > models[j].cost + }) + + if len(models) > 0 { + nameW := 28 + if w < 55 { + nameW = 20 + } + + windowHint := "" + if len(models) > 0 && models[0].window != "" && + models[0].window != "all_time" && models[0].window != "current_period" { + windowHint = " " + dimStyle.Render("["+models[0].window+"]") + } + + sb.WriteString(fmt.Sprintf(" %-*s %10s%s\n", + nameW, dimStyle.Bold(true).Render("Model"), + dimStyle.Bold(true).Render("Cost"), + windowHint, + )) + + for _, mc := range models { + name := mc.name + if len(name) > nameW { + name = name[:nameW-1] + "…" + } + costStr := formatUSD(mc.cost) + costStyle := tealStyle + if mc.cost >= 10 { + costStyle = metricValueStyle + } + sb.WriteString(fmt.Sprintf(" %-*s %10s\n", + nameW, valueStyle.Render(name), + costStyle.Render(costStr), + )) + } + } + + for _, e := range unmatched { + val := formatMetricValue(e.metric) + sb.WriteString(fmt.Sprintf(" %s %s\n", + labelStyle.Width(22).Render(prettifyModelName(e.label)), + valueStyle.Render(val), + )) + } +} + +func renderUsageTable(sb *strings.Builder, entries []metricEntry, w int, warnThresh, critThresh float64) { + if len(entries) == 0 { + return + } + + sort.Slice(entries, func(i, j int) bool { 
+ pi := entries[i].metric.Percent() + pj := entries[j].metric.Percent() + if pi < 0 { + pi = 200 + } + if pj < 0 { + pj = 200 + } + return pi < pj + }) + + nameW := 30 + gaugeW := 10 + if w < 65 { + nameW = 22 + gaugeW = 8 + } + if w < 50 { + nameW = 16 + gaugeW = 6 + } + + for _, entry := range entries { + m := entry.metric + name := entry.label + if len(name) > nameW { + name = name[:nameW-1] + "…" + } + + pct := m.Percent() + gauge := "" + pctStr := "" + if pct >= 0 { + gauge = RenderMiniGauge(pct, gaugeW) + var color lipgloss.Color + switch { + case pct <= critThresh*100: + color = colorCrit + case pct <= warnThresh*100: + color = colorWarn + default: + color = colorOK + } + pctStr = lipgloss.NewStyle().Foreground(color).Bold(true).Render(fmt.Sprintf("%5.1f%%", pct)) + } + + windowStr := "" + if m.Window != "" && m.Window != "all_time" && m.Window != "current_period" { + windowStr = dimStyle.Render(" [" + m.Window + "]") + } + + sb.WriteString(fmt.Sprintf(" %-*s %s %s%s\n", + nameW, labelStyle.Render(name), + gauge, pctStr, windowStr, + )) + } +} diff --git a/internal/tui/model.go b/internal/tui/model.go index bb9b4d4..741e252 100644 --- a/internal/tui/model.go +++ b/internal/tui/model.go @@ -144,6 +144,8 @@ type Model struct { tileOffset int // vertical scroll offset for selected dashboard tile row expandedModelMixTiles map[string]bool tileBodyCache map[string][]string + analyticsCache analyticsRenderCacheEntry + detailCache detailRenderCacheEntry warnThreshold float64 critThreshold float64 @@ -197,6 +199,8 @@ func NewModel( accountProviders: make(map[string]string), expandedModelMixTiles: make(map[string]bool), tileBodyCache: make(map[string][]string), + analyticsCache: analyticsRenderCacheEntry{}, + detailCache: detailRenderCacheEntry{}, daemon: daemonState{status: DaemonConnecting}, timeWindow: timeWindow, } @@ -812,7 +816,7 @@ func (m Model) renderDetailPanel(w, h int) string { activeTab = 0 } - content := RenderDetailContent(snap, w-2, m.warnThreshold, 
m.critThreshold, activeTab) + content := m.cachedDetailContent(ids[m.cursor], snap, w-2, activeTab) lines := strings.Split(content, "\n") totalLines := len(lines) @@ -951,6 +955,7 @@ func (m Model) settingsIDs() []string { func (m *Model) setWidgetSections(entries []config.DashboardWidgetSection) { m.widgetSections = normalizeWidgetSectionEntries(entries) m.applyWidgetSectionOverrides() + m.invalidateTileBodyCache() } func normalizeWidgetSectionEntries(entries []config.DashboardWidgetSection) []config.DashboardWidgetSection { @@ -1044,6 +1049,7 @@ func (m *Model) setWidgetSectionEntries(entries []config.DashboardWidgetSection) normalized := normalizeWidgetSectionEntries(entries) m.widgetSections = normalized m.applyWidgetSectionOverrides() + m.invalidateTileBodyCache() } func (m Model) dashboardWidgetSectionConfigEntries() []config.DashboardWidgetSection { diff --git a/internal/tui/model_commands.go b/internal/tui/model_commands.go index 4203f5a..60bff1b 100644 --- a/internal/tui/model_commands.go +++ b/internal/tui/model_commands.go @@ -152,6 +152,7 @@ func (m Model) requestRefresh() Model { func (m Model) beginTimeWindowRefresh(window core.TimeWindow) Model { m.timeWindow = window + m.invalidateRenderCaches() if m.onTimeWindowChange != nil { m.onTimeWindowChange(window) } diff --git a/internal/tui/model_display_info.go b/internal/tui/model_display_info.go index c0dea22..0444b90 100644 --- a/internal/tui/model_display_info.go +++ b/internal/tui/model_display_info.go @@ -37,6 +37,7 @@ func normalizeProviderDisplayInfoType(info providerDisplayInfo) providerDisplayI func computeDisplayInfoRaw(snap core.UsageSnapshot, widget core.DashboardWidget) providerDisplayInfo { info := providerDisplayInfo{gaugePercent: -1} + costSummary := core.ExtractAnalyticsCostSummary(snap) switch snap.Status { case core.StatusError: @@ -243,8 +244,8 @@ func computeDisplayInfoRaw(snap core.UsageSnapshot, widget core.DashboardWidget) detailParts = append(detailParts, fmt.Sprintf("~$%.2f", 
*dc.Used)) } } - if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("$%.2f/h", *br.Used)) + if costSummary.BurnRateUSD > 0 { + detailParts = append(detailParts, fmt.Sprintf("$%.2f/h", costSummary.BurnRateUSD)) } info.detail = strings.Join(detailParts, " · ") core.Tracef("[display] %s: branch=usage_five_hour used=%.1f gauge=%.1f -> tag=Usage", snap.ProviderID, *fh.Used, info.gaugePercent) @@ -265,8 +266,8 @@ func computeDisplayInfoRaw(snap core.UsageSnapshot, widget core.DashboardWidget) parts = append(parts, fmt.Sprintf("~$%.2f", *dc.Used)) } } - if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { - parts = append(parts, fmt.Sprintf("$%.2f/h", *br.Used)) + if costSummary.BurnRateUSD > 0 { + parts = append(parts, fmt.Sprintf("$%.2f/h", costSummary.BurnRateUSD)) } info.summary = strings.Join(parts, " · ") @@ -304,8 +305,8 @@ func computeDisplayInfoRaw(snap core.UsageSnapshot, widget core.DashboardWidget) costLabel = fmt.Sprintf("~$%.2f %s", *m.Used, tag) } parts := []string{costLabel} - if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { - parts = append(parts, fmt.Sprintf("$%.2f/h", *br.Used)) + if costSummary.BurnRateUSD > 0 { + parts = append(parts, fmt.Sprintf("$%.2f/h", costSummary.BurnRateUSD)) } info.summary = strings.Join(parts, " · ") @@ -335,8 +336,8 @@ func computeDisplayInfoRaw(snap core.UsageSnapshot, widget core.DashboardWidget) info.tagEmoji = "⚡" info.tagLabel = "Usage" info.summary = fmt.Sprintf("~$%.2f / 5h block", *m.Used) - if br, ok2 := snap.Metrics["burn_rate"]; ok2 && br.Used != nil { - info.detail = fmt.Sprintf("$%.2f/h burn rate", *br.Used) + if costSummary.BurnRateUSD > 0 { + info.detail = fmt.Sprintf("$%.2f/h burn rate", costSummary.BurnRateUSD) } return info } @@ -442,6 +443,8 @@ func computeDisplayInfoRaw(snap core.UsageSnapshot, widget core.DashboardWidget) } func computeDetailedCreditsDisplayInfo(snap core.UsageSnapshot, info providerDisplayInfo) 
providerDisplayInfo { + costSummary := core.ExtractAnalyticsCostSummary(snap) + if m, ok := snap.Metrics["credit_balance"]; ok && m.Limit != nil && m.Remaining != nil { info.tagEmoji = "💰" info.tagLabel = "Credits" @@ -509,8 +512,8 @@ func computeDetailedCreditsDisplayInfo(snap core.UsageSnapshot, info providerDis if byok, ok := snap.Metrics["byok_daily"]; ok && byok.Used != nil && *byok.Used > 0 { detailParts = append(detailParts, fmt.Sprintf("BYOK $%.2f", *byok.Used)) } - if burn, ok := snap.Metrics["burn_rate"]; ok && burn.Used != nil { - detailParts = append(detailParts, fmt.Sprintf("$%.2f/h", *burn.Used)) + if costSummary.BurnRateUSD > 0 { + detailParts = append(detailParts, fmt.Sprintf("$%.2f/h", costSummary.BurnRateUSD)) } if models := snapshotMeta(snap, "activity_models"); models != "" { detailParts = append(detailParts, fmt.Sprintf("%s models", models)) diff --git a/internal/tui/model_input.go b/internal/tui/model_input.go index 7811a99..200c74c 100644 --- a/internal/tui/model_input.go +++ b/internal/tui/model_input.go @@ -16,7 +16,7 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { case tea.WindowSizeMsg: m.width = msg.Width m.height = msg.Height - m.tileBodyCache = make(map[string][]string) + m.invalidateRenderCaches() return m, nil case DaemonStatusMsg: @@ -60,7 +60,7 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } m.snapshots = msg.Snapshots m.refreshing = false - m.tileBodyCache = make(map[string][]string) + m.invalidateRenderCaches() if msg.RequestID > m.lastSnapshotRequestID { m.lastSnapshotRequestID = msg.RequestID } @@ -398,6 +398,7 @@ func (m Model) handleMouseClick(msg tea.MouseMsg) (tea.Model, tea.Cmd) { m.cursor = idx m.tileOffset = 0 + m.invalidateDetailCache() return m, nil } @@ -424,14 +425,17 @@ func (m Model) handleKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.mode = modeList m.detailOffset = 0 m.tileOffset = 0 + m.invalidateDetailCache() return m, nil case "shift+tab": m.screen = m.nextScreen(-1) m.mode = 
modeList m.detailOffset = 0 m.tileOffset = 0 + m.invalidateDetailCache() return m, nil case "t": + m.invalidateRenderCaches() return m, m.persistThemeCmd(CycleTheme()) case "w": return m.cycleTimeWindow() @@ -477,12 +481,14 @@ func (m Model) handleAnalyticsKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { return m, tea.Quit case "s": m.analyticsSortBy = (m.analyticsSortBy + 1) % analyticsSortCount + m.invalidateAnalyticsCache() case "/": m.analyticsFilter.active = true m.analyticsFilter.text = "" case "esc": if m.analyticsFilter.text != "" { m.analyticsFilter.text = "" + m.invalidateAnalyticsCache() } case "r": m = m.requestRefresh() @@ -496,14 +502,19 @@ func (m Model) handleAnalyticsFilterKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.analyticsFilter.active = false case "esc": m.analyticsFilter.active = false - m.analyticsFilter.text = "" + if m.analyticsFilter.text != "" { + m.analyticsFilter.text = "" + m.invalidateAnalyticsCache() + } case "backspace": if len(m.analyticsFilter.text) > 0 { m.analyticsFilter.text = m.analyticsFilter.text[:len(m.analyticsFilter.text)-1] + m.invalidateAnalyticsCache() } default: if len(msg.String()) == 1 { m.analyticsFilter.text += msg.String() + m.invalidateAnalyticsCache() } } return m, nil @@ -549,6 +560,7 @@ func (m Model) handleListKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.detailOffset = 0 m.detailTab = 0 m.tileOffset = 0 + m.invalidateDetailCache() } case "down", "j": if m.cursor < len(ids)-1 { @@ -556,18 +568,22 @@ func (m Model) handleListKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.detailOffset = 0 m.detailTab = 0 m.tileOffset = 0 + m.invalidateDetailCache() } case "pgdown", "ctrl+d": if len(ids) > 0 { m.cursor = clamp(m.cursor+pageStep, 0, len(ids)-1) + m.invalidateDetailCache() } case "pgup", "ctrl+u": if len(ids) > 0 { m.cursor = clamp(m.cursor-pageStep, 0, len(ids)-1) + m.invalidateDetailCache() } case "enter", "right", "l": m.mode = modeDetail m.detailOffset = 0 + m.invalidateDetailCache() case "/": m.filter.active = true 
m.filter.text = "" @@ -583,6 +599,7 @@ func (m Model) handleDetailKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { return m, tea.Quit case "esc", "left", "h", "backspace": m.mode = modeList + m.invalidateDetailCache() case "up", "k": if m.detailOffset > 0 { m.detailOffset-- @@ -597,13 +614,16 @@ func (m Model) handleDetailKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { if m.detailTab > 0 { m.detailTab-- m.detailOffset = 0 + m.invalidateDetailCache() } case "]": m.detailTab++ m.detailOffset = 0 + m.invalidateDetailCache() case "1", "2", "3", "4", "5", "6", "7", "8", "9": m.detailTab = int(msg.String()[0] - '1') m.detailOffset = 0 + m.invalidateDetailCache() } return m, nil } @@ -614,11 +634,13 @@ func (m Model) handleFilterKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.filter.active = false m.cursor = 0 m.tileOffset = 0 + m.invalidateDetailCache() case "esc": m.filter.text = "" m.filter.active = false m.cursor = 0 m.tileOffset = 0 + m.invalidateDetailCache() case "backspace": if len(m.filter.text) > 0 { m.filter.text = m.filter.text[:len(m.filter.text)-1] @@ -642,21 +664,25 @@ func (m Model) handleTilesKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { if m.cursor >= cols { m.cursor -= cols m.tileOffset = 0 + m.invalidateDetailCache() } case "down", "j": if m.cursor+cols < len(ids) { m.cursor += cols m.tileOffset = 0 + m.invalidateDetailCache() } case "left", "h": if m.cursor > 0 { m.cursor-- m.tileOffset = 0 + m.invalidateDetailCache() } case "right", "l": if m.cursor < len(ids)-1 { m.cursor++ m.tileOffset = 0 + m.invalidateDetailCache() } case "pgdown", "ctrl+d": if scrollModeWidget { @@ -684,6 +710,7 @@ func (m Model) handleTilesKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { case "enter": m.mode = modeDetail m.detailOffset = 0 + m.invalidateDetailCache() case "/": m.filter.active = true m.filter.text = "" @@ -692,6 +719,7 @@ func (m Model) handleTilesKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.filter.text = "" m.cursor = 0 m.tileOffset = 0 + m.invalidateDetailCache() } case "r": m = 
m.requestRefresh() diff --git a/internal/tui/render_cache.go b/internal/tui/render_cache.go new file mode 100644 index 0000000..7587379 --- /dev/null +++ b/internal/tui/render_cache.go @@ -0,0 +1,57 @@ +package tui + +import ( + "strconv" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +type detailRenderCacheEntry struct { + key string + content string +} + +func (m *Model) invalidateTileBodyCache() { + m.tileBodyCache = make(map[string][]string) +} + +func (m *Model) invalidateDetailCache() { + m.detailCache = detailRenderCacheEntry{} +} + +func (m *Model) invalidateRenderCaches() { + m.invalidateTileBodyCache() + m.invalidateAnalyticsCache() + m.invalidateDetailCache() +} + +func (m *Model) cachedDetailContent(id string, snap core.UsageSnapshot, w int, activeTab int) string { + key := strings.Join([]string{ + id, + snap.ProviderID, + strconv.Itoa(w), + strconv.Itoa(activeTab), + strconv.FormatInt(snap.Timestamp.UTC().UnixNano(), 10), + strconv.Itoa(len(snap.Metrics)), + strconv.Itoa(len(snap.DailySeries)), + strconv.Itoa(len(snap.ModelUsage)), + strconv.Itoa(len(snap.Resets)), + strconv.Itoa(len(snap.Attributes)), + strconv.Itoa(len(snap.Diagnostics)), + strconv.Itoa(len(snap.Raw)), + string(m.timeWindow), + strconv.FormatFloat(m.warnThreshold, 'f', 4, 64), + strconv.FormatFloat(m.critThreshold, 'f', 4, 64), + }, "|") + if m.detailCache.key == key { + return m.detailCache.content + } + + content := RenderDetailContent(snap, w, m.warnThreshold, m.critThreshold, activeTab) + m.detailCache = detailRenderCacheEntry{ + key: key, + content: content, + } + return content +} diff --git a/internal/tui/settings_modal.go b/internal/tui/settings_modal.go index fc3a253..66154af 100644 --- a/internal/tui/settings_modal.go +++ b/internal/tui/settings_modal.go @@ -5,7 +5,6 @@ import ( "os" "strings" - tea "github.com/charmbracelet/bubbletea" "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" ) @@ -93,346 +92,6 @@ 
func (m Model) settingsModalInfo() string { return info } -func (m Model) handleSettingsModalKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - if m.settings.apiKeyEditing { - return m.handleAPIKeyEditKey(msg) - } - - ids := m.settingsIDs() - if m.settings.tab == settingsTabAPIKeys { - ids = m.apiKeysTabIDs() - } - - switch msg.String() { - case "ctrl+c": - return m, tea.Quit - case "q", "esc", "backspace", ",", "S": - m.closeSettingsModal() - return m, nil - case "tab", "right", "]": - m.settings.tab = (m.settings.tab + 1) % settingsTabCount - m.settings.bodyOffset = 0 - m.resetSettingsCursorForTab() - return m, nil - case "shift+tab", "left", "[": - m.settings.tab = (m.settings.tab + settingsTabCount - 1) % settingsTabCount - m.settings.bodyOffset = 0 - m.resetSettingsCursorForTab() - return m, nil - case "r": - if m.settings.tab == settingsTabIntegrations { - m.refreshIntegrationStatuses() - m.settings.status = "integration status refreshed" - return m, nil - } - m = m.requestRefresh() - return m, nil - } - if len(msg.String()) == 1 { - key := msg.String()[0] - if key >= '1' && key <= '9' { - idx := int(key - '1') - if idx >= 0 && idx < int(settingsTabCount) { - m.settings.tab = settingsModalTab(idx) - m.settings.bodyOffset = 0 - m.resetSettingsCursorForTab() - return m, nil - } - } - } - - switch m.settings.tab { - case settingsTabProviders: - switch msg.String() { - case "up", "k": - if m.settings.cursor > 0 { - m.settings.cursor-- - } - case "down", "j": - if m.settings.cursor < len(ids)-1 { - m.settings.cursor++ - } - case "K", "shift+k", "shift+up", "ctrl+up", "alt+up": - cmd := m.moveSelectedProvider(ids, -1) - if cmd != nil { - return m, cmd - } - case "J", "shift+j", "shift+down", "ctrl+down", "alt+down": - cmd := m.moveSelectedProvider(ids, 1) - if cmd != nil { - return m, cmd - } - case " ", "enter": - if len(ids) == 0 { - return m, nil - } - id := ids[clamp(m.settings.cursor, 0, len(ids)-1)] - m.providerEnabled[id] = !m.isProviderEnabled(id) - 
m.rebuildSortedIDs() - m.settings.status = "saving settings..." - return m, m.persistDashboardPrefsCmd() - } - case settingsTabWidgetSections: - switch msg.String() { - case "up", "k": - if m.settings.sectionRowCursor > 0 { - m.settings.sectionRowCursor-- - } - case "down", "j": - entries := m.widgetSectionEntries() - if m.settings.sectionRowCursor < len(entries)-1 { - m.settings.sectionRowCursor++ - } - case "K", "shift+k", "shift+up", "ctrl+up", "alt+up": - cmd := m.moveSelectedWidgetSection(-1) - if cmd != nil { - return m, cmd - } - case "J", "shift+j", "shift+down", "ctrl+down", "alt+down": - cmd := m.moveSelectedWidgetSection(1) - if cmd != nil { - return m, cmd - } - case " ", "enter": - cmd := m.toggleSelectedWidgetSection() - if cmd != nil { - return m, cmd - } - case "h", "H": - m.hideSectionsWithNoData = !m.hideSectionsWithNoData - m.settings.status = "saving empty-state..." - return m, m.persistDashboardHideSectionsWithNoDataCmd() - case "pgup", "ctrl+u": - m.settings.previewOffset -= 4 - if m.settings.previewOffset < 0 { - m.settings.previewOffset = 0 - } - case "pgdown", "ctrl+d": - m.settings.previewOffset += 4 - } - case settingsTabTheme: - themes := AvailableThemes() - switch msg.String() { - case "up", "k": - if m.settings.themeCursor > 0 { - m.settings.themeCursor-- - } - case "down", "j": - if m.settings.themeCursor < len(themes)-1 { - m.settings.themeCursor++ - } - case " ", "enter": - if len(themes) == 0 { - return m, nil - } - m.settings.themeCursor = clamp(m.settings.themeCursor, 0, len(themes)-1) - name := themes[m.settings.themeCursor].Name - if SetThemeByName(name) { - m.settings.status = "saving theme..." 
- return m, m.persistThemeCmd(name) - } - } - case settingsTabView: - switch msg.String() { - case "up", "k": - if m.settings.viewCursor > 0 { - m.settings.viewCursor-- - } - case "down", "j": - if m.settings.viewCursor < len(dashboardViewOptions)-1 { - m.settings.viewCursor++ - } - case " ", "enter": - if len(dashboardViewOptions) == 0 { - return m, nil - } - selected := dashboardViewByIndex(m.settings.viewCursor) - m.setDashboardView(selected) - m.settings.viewCursor = dashboardViewIndex(selected) - m.settings.status = "saving view..." - return m, m.persistDashboardViewCmd() - } - case settingsTabAPIKeys: - switch msg.String() { - case "up", "k": - if m.settings.cursor > 0 { - m.settings.cursor-- - } - case "down", "j": - if m.settings.cursor < len(ids)-1 { - m.settings.cursor++ - } - case " ", "enter": - if len(ids) == 0 { - return m, nil - } - id := ids[clamp(m.settings.cursor, 0, len(ids)-1)] - providerID := providerForAccountID(id, m.accountProviders) - if isAPIKeyProvider(providerID) { - m.settings.apiKeyEditing = true - m.settings.apiKeyInput = "" - m.settings.apiKeyEditAccountID = id - m.settings.apiKeyStatus = "" - // Ensure the provider mapping exists (for unregistered providers) - m.accountProviders[id] = providerID - } - case "d": - if len(ids) == 0 { - return m, nil - } - id := ids[clamp(m.settings.cursor, 0, len(ids)-1)] - providerID := providerForAccountID(id, m.accountProviders) - if isAPIKeyProvider(providerID) { - m.settings.status = "deleting key..." 
- return m, m.deleteCredentialCmd(id) - } - } - case settingsTabTelemetry: - twCount := len(core.ValidTimeWindows) - switch msg.String() { - case "up", "k": - if m.settings.cursor > 0 { - m.settings.cursor-- - } - case "down", "j": - if m.settings.cursor < twCount-1 { - m.settings.cursor++ - } - case " ", "enter": - if m.settings.cursor >= 0 && m.settings.cursor < twCount { - tw := core.ValidTimeWindows[m.settings.cursor] - m = m.beginTimeWindowRefresh(tw) - m.settings.status = "saving time window..." - return m, m.persistTimeWindowCmd(string(tw)) - } - case "pgup", "ctrl+u": - m.settings.bodyOffset -= 4 - if m.settings.bodyOffset < 0 { - m.settings.bodyOffset = 0 - } - case "pgdown", "ctrl+d": - m.settings.bodyOffset += 4 - } - case settingsTabIntegrations: - switch msg.String() { - case "up", "k": - if m.settings.cursor > 0 { - m.settings.cursor-- - } - case "down", "j": - if m.settings.cursor < len(m.settings.integrationStatus)-1 { - m.settings.cursor++ - } - case "i", " ", "enter": - if len(m.settings.integrationStatus) == 0 { - return m, nil - } - cursor := clamp(m.settings.cursor, 0, len(m.settings.integrationStatus)-1) - entry := m.settings.integrationStatus[cursor] - m.settings.status = "installing integration..." - return m, m.installIntegrationCmd(entry.ID) - case "u": - if len(m.settings.integrationStatus) == 0 { - return m, nil - } - cursor := clamp(m.settings.cursor, 0, len(m.settings.integrationStatus)-1) - entry := m.settings.integrationStatus[cursor] - if !entry.NeedsUpgrade { - m.settings.status = "selected integration is already current" - return m, nil - } - m.settings.status = "upgrading integration..." 
- return m, m.installIntegrationCmd(entry.ID) - } - } - - return m, nil -} - -func (m *Model) moveSelectedProvider(ids []string, delta int) tea.Cmd { - if m == nil || len(ids) == 0 || delta == 0 { - return nil - } - cursor := clamp(m.settings.cursor, 0, len(ids)-1) - target := cursor + delta - if target < 0 || target >= len(ids) { - return nil - } - - id := ids[cursor] - swapID := ids[target] - currIdx := m.providerOrderIndex(id) - swapIdx := m.providerOrderIndex(swapID) - if currIdx < 0 || swapIdx < 0 { - return nil - } - - m.providerOrder[currIdx], m.providerOrder[swapIdx] = m.providerOrder[swapIdx], m.providerOrder[currIdx] - m.settings.cursor = target - m.rebuildSortedIDs() - m.settings.status = "saving order..." - return m.persistDashboardPrefsCmd() -} - -func (m *Model) moveSelectedWidgetSection(delta int) tea.Cmd { - if m == nil || delta == 0 { - return nil - } - entries := m.widgetSectionEntries() - if len(entries) == 0 { - return nil - } - - cursor := clamp(m.settings.sectionRowCursor, 0, len(entries)-1) - target := cursor + delta - if target < 0 || target >= len(entries) { - return nil - } - entries[cursor], entries[target] = entries[target], entries[cursor] - m.settings.sectionRowCursor = target - m.setWidgetSectionEntries(entries) - m.settings.status = "saving sections..." - return m.persistDashboardWidgetSectionsCmd() -} - -func (m *Model) toggleSelectedWidgetSection() tea.Cmd { - if m == nil { - return nil - } - entries := m.widgetSectionEntries() - if len(entries) == 0 { - return nil - } - cursor := clamp(m.settings.sectionRowCursor, 0, len(entries)-1) - entries[cursor].Enabled = !entries[cursor].Enabled - m.setWidgetSectionEntries(entries) - m.settings.status = "saving sections..." 
- return m.persistDashboardWidgetSectionsCmd() -} - -func (m *Model) resetSettingsCursorForTab() { - switch m.settings.tab { - case settingsTabTelemetry: - m.settings.cursor = m.currentTimeWindowIndex() - case settingsTabView: - m.settings.viewCursor = dashboardViewIndex(m.configuredDashboardView()) - case settingsTabWidgetSections: - m.settings.sectionRowCursor = 0 - m.settings.previewOffset = 0 - default: - m.settings.cursor = 0 - } -} - -func (m Model) currentTimeWindowIndex() int { - for i, tw := range core.ValidTimeWindows { - if tw == m.timeWindow { - return i - } - } - return 0 -} - func (m Model) renderSettingsProvidersBody(w, h int) string { ids := m.settingsIDs() @@ -1055,58 +714,3 @@ func (m Model) renderSettingsIntegrationsBody(w, h int) string { return padToSize(strings.Join(lines, "\n"), w, h) } - -func (m Model) handleAPIKeyEditKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { - switch msg.String() { - case "ctrl+c": - return m, tea.Quit - case "esc": - m.settings.apiKeyEditing = false - m.settings.apiKeyInput = "" - m.settings.apiKeyStatus = "" - return m, nil - case "enter": - if m.settings.apiKeyInput == "" || m.settings.apiKeyStatus == "validating..." { - return m, nil - } - id := m.settings.apiKeyEditAccountID - providerID := m.accountProviders[id] - m.settings.apiKeyStatus = "validating..." 
- return m, m.validateKeyCmd(id, providerID, m.settings.apiKeyInput) - case "backspace": - if len(m.settings.apiKeyInput) > 0 { - m.settings.apiKeyInput = m.settings.apiKeyInput[:len(m.settings.apiKeyInput)-1] - } - m.settings.apiKeyStatus = "" - return m, nil - default: - if msg.Type == tea.KeyRunes { - m.settings.apiKeyInput += string(msg.Runes) - m.settings.apiKeyStatus = "" - } - return m, nil - } -} - -func listWindow(total, cursor, visible int) (int, int) { - if total <= 0 { - return 0, 0 - } - if visible <= 0 || visible > total { - visible = total - } - - start := 0 - if cursor >= visible { - start = cursor - visible + 1 - } - end := start + visible - if end > total { - end = total - start = end - visible - if start < 0 { - start = 0 - } - } - return start, end -} diff --git a/internal/tui/settings_modal_input.go b/internal/tui/settings_modal_input.go new file mode 100644 index 0000000..40e6976 --- /dev/null +++ b/internal/tui/settings_modal_input.go @@ -0,0 +1,388 @@ +package tui + +import ( + "fmt" + + tea "github.com/charmbracelet/bubbletea" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (m Model) handleSettingsModalKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + if m.settings.apiKeyEditing { + return m.handleAPIKeyEditKey(msg) + } + + ids := m.settingsIDs() + if m.settings.tab == settingsTabAPIKeys { + ids = m.apiKeysTabIDs() + } + + switch msg.String() { + case "ctrl+c": + return m, tea.Quit + case "q", "esc", "backspace", ",", "S": + m.closeSettingsModal() + return m, nil + case "tab", "right", "]": + m.settings.tab = (m.settings.tab + 1) % settingsTabCount + m.settings.bodyOffset = 0 + m.resetSettingsCursorForTab() + return m, nil + case "shift+tab", "left", "[": + m.settings.tab = (m.settings.tab + settingsTabCount - 1) % settingsTabCount + m.settings.bodyOffset = 0 + m.resetSettingsCursorForTab() + return m, nil + case "r": + if m.settings.tab == settingsTabIntegrations { + m.refreshIntegrationStatuses() + m.settings.status = 
"integration status refreshed" + return m, nil + } + m = m.requestRefresh() + return m, nil + } + if len(msg.String()) == 1 { + key := msg.String()[0] + if key >= '1' && key <= '9' { + idx := int(key - '1') + if idx >= 0 && idx < int(settingsTabCount) { + m.settings.tab = settingsModalTab(idx) + m.settings.bodyOffset = 0 + m.resetSettingsCursorForTab() + return m, nil + } + } + } + + switch m.settings.tab { + case settingsTabProviders: + switch msg.String() { + case "up", "k": + if m.settings.cursor > 0 { + m.settings.cursor-- + } + case "down", "j": + if m.settings.cursor < len(ids)-1 { + m.settings.cursor++ + } + case "K", "shift+k", "shift+up", "ctrl+up", "alt+up": + cmd := m.moveSelectedProvider(ids, -1) + if cmd != nil { + return m, cmd + } + case "J", "shift+j", "shift+down", "ctrl+down", "alt+down": + cmd := m.moveSelectedProvider(ids, 1) + if cmd != nil { + return m, cmd + } + case " ", "enter": + if len(ids) == 0 { + return m, nil + } + id := ids[clamp(m.settings.cursor, 0, len(ids)-1)] + m.providerEnabled[id] = !m.isProviderEnabled(id) + m.rebuildSortedIDs() + m.settings.status = "saving settings..." + return m, m.persistDashboardPrefsCmd() + } + case settingsTabWidgetSections: + switch msg.String() { + case "up", "k": + if m.settings.sectionRowCursor > 0 { + m.settings.sectionRowCursor-- + } + case "down", "j": + entries := m.widgetSectionEntries() + if m.settings.sectionRowCursor < len(entries)-1 { + m.settings.sectionRowCursor++ + } + case "K", "shift+k", "shift+up", "ctrl+up", "alt+up": + cmd := m.moveSelectedWidgetSection(-1) + if cmd != nil { + return m, cmd + } + case "J", "shift+j", "shift+down", "ctrl+down", "alt+down": + cmd := m.moveSelectedWidgetSection(1) + if cmd != nil { + return m, cmd + } + case " ", "enter": + cmd := m.toggleSelectedWidgetSection() + if cmd != nil { + return m, cmd + } + case "h", "H": + m.hideSectionsWithNoData = !m.hideSectionsWithNoData + m.invalidateTileBodyCache() + m.settings.status = "saving empty-state..." 
+ return m, m.persistDashboardHideSectionsWithNoDataCmd() + case "pgup", "ctrl+u": + m.settings.previewOffset -= 4 + if m.settings.previewOffset < 0 { + m.settings.previewOffset = 0 + } + case "pgdown", "ctrl+d": + m.settings.previewOffset += 4 + } + case settingsTabTheme: + themes := AvailableThemes() + switch msg.String() { + case "up", "k": + if m.settings.themeCursor > 0 { + m.settings.themeCursor-- + } + case "down", "j": + if m.settings.themeCursor < len(themes)-1 { + m.settings.themeCursor++ + } + case " ", "enter": + if len(themes) == 0 { + return m, nil + } + m.settings.themeCursor = clamp(m.settings.themeCursor, 0, len(themes)-1) + name := themes[m.settings.themeCursor].Name + if SetThemeByName(name) { + m.invalidateRenderCaches() + m.settings.status = "saving theme..." + return m, m.persistThemeCmd(name) + } + } + case settingsTabView: + switch msg.String() { + case "up", "k": + if m.settings.viewCursor > 0 { + m.settings.viewCursor-- + } + case "down", "j": + if m.settings.viewCursor < len(dashboardViewOptions)-1 { + m.settings.viewCursor++ + } + case " ", "enter": + if len(dashboardViewOptions) == 0 { + return m, nil + } + selected := dashboardViewByIndex(m.settings.viewCursor) + m.setDashboardView(selected) + m.settings.viewCursor = dashboardViewIndex(selected) + m.settings.status = "saving view..." 
+ return m, m.persistDashboardViewCmd() + } + case settingsTabAPIKeys: + switch msg.String() { + case "up", "k": + if m.settings.cursor > 0 { + m.settings.cursor-- + } + case "down", "j": + if m.settings.cursor < len(ids)-1 { + m.settings.cursor++ + } + case "enter": + if len(ids) == 0 { + return m, nil + } + m.settings.cursor = clamp(m.settings.cursor, 0, len(ids)-1) + id := ids[m.settings.cursor] + m.settings.apiKeyEditing = true + m.settings.apiKeyEditAccountID = id + m.settings.apiKeyInput = "" + m.settings.apiKeyStatus = "" + return m, nil + case "d", "backspace": + if len(ids) == 0 { + return m, nil + } + m.settings.cursor = clamp(m.settings.cursor, 0, len(ids)-1) + id := ids[m.settings.cursor] + m.settings.apiKeyStatus = "deleting..." + return m, m.deleteCredentialCmd(id) + } + case settingsTabTelemetry: + switch msg.String() { + case "up", "k": + if m.settings.cursor > 0 { + m.settings.cursor-- + } + case "down", "j": + if m.settings.cursor < len(core.ValidTimeWindows)-1 { + m.settings.cursor++ + } + case " ", "enter", "w": + tws := core.ValidTimeWindows + if len(tws) == 0 { + return m, nil + } + idx := clamp(m.settings.cursor, 0, len(tws)-1) + selected := tws[idx] + m.settings.cursor = idx + m.settings.status = "saving time window..." + m = m.beginTimeWindowRefresh(selected) + return m, m.persistTimeWindowCmd(string(selected)) + } + case settingsTabIntegrations: + switch msg.String() { + case "up", "k": + if m.settings.cursor > 0 { + m.settings.cursor-- + } + case "down", "j": + if m.settings.cursor < len(m.settings.integrationStatus)-1 { + m.settings.cursor++ + } + case " ", "enter": + if len(m.settings.integrationStatus) == 0 { + return m, nil + } + selected := m.settings.integrationStatus[clamp(m.settings.cursor, 0, len(m.settings.integrationStatus)-1)] + m.settings.status = "installing integration..." 
+ return m, m.installIntegrationCmd(selected.ID) + } + } + + return m, nil +} + +func (m *Model) moveSelectedProvider(ids []string, delta int) tea.Cmd { + if len(ids) == 0 || delta == 0 { + return nil + } + from := clamp(m.settings.cursor, 0, len(ids)-1) + to := from + delta + if to < 0 || to >= len(ids) { + return nil + } + + m.providerOrder = loMove(m.providerOrder, from, to) + m.settings.cursor = to + m.settings.status = fmt.Sprintf("moved %s", ids[to]) + m.rebuildSortedIDs() + return m.persistDashboardPrefsCmd() +} + +func (m *Model) moveSelectedWidgetSection(delta int) tea.Cmd { + if delta == 0 { + return nil + } + entries := m.widgetSectionEntries() + if len(entries) == 0 { + return nil + } + + from := clamp(m.settings.sectionRowCursor, 0, len(entries)-1) + to := from + delta + if to < 0 || to >= len(entries) { + return nil + } + + entries = loMove(entries, from, to) + m.setWidgetSectionEntries(entries) + m.settings.sectionRowCursor = to + m.settings.status = fmt.Sprintf("moved %s", entries[to].ID) + return m.persistDashboardWidgetSectionsCmd() +} + +func (m *Model) toggleSelectedWidgetSection() tea.Cmd { + entries := m.widgetSectionEntries() + if len(entries) == 0 { + return nil + } + idx := clamp(m.settings.sectionRowCursor, 0, len(entries)-1) + entries[idx].Enabled = !entries[idx].Enabled + m.setWidgetSectionEntries(entries) + m.settings.status = "saving sections..." 
+ return m.persistDashboardWidgetSectionsCmd() +} + +func (m *Model) resetSettingsCursorForTab() { + switch m.settings.tab { + case settingsTabProviders, settingsTabAPIKeys, settingsTabIntegrations, settingsTabTelemetry: + m.settings.cursor = 0 + case settingsTabWidgetSections: + m.settings.sectionRowCursor = 0 + m.settings.previewOffset = 0 + case settingsTabTheme: + m.settings.themeCursor = clamp(ActiveThemeIndex(), 0, max(0, len(AvailableThemes())-1)) + case settingsTabView: + m.settings.viewCursor = dashboardViewIndex(m.configuredDashboardView()) + } +} + +func (m Model) currentTimeWindowIndex() int { + for i, tw := range core.ValidTimeWindows { + if tw == m.timeWindow { + return i + } + } + return 0 +} + +func (m Model) handleAPIKeyEditKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + switch msg.String() { + case "ctrl+c": + return m, tea.Quit + case "esc": + m.settings.apiKeyEditing = false + m.settings.apiKeyInput = "" + m.settings.apiKeyStatus = "" + return m, nil + case "enter": + if m.settings.apiKeyInput == "" || m.settings.apiKeyStatus == "validating..." { + return m, nil + } + id := m.settings.apiKeyEditAccountID + providerID := m.accountProviders[id] + m.settings.apiKeyStatus = "validating..." 
+ return m, m.validateKeyCmd(id, providerID, m.settings.apiKeyInput) + case "backspace": + if len(m.settings.apiKeyInput) > 0 { + m.settings.apiKeyInput = m.settings.apiKeyInput[:len(m.settings.apiKeyInput)-1] + } + m.settings.apiKeyStatus = "" + return m, nil + default: + if msg.Type == tea.KeyRunes { + m.settings.apiKeyInput += string(msg.Runes) + m.settings.apiKeyStatus = "" + } + return m, nil + } +} + +func listWindow(total, cursor, visible int) (int, int) { + if total <= 0 { + return 0, 0 + } + if visible <= 0 || visible > total { + visible = total + } + + start := 0 + if cursor >= visible { + start = cursor - visible + 1 + } + end := start + visible + if end > total { + end = total + start = end - visible + if start < 0 { + start = 0 + } + } + return start, end +} + +func loMove[T any](items []T, from, to int) []T { + if from == to || from < 0 || from >= len(items) || to < 0 || to >= len(items) { + return items + } + out := append([]T(nil), items...) + item := out[from] + if from < to { + copy(out[from:to], out[from+1:to+1]) + } else { + copy(out[to+1:from+1], out[to:from]) + } + out[to] = item + return out +} From 9f9f0344916de9c6924c1955dc6d8f5eb10fc821 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Tue, 10 Mar 2026 09:25:07 +0100 Subject: [PATCH 29/32] fix: anchor analytics and tile dates to snapshot time --- internal/tui/analytics.go | 10 ++++++---- internal/tui/analytics_data.go | 8 ++++++++ internal/tui/tiles_header.go | 19 +++++++++++-------- 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/internal/tui/analytics.go b/internal/tui/analytics.go index 4db628d..4e7e775 100644 --- a/internal/tui/analytics.go +++ b/internal/tui/analytics.go @@ -515,7 +515,7 @@ func buildProviderDailyCostSeries(data costData) ([]BrailleSeries, int, int) { if gg, ok := groupByProvider[p.name]; ok { g = &gg } - pts, observed, estimated := deriveProviderDailyCostPoints(p, g) + pts, observed, estimated := deriveProviderDailyCostPoints(p, g, 
data.referenceTime) if !hasNonZeroData(pts) { continue } @@ -556,7 +556,7 @@ func buildProviderDailyCostSeries(data costData) ([]BrailleSeries, int, int) { return out, observedCount, estimatedCount } -func deriveProviderDailyCostPoints(p providerCostEntry, group *timeSeriesGroup) ([]core.TimePoint, bool, bool) { +func deriveProviderDailyCostPoints(p providerCostEntry, group *timeSeriesGroup, referenceTime time.Time) ([]core.TimePoint, bool, bool) { if group != nil { for _, key := range []string{"cost", "analytics_cost", "daily_cost"} { if pts, ok := group.series[key]; ok && hasNonZeroData(pts) { @@ -564,8 +564,10 @@ func deriveProviderDailyCostPoints(p providerCostEntry, group *timeSeriesGroup) } } } - now := time.Now() - nowDate := now.Format("2006-01-02") + if referenceTime.IsZero() { + referenceTime = time.Now() + } + nowDate := referenceTime.Format("2006-01-02") if p.todayCost > 0 { return []core.TimePoint{{Date: nowDate, Value: p.todayCost}}, true, false diff --git a/internal/tui/analytics_data.go b/internal/tui/analytics_data.go index 9647c18..0b5e2d6 100644 --- a/internal/tui/analytics_data.go +++ b/internal/tui/analytics_data.go @@ -5,6 +5,7 @@ import ( "slices" "sort" "strings" + "time" "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" @@ -25,6 +26,7 @@ type costData struct { totalOutput float64 providerCount int activeCount int + referenceTime time.Time providers []providerCostEntry models []modelCostEntry budgets []budgetEntry @@ -160,6 +162,9 @@ func extractCostData(snapshots map[string]core.UsageSnapshot, filter string) cos if snap.Status == core.StatusOK || snap.Status == core.StatusNearLimit { data.activeCount++ } + if snap.Timestamp.After(data.referenceTime) { + data.referenceTime = snap.Timestamp + } provColor := ProviderColor(snap.ProviderID) cost := extractProviderCost(snap) @@ -197,6 +202,9 @@ func extractCostData(snapshots map[string]core.UsageSnapshot, filter string) cos } data.models = 
aggregateCanonicalModels(data.providers) + if data.referenceTime.IsZero() { + data.referenceTime = time.Now() + } return data } diff --git a/internal/tui/tiles_header.go b/internal/tui/tiles_header.go index 2bff5e9..fe34a93 100644 --- a/internal/tui/tiles_header.go +++ b/internal/tui/tiles_header.go @@ -21,16 +21,16 @@ func buildTileHeaderMetaLines(snap core.UsageSnapshot, widget core.DashboardWidg func buildTileCyclePills(snap core.UsageSnapshot) []string { var pills []string - if pill := buildTileCyclePill("Billing", snapshotMeta(snap, "billing_cycle_start"), snapshotMeta(snap, "billing_cycle_end")); pill != "" { + if pill := buildTileCyclePill("Billing", snapshotMeta(snap, "billing_cycle_start"), snapshotMeta(snap, "billing_cycle_end"), snap.Timestamp); pill != "" { pills = append(pills, pill) } - if pill := buildTileCyclePill("Usage 5h", snapshotMeta(snap, "block_start"), snapshotMeta(snap, "block_end")); pill != "" { + if pill := buildTileCyclePill("Usage 5h", snapshotMeta(snap, "block_start"), snapshotMeta(snap, "block_end"), snap.Timestamp); pill != "" { pills = append(pills, pill) } return pills } -func buildTileCyclePill(label, startRaw, endRaw string) string { +func buildTileCyclePill(label, startRaw, endRaw string, referenceTime time.Time) string { start, hasStart := parseTileTimestamp(startRaw) end, hasEnd := parseTileTimestamp(endRaw) if !hasStart && !hasEnd { @@ -40,11 +40,11 @@ func buildTileCyclePill(label, startRaw, endRaw string) string { var span string switch { case hasStart && hasEnd: - span = fmt.Sprintf("%s→%s", formatTileTimestamp(start), formatTileTimestamp(end)) + span = fmt.Sprintf("%s→%s", formatTileTimestamp(start, referenceTime), formatTileTimestamp(end, referenceTime)) case hasEnd: - span = "ends " + formatTileTimestamp(end) + span = "ends " + formatTileTimestamp(end, referenceTime) default: - span = "since " + formatTileTimestamp(start) + span = "since " + formatTileTimestamp(start, referenceTime) } return 
lipgloss.NewStyle().Foreground(colorLavender).Bold(true).Render("◷ "+label) + @@ -81,8 +81,11 @@ func parseTileTimestamp(raw string) (time.Time, bool) { return time.Time{}, false } -func formatTileTimestamp(t time.Time) string { - now := time.Now() +func formatTileTimestamp(t, referenceTime time.Time) string { + now := referenceTime + if now.IsZero() { + now = time.Now() + } isDateOnly := t.Hour() == 0 && t.Minute() == 0 && t.Second() == 0 if isDateOnly { if t.Year() == now.Year() { From 45ff4f55c1f0aa280732eefa220e1f86bd4ac50b Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Tue, 10 Mar 2026 10:19:40 +0100 Subject: [PATCH 30/32] refactor: standardize lo-backed collection helpers --- internal/core/analytics_normalize.go | 4 +- internal/core/analytics_snapshot.go | 27 +++++----- internal/core/collections.go | 32 ++++++++++-- internal/core/dashboard_display_metrics.go | 6 +-- internal/daemon/accounts.go | 2 +- internal/daemon/provider_registry_hash.go | 18 +++---- internal/daemon/source_collectors.go | 52 +++++-------------- .../claude_code/conversation_usage.go | 9 ++-- .../providers/claude_code/local_helpers.go | 7 +-- internal/providers/codex/session_usage.go | 4 +- internal/providers/copilot/local_helpers.go | 10 +--- .../copilot/telemetry_session_file.go | 7 +-- .../providers/cursor/tracking_projection.go | 5 +- internal/providers/gemini_cli/api_usage.go | 5 +- .../providers/gemini_cli/session_usage.go | 15 ++---- internal/providers/shared/telemetry.go | 13 +---- internal/providers/zai/usage_extract.go | 15 ++---- internal/providers/zai/usage_helpers.go | 16 ++---- internal/providers/zai/zai.go | 12 +---- internal/telemetry/provider_links.go | 6 +-- internal/telemetry/quota_stream.go | 5 +- internal/telemetry/usage_view_queries.go | 4 +- internal/tui/analytics.go | 11 ++-- internal/tui/analytics_data.go | 4 +- internal/tui/charts.go | 7 +-- internal/tui/detail.go | 11 +--- internal/tui/detail_metrics.go | 4 +- internal/tui/model.go | 19 ++----- 
internal/tui/tiles_gauge.go | 5 +- internal/tui/tiles_metrics.go | 8 +-- 30 files changed, 112 insertions(+), 231 deletions(-) diff --git a/internal/core/analytics_normalize.go b/internal/core/analytics_normalize.go index 6cd1620..081747c 100644 --- a/internal/core/analytics_normalize.go +++ b/internal/core/analytics_normalize.go @@ -1,8 +1,6 @@ package core import ( - "maps" - "slices" "strings" "time" ) @@ -149,7 +147,7 @@ func normalizeSeriesPoints(points []TimePoint) []TimePoint { } agg[date] += p.Value } - keys := slices.Sorted(maps.Keys(agg)) + keys := SortedStringKeys(agg) out := make([]TimePoint, 0, len(keys)) for _, k := range keys { out = append(out, TimePoint{Date: k, Value: agg[k]}) diff --git a/internal/core/analytics_snapshot.go b/internal/core/analytics_snapshot.go index 93227b1..aa7188d 100644 --- a/internal/core/analytics_snapshot.go +++ b/internal/core/analytics_snapshot.go @@ -3,6 +3,8 @@ package core import ( "sort" "strings" + + "github.com/samber/lo" ) type AnalyticsModelUsageEntry struct { @@ -104,18 +106,17 @@ func ExtractAnalyticsModelUsage(s UsageSnapshot) []AnalyticsModelUsageEntry { } func ExtractAnalyticsModelSeries(series map[string][]TimePoint) []NamedSeries { - keys := make([]string, 0, len(series)) - for key := range series { + hasTokenSeries := hasAnalyticsTokenSeries(series) + keys := lo.Filter(SortedStringKeys(series), func(key string, _ int) bool { switch { case strings.HasPrefix(key, "tokens_"): - keys = append(keys, key) + return true case strings.HasPrefix(key, "usage_model_"): - if !hasAnalyticsTokenSeries(series) { - keys = append(keys, key) - } + return !hasTokenSeries + default: + return false } - } - sort.Strings(keys) + }) out := make([]NamedSeries, 0, len(keys)) for _, key := range keys { @@ -148,13 +149,9 @@ func SelectAnalyticsWeightSeries(series map[string][]TimePoint) []TimePoint { return named.Points } } - keys := make([]string, 0, len(series)) - for key := range series { - if strings.HasPrefix(key, 
"usage_client_") { - keys = append(keys, key) - } - } - sort.Strings(keys) + keys := lo.Filter(SortedStringKeys(series), func(key string, _ int) bool { + return strings.HasPrefix(key, "usage_client_") + }) for _, key := range keys { if len(series[key]) > 0 { return series[key] diff --git a/internal/core/collections.go b/internal/core/collections.go index 83c9d61..9b60090 100644 --- a/internal/core/collections.go +++ b/internal/core/collections.go @@ -1,17 +1,43 @@ package core import ( - "maps" - "slices" + "sort" "strings" + + "github.com/samber/lo" ) +func SortedCompactStrings(values []string) []string { + if len(values) == 0 { + return nil + } + compact := lo.FilterMap(values, func(value string, _ int) (string, bool) { + trimmed := strings.TrimSpace(value) + return trimmed, trimmed != "" + }) + if len(compact) == 0 { + return nil + } + result := lo.Uniq(compact) + sort.Strings(result) + return result +} + +func SortedStringKeys[V any](values map[string]V) []string { + if len(values) == 0 { + return nil + } + keys := lo.Keys(values) + sort.Strings(keys) + return keys +} + func SortedTimePoints(values map[string]float64) []TimePoint { if len(values) == 0 { return nil } - keys := slices.Sorted(maps.Keys(values)) + keys := SortedStringKeys(values) points := make([]TimePoint, 0, len(keys)) for _, key := range keys { if strings.TrimSpace(key) == "" { diff --git a/internal/core/dashboard_display_metrics.go b/internal/core/dashboard_display_metrics.go index 1932ed0..060a001 100644 --- a/internal/core/dashboard_display_metrics.go +++ b/internal/core/dashboard_display_metrics.go @@ -46,11 +46,7 @@ func ExtractRateLimitDisplayMetrics(metrics map[string]Metric) []RateLimitDispla } func FallbackDisplayMetricKeys(metrics map[string]Metric) []string { - keys := make([]string, 0, len(metrics)) - for key := range metrics { - keys = append(keys, key) - } - slices.Sort(keys) + keys := SortedStringKeys(metrics) if len(keys) == 0 { return nil } diff --git 
a/internal/daemon/accounts.go b/internal/daemon/accounts.go index eac6bf3..5c43261 100644 --- a/internal/daemon/accounts.go +++ b/internal/daemon/accounts.go @@ -222,7 +222,7 @@ func ReadModelRequestKey(req ReadModelRequest) string { } linkKeys = append(linkKeys, source+"="+target) } - sort.Strings(linkKeys) + linkKeys = core.SortedCompactStrings(linkKeys) var b strings.Builder b.Grow(128 + len(accounts)*32 + len(linkKeys)*24) diff --git a/internal/daemon/provider_registry_hash.go b/internal/daemon/provider_registry_hash.go index 4b1644c..f580ebd 100644 --- a/internal/daemon/provider_registry_hash.go +++ b/internal/daemon/provider_registry_hash.go @@ -3,10 +3,11 @@ package daemon import ( "crypto/sha256" "encoding/hex" - "sort" "strings" + "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers" + "github.com/samber/lo" ) // ProviderRegistryHash returns a stable fingerprint for the set of registered providers. @@ -16,21 +17,16 @@ func ProviderRegistryHash() string { return "" } - ids := make([]string, 0, len(all)) - for _, p := range all { - id := strings.TrimSpace(p.ID()) + ids := core.SortedCompactStrings(lo.Map(all, func(provider core.UsageProvider, _ int) string { + id := strings.TrimSpace(provider.ID()) if id == "" { - id = strings.TrimSpace(p.Spec().ID) + id = strings.TrimSpace(provider.Spec().ID) } - if id != "" { - ids = append(ids, id) - } - } + return id + })) if len(ids) == 0 { return "" } - - sort.Strings(ids) sum := sha256.Sum256([]byte(strings.Join(ids, ","))) return hex.EncodeToString(sum[:]) } diff --git a/internal/daemon/source_collectors.go b/internal/daemon/source_collectors.go index 77d2978..f044800 100644 --- a/internal/daemon/source_collectors.go +++ b/internal/daemon/source_collectors.go @@ -9,6 +9,7 @@ import ( "github.com/janekbaraniewski/openusage/internal/providers" "github.com/janekbaraniewski/openusage/internal/providers/shared" 
"github.com/janekbaraniewski/openusage/internal/telemetry" + "github.com/samber/lo" ) type sourceCollectorSpec struct { @@ -106,11 +107,7 @@ func resolveTelemetrySourceOptionsFromAccounts( func buildSourceCollectorSpecs(accounts []core.AccountConfig) ([]sourceCollectorSpec, []string) { providersBySource := telemetrySourcesBySystem() - sourceNames := make([]string, 0, len(providersBySource)) - for sourceName := range providersBySource { - sourceNames = append(sourceNames, sourceName) - } - sort.Strings(sourceNames) + sourceNames := core.SortedStringKeys(providersBySource) specs := make([]sourceCollectorSpec, 0, len(sourceNames)) var warnings []string @@ -127,17 +124,15 @@ func buildSourceCollectorSpecs(accounts []core.AccountConfig) ([]sourceCollector groups := make(map[string][]core.AccountConfig) groupOptions := make(map[string]shared.TelemetryCollectOptions) - groupKeys := make([]string, 0, len(candidates)) for _, acct := range candidates { opts := collectOptionsForAccount(source, acct) key := collectOptionsSignature(opts) if _, ok := groups[key]; !ok { - groupKeys = append(groupKeys, key) groupOptions[key] = opts } groups[key] = append(groups[key], acct) } - sort.Strings(groupKeys) + groupKeys := core.SortedStringKeys(groups) for _, key := range groupKeys { group := groups[key] @@ -151,11 +146,9 @@ func buildSourceCollectorSpecs(accounts []core.AccountConfig) ([]sourceCollector continue } - accountIDs := make([]string, 0, len(group)) - for _, acct := range group { - accountIDs = append(accountIDs, strings.TrimSpace(acct.ID)) - } - sort.Strings(accountIDs) + accountIDs := core.SortedCompactStrings(lo.Map(group, func(acct core.AccountConfig, _ int) string { + return acct.ID + })) delete(opts.Paths, "account_id") specs = append(specs, sourceCollectorSpec{ source: source, @@ -254,27 +247,12 @@ func cloneCollectOptions(in shared.TelemetryCollectOptions) shared.TelemetryColl } func collectOptionsSignature(opts shared.TelemetryCollectOptions) string { - pathKeys := 
make([]string, 0, len(opts.Paths)) - for key, value := range opts.Paths { - trimmedKey := strings.TrimSpace(key) - if trimmedKey == "" || trimmedKey == "account_id" { - continue - } - if strings.TrimSpace(value) == "" { - continue - } - pathKeys = append(pathKeys, trimmedKey) - } - sort.Strings(pathKeys) - - listKeys := make([]string, 0, len(opts.PathLists)) - for key, values := range opts.PathLists { - if strings.TrimSpace(key) == "" || len(values) == 0 { - continue - } - listKeys = append(listKeys, strings.TrimSpace(key)) - } - sort.Strings(listKeys) + pathKeys := lo.Filter(core.SortedStringKeys(opts.Paths), func(key string, _ int) bool { + return key != "account_id" && strings.TrimSpace(opts.Paths[key]) != "" + }) + listKeys := lo.Filter(core.SortedStringKeys(opts.PathLists), func(key string, _ int) bool { + return len(opts.PathLists[key]) > 0 + }) var b strings.Builder for _, key := range pathKeys { @@ -285,11 +263,7 @@ func collectOptionsSignature(opts shared.TelemetryCollectOptions) string { b.WriteByte(';') } for _, key := range listKeys { - values := append([]string{}, opts.PathLists[key]...) 
- for i := range values { - values[i] = strings.TrimSpace(values[i]) - } - sort.Strings(values) + values := core.SortedCompactStrings(opts.PathLists[key]) b.WriteString("l:") b.WriteString(key) b.WriteByte('=') diff --git a/internal/providers/claude_code/conversation_usage.go b/internal/providers/claude_code/conversation_usage.go index 1d77e7f..58d12dd 100644 --- a/internal/providers/claude_code/conversation_usage.go +++ b/internal/providers/claude_code/conversation_usage.go @@ -407,8 +407,7 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna if snap.DailySeries == nil { snap.DailySeries = make(map[string][]core.TimePoint) } - dates := lo.Keys(dailyTokenTotals) - sort.Strings(dates) + dates := core.SortedStringKeys(dailyTokenTotals) if len(snap.DailySeries["messages"]) == 0 && len(dates) > 0 { for _, d := range dates { @@ -656,8 +655,7 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna snap.Raw["jsonl_today_web_search_requests"] = fmt.Sprintf("%d", todayWebSearch) snap.Raw["jsonl_today_web_fetch_requests"] = fmt.Sprintf("%d", todayWebFetch) - models := lo.Keys(todayModels) - sort.Strings(models) + models := core.SortedStringKeys(todayModels) snap.Raw["jsonl_today_models"] = strings.Join(models, ", ") } @@ -708,8 +706,7 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna snap.Raw["block_start"] = currentBlockStart.Format(time.RFC3339) snap.Raw["block_end"] = currentBlockEnd.Format(time.RFC3339) - blockModelList := lo.Keys(blockModels) - sort.Strings(blockModelList) + blockModelList := core.SortedStringKeys(blockModels) snap.Raw["block_models"] = strings.Join(blockModelList, ", ") elapsed := now.Sub(currentBlockStart) diff --git a/internal/providers/claude_code/local_helpers.go b/internal/providers/claude_code/local_helpers.go index 21538d1..c00e10e 100644 --- a/internal/providers/claude_code/local_helpers.go +++ b/internal/providers/claude_code/local_helpers.go @@ 
-163,12 +163,7 @@ func extractToolPathCandidates(input any) []string { } walk(input, false) - out := make([]string, 0, len(candidates)) - for candidate := range candidates { - out = append(out, candidate) - } - sort.Strings(out) - return out + return core.SortedStringKeys(candidates) } func extractPathTokens(raw string) []string { diff --git a/internal/providers/codex/session_usage.go b/internal/providers/codex/session_usage.go index 17817cc..4a4376b 100644 --- a/internal/providers/codex/session_usage.go +++ b/internal/providers/codex/session_usage.go @@ -11,7 +11,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/shared" - "github.com/samber/lo" ) func (p *Provider) readLatestSession(sessionsDir string, snap *core.UsageSnapshot) error { @@ -1048,8 +1047,7 @@ func (p *Provider) readDailySessionCounts(sessionsDir string, snap *core.UsageSn return } - dates := lo.Keys(dayCounts) - sort.Strings(dates) + dates := core.SortedStringKeys(dayCounts) for _, d := range dates { snap.DailySeries["sessions"] = append(snap.DailySeries["sessions"], core.TimePoint{ diff --git a/internal/providers/copilot/local_helpers.go b/internal/providers/copilot/local_helpers.go index 8d145bd..bd392b1 100644 --- a/internal/providers/copilot/local_helpers.go +++ b/internal/providers/copilot/local_helpers.go @@ -127,8 +127,7 @@ func latestSeriesValue(m map[string]float64) (string, float64) { if len(m) == 0 { return "", 0 } - dates := lo.Keys(m) - sort.Strings(dates) + dates := core.SortedStringKeys(m) last := dates[len(dates)-1] return last, m[last] } @@ -441,12 +440,7 @@ func extractCopilotToolPaths(raw json.RawMessage) []string { } walk(payload, false) - out := make([]string, 0, len(candidates)) - for c := range candidates { - out = append(out, c) - } - sort.Strings(out) - return out + return core.SortedStringKeys(candidates) } func extractCopilotPathTokens(raw string) []string { diff --git 
a/internal/providers/copilot/telemetry_session_file.go b/internal/providers/copilot/telemetry_session_file.go index 9a4a1bd..0d62e4c 100644 --- a/internal/providers/copilot/telemetry_session_file.go +++ b/internal/providers/copilot/telemetry_session_file.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "os" - "sort" "strings" "time" @@ -616,11 +615,7 @@ func appendSessionShutdownEvents(out *[]shared.TelemetryEvent, state *copilotTel return } - models := make([]string, 0, len(shutdown.ModelMetrics)) - for model := range shutdown.ModelMetrics { - models = append(models, model) - } - sort.Strings(models) + models := core.SortedStringKeys(shutdown.ModelMetrics) for idx, model := range models { appendShutdownModelMetricEvent(out, state, lineNum, occurredAt, shutdown, model, idx) diff --git a/internal/providers/cursor/tracking_projection.go b/internal/providers/cursor/tracking_projection.go index 97e9735..59d39be 100644 --- a/internal/providers/cursor/tracking_projection.go +++ b/internal/providers/cursor/tracking_projection.go @@ -5,12 +5,10 @@ import ( "database/sql" "fmt" "math" - "sort" "strconv" "strings" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) func (p *Provider) readTrackingDB(ctx context.Context, dbPath string, snap *core.UsageSnapshot) error { @@ -398,8 +396,7 @@ func mapToSortedDailyPoints(byDay map[string]float64) []core.TimePoint { if len(byDay) == 0 { return nil } - days := lo.Keys(byDay) - sort.Strings(days) + days := core.SortedStringKeys(byDay) points := make([]core.TimePoint, 0, len(days)) for _, day := range days { points = append(points, core.TimePoint{Date: day, Value: byDay[day]}) diff --git a/internal/providers/gemini_cli/api_usage.go b/internal/providers/gemini_cli/api_usage.go index 6a094be..3c074a7 100644 --- a/internal/providers/gemini_cli/api_usage.go +++ b/internal/providers/gemini_cli/api_usage.go @@ -9,14 +9,12 @@ import ( "net/http" "net/url" "os" - "sort" "strconv" "strings" "time" 
"github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/providers/shared" - "github.com/samber/lo" ) func (p *Provider) fetchUsageFromAPI(ctx context.Context, snap *core.UsageSnapshot, creds oauthCreds, acct core.AccountConfig) error { @@ -358,8 +356,7 @@ func applyQuotaBuckets(snap *core.UsageSnapshot, buckets []bucketInfo) quotaAggr return result } - keys := lo.Keys(aggregates) - sort.Strings(keys) + keys := core.SortedStringKeys(aggregates) modelWorst := make(map[string]float64) var summary []string diff --git a/internal/providers/gemini_cli/session_usage.go b/internal/providers/gemini_cli/session_usage.go index f50be91..22f4e9d 100644 --- a/internal/providers/gemini_cli/session_usage.go +++ b/internal/providers/gemini_cli/session_usage.go @@ -4,10 +4,8 @@ import ( "bytes" "encoding/json" "fmt" - "maps" "os" "path/filepath" - "slices" "sort" "strings" "time" @@ -21,8 +19,8 @@ func mapKeysSorted(values map[string]bool) []string { if len(values) == 0 { return nil } - out := slices.Sorted(maps.Keys(values)) - return slices.DeleteFunc(out, func(key string) bool { return strings.TrimSpace(key) == "" }) + out := core.SortedStringKeys(values) + return lo.Filter(out, func(key string, _ int) bool { return strings.TrimSpace(key) != "" }) } func formatGeminiNameList(values []string, max int) string { @@ -920,12 +918,7 @@ func extractGeminiToolPaths(raw json.RawMessage) []string { } walk(payload, false) - out := make([]string, 0, len(candidates)) - for c := range candidates { - out = append(out, c) - } - sort.Strings(out) - return out + return core.SortedStringKeys(candidates) } func extractGeminiPathTokens(raw string) []string { @@ -1243,7 +1236,7 @@ func latestSeriesValue(values map[string]float64) (string, float64) { if len(values) == 0 { return "", 0 } - dates := slices.Sorted(maps.Keys(values)) + dates := core.SortedStringKeys(values) last := dates[len(dates)-1] return last, values[last] } diff --git 
a/internal/providers/shared/telemetry.go b/internal/providers/shared/telemetry.go index 43f3ea2..68f9ab1 100644 --- a/internal/providers/shared/telemetry.go +++ b/internal/providers/shared/telemetry.go @@ -5,7 +5,6 @@ import ( "errors" "os" "path/filepath" - "sort" "strconv" "strings" "time" @@ -213,10 +212,7 @@ func CollectFilesByExt(roots []string, exts map[string]bool) []string { } func uniqueStrings(in []string) []string { - trimmed := lo.Map(in, func(s string, _ int) string { return strings.TrimSpace(s) }) - result := lo.Uniq(lo.Compact(trimmed)) - sort.Strings(result) - return result + return core.SortedCompactStrings(in) } // ExtractFilePathsFromPayload walks a JSON-like structure and extracts file path @@ -254,12 +250,7 @@ func ExtractFilePathsFromPayload(input any) []string { } walk(input, false) - out := make([]string, 0, len(candidates)) - for candidate := range candidates { - out = append(out, candidate) - } - sort.Strings(out) - return out + return core.SortedStringKeys(candidates) } func extractPathTokens(raw string) []string { diff --git a/internal/providers/zai/usage_extract.go b/internal/providers/zai/usage_extract.go index f6d82e8..1d77d52 100644 --- a/internal/providers/zai/usage_extract.go +++ b/internal/providers/zai/usage_extract.go @@ -3,9 +3,10 @@ package zai import ( "encoding/json" "maps" - "sort" "strings" "time" + + "github.com/janekbaraniewski/openusage/internal/core" ) func extractUsageSamples(raw json.RawMessage, kind string) []usageSample { @@ -281,11 +282,7 @@ func extractUsageRows(v any) []map[string]any { return combined } - mapKeys := make([]string, 0, len(value)) - for key := range value { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) + mapKeys := core.SortedStringKeys(value) var all []map[string]any for _, key := range mapKeys { @@ -375,11 +372,7 @@ func extractCreditGrantRows(v any) []map[string]any { return rows } - keys := make([]string, 0, len(value)) - for key := range value { - keys = append(keys, key) - } 
- sort.Strings(keys) + keys := core.SortedStringKeys(value) for _, key := range keys { rows = append(rows, extractCreditGrantRows(value[key])...) } diff --git a/internal/providers/zai/usage_helpers.go b/internal/providers/zai/usage_helpers.go index 852f195..45ba095 100644 --- a/internal/providers/zai/usage_helpers.go +++ b/internal/providers/zai/usage_helpers.go @@ -11,6 +11,7 @@ import ( "time" "github.com/janekbaraniewski/openusage/internal/core" + "github.com/samber/lo" ) func captureEndpointPayload(snap *core.UsageSnapshot, endpoint string, body []byte) { @@ -121,11 +122,7 @@ func walkPayloadStats(path string, v any, numericByPath map[string]*payloadNumer if objectCount != nil { *objectCount = *objectCount + 1 } - keys := make([]string, 0, len(value)) - for key := range value { - keys = append(keys, key) - } - sort.Strings(keys) + keys := core.SortedStringKeys(value) for _, key := range keys { next := appendPayloadPath(path, key) walkPayloadStats(next, value[key], numericByPath, leafCount, objectCount, arrayCount) @@ -479,12 +476,7 @@ func accumulateUsageRollup(target map[string]*usageRollup, key string, sample us } func sortedUsageRollupKeys(values map[string]*usageRollup) []string { - keys := make([]string, 0, len(values)) - for key := range values { - keys = append(keys, key) - } - sort.Strings(keys) - return keys + return core.SortedStringKeys(values) } func summarizeShareUsage(values map[string]float64, maxItems int) string { @@ -722,7 +714,7 @@ func sanitizeMetricSlug(value string) string { } func clamp(value, minVal, maxVal float64) float64 { - return math.Min(math.Max(value, minVal), maxVal) + return lo.Clamp(value, minVal, maxVal) } func apiErrorMessage(err *apiError) string { diff --git a/internal/providers/zai/zai.go b/internal/providers/zai/zai.go index f4e7fa0..aff69bb 100644 --- a/internal/providers/zai/zai.go +++ b/internal/providers/zai/zai.go @@ -872,11 +872,7 @@ func applyModelUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { 
snap.Raw["activity_models"] = strconv.Itoa(len(modelTotals)) snap.SetAttribute("activity_models", strconv.Itoa(len(modelTotals))) - modelKeys := make([]string, 0, len(modelTotals)) - for k := range modelTotals { - modelKeys = append(modelKeys, k) - } - sort.Strings(modelKeys) + modelKeys := core.SortedStringKeys(modelTotals) for _, model := range modelKeys { stats := modelTotals[model] @@ -1111,11 +1107,7 @@ func applyToolUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { setUsedMetric(snap, "today_tool_calls", todayCalls, "calls", "today") setUsedMetric(snap, "7d_tool_calls", totalCalls, "calls", "7d") - keys := make([]string, 0, len(toolTotals)) - for tool := range toolTotals { - keys = append(keys, tool) - } - sort.Strings(keys) + keys := core.SortedStringKeys(toolTotals) for _, tool := range keys { stats := toolTotals[tool] slug := sanitizeMetricSlug(tool) diff --git a/internal/telemetry/provider_links.go b/internal/telemetry/provider_links.go index cc11b7f..8647a5f 100644 --- a/internal/telemetry/provider_links.go +++ b/internal/telemetry/provider_links.go @@ -1,9 +1,9 @@ package telemetry import ( - "maps" - "slices" "strings" + + "github.com/janekbaraniewski/openusage/internal/core" ) func normalizeProviderLinks(in map[string]string) map[string]string { @@ -38,5 +38,5 @@ func telemetrySourceProvidersForTarget(targetProvider string, links map[string]s } } - return slices.Sorted(maps.Keys(set)) + return core.SortedStringKeys(set) } diff --git a/internal/telemetry/quota_stream.go b/internal/telemetry/quota_stream.go index 958a7eb..a5d8d68 100644 --- a/internal/telemetry/quota_stream.go +++ b/internal/telemetry/quota_stream.go @@ -3,11 +3,9 @@ package telemetry import ( "context" "fmt" - "sort" "time" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) const providerSnapshotSchemaVersion = "provider_snapshot_v1" @@ -40,8 +38,7 @@ func BuildLimitSnapshotRequests(snaps map[string]core.UsageSnapshot) []IngestReq return nil 
} - accountIDs := lo.Keys(snaps) - sort.Strings(accountIDs) + accountIDs := core.SortedStringKeys(snaps) out := make([]IngestRequest, 0, len(accountIDs)) for _, accountID := range accountIDs { diff --git a/internal/telemetry/usage_view_queries.go b/internal/telemetry/usage_view_queries.go index d3f7ad5..b2ddaf4 100644 --- a/internal/telemetry/usage_view_queries.go +++ b/internal/telemetry/usage_view_queries.go @@ -670,7 +670,5 @@ func normalizeProviderIDs(in []string) []string { normalized := lo.Map(in, func(s string, _ int) string { return strings.ToLower(strings.TrimSpace(s)) }) - result := lo.Uniq(lo.Compact(normalized)) - sort.Strings(result) - return result + return core.SortedCompactStrings(normalized) } diff --git a/internal/tui/analytics.go b/internal/tui/analytics.go index 4e7e775..3ea5556 100644 --- a/internal/tui/analytics.go +++ b/internal/tui/analytics.go @@ -2,16 +2,13 @@ package tui import ( "fmt" - "maps" "math" - "slices" "sort" "strings" "time" "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) func (m Model) renderAnalyticsContent(w, h int) string { @@ -617,8 +614,7 @@ func aggregateSeriesByDate(series []BrailleSeries) []core.TimePoint { if len(byDate) == 0 { return nil } - dates := lo.Keys(byDate) - sort.Strings(dates) + dates := core.SortedStringKeys(byDate) out := make([]core.TimePoint, 0, len(dates)) for _, d := range dates { out = append(out, core.TimePoint{Date: d, Value: byDate[d]}) @@ -735,8 +731,7 @@ func buildProviderModelHeatmapSpec(data costData, maxRows int, lastDays int) (He rows = rows[:maxRows] } - dates := lo.Keys(dateSet) - sort.Strings(dates) + dates := core.SortedStringKeys(dateSet) dates = clipDatesToRecent(dates, lastDays) labels := make([]string, len(rows)) @@ -1033,5 +1028,5 @@ func truncStr(s string, maxLen int) string { } func sortedMetricKeys(m map[string]core.Metric) []string { - return slices.Sorted(maps.Keys(m)) + return core.SortedStringKeys(m) } diff 
--git a/internal/tui/analytics_data.go b/internal/tui/analytics_data.go index 0b5e2d6..f64e066 100644 --- a/internal/tui/analytics_data.go +++ b/internal/tui/analytics_data.go @@ -1,8 +1,6 @@ package tui import ( - "maps" - "slices" "sort" "strings" "time" @@ -147,7 +145,7 @@ func extractCostData(snapshots map[string]core.UsageSnapshot, filter string) cos data.snapshots = snapshots lowerFilter := strings.ToLower(filter) - keys := slices.Sorted(maps.Keys(snapshots)) + keys := core.SortedStringKeys(snapshots) for _, k := range keys { snap := snapshots[k] diff --git a/internal/tui/charts.go b/internal/tui/charts.go index 9759f20..7f49bf5 100644 --- a/internal/tui/charts.go +++ b/internal/tui/charts.go @@ -9,7 +9,6 @@ import ( "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) type chartItem struct { @@ -480,8 +479,7 @@ func RenderBrailleChart(title string, series []BrailleSeries, w, h int, yFmt fun } } - allDates := lo.Keys(dateSet) - sort.Strings(allDates) + allDates := core.SortedStringKeys(dateSet) startIdx, endIdx := 0, len(allDates)-1 for startIdx < endIdx && !dateHasNonZero[allDates[startIdx]] { @@ -1062,8 +1060,7 @@ func alignSeriesByDate(series []BrailleSeries, continuous bool) ([]string, [][]f dateSet[p.Date] = true } } - dates := lo.Keys(dateSet) - sort.Strings(dates) + dates := core.SortedStringKeys(dateSet) if len(dates) == 0 { return nil, nil } diff --git a/internal/tui/detail.go b/internal/tui/detail.go index a4441dc..fc0d7cb 100644 --- a/internal/tui/detail.go +++ b/internal/tui/detail.go @@ -3,14 +3,12 @@ package tui import ( "fmt" "math" - "sort" "strconv" "strings" "time" "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) type DetailTab int @@ -386,11 +384,7 @@ func renderInfoSection(sb *strings.Builder, snap core.UsageSnapshot, widget core // renderKeyValuePairs renders a sorted key-value map with consistent formatting. 
func renderKeyValuePairs(sb *strings.Builder, data map[string]string, labelW, maxValW int, vs lipgloss.Style) { - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) + keys := core.SortedStringKeys(data) for _, k := range keys { v := smartFormatValue(data[k]) @@ -446,8 +440,7 @@ func renderRawData(sb *strings.Builder, raw map[string]string, widget core.Dashb } } - keys := lo.Keys(raw) - sort.Strings(keys) + keys := core.SortedStringKeys(raw) for _, k := range keys { if rendered[k] || strings.HasSuffix(k, "_error") { diff --git a/internal/tui/detail_metrics.go b/internal/tui/detail_metrics.go index 08197af..fa05662 100644 --- a/internal/tui/detail_metrics.go +++ b/internal/tui/detail_metrics.go @@ -8,7 +8,6 @@ import ( "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) type metricGroup struct { @@ -187,8 +186,7 @@ func renderTimersSection(sb *strings.Builder, resets map[string]time.Time, widge labelW := sectionLabelWidth(w) renderDetailSectionHeader(sb, "Timers", w) - timerKeys := lo.Keys(resets) - sort.Strings(timerKeys) + timerKeys := core.SortedStringKeys(resets) for _, k := range timerKeys { t := resets[k] diff --git a/internal/tui/model.go b/internal/tui/model.go index 741e252..fd9bcac 100644 --- a/internal/tui/model.go +++ b/internal/tui/model.go @@ -2,7 +2,6 @@ package tui import ( "fmt" - "sort" "strings" "time" @@ -917,8 +916,7 @@ func (m *Model) ensureSnapshotProvidersKnown() { if len(m.snapshots) == 0 { return } - keys := lo.Keys(m.snapshots) - sort.Strings(keys) + keys := core.SortedStringKeys(m.snapshots) for _, id := range keys { if m.providerOrderIndex(id) >= 0 { @@ -1077,9 +1075,7 @@ func (m Model) telemetryUnmappedProviders() []string { } } - out := lo.Keys(seen) - sort.Strings(out) - return out + return core.SortedStringKeys(seen) } func (m Model) telemetryProviderLinkHints() []string { @@ -1092,9 +1088,7 @@ func (m Model) 
telemetryProviderLinkHints() []string { seen[hint] = true } - out := lo.Keys(seen) - sort.Strings(out) - return out + return core.SortedStringKeys(seen) } func (m Model) configuredProviderIDs() []string { @@ -1115,9 +1109,7 @@ func (m Model) configuredProviderIDs() []string { seen[providerID] = true } - out := lo.Keys(seen) - sort.Strings(out) - return out + return core.SortedStringKeys(seen) } func (m *Model) refreshIntegrationStatuses() { @@ -1170,10 +1162,9 @@ func (m *Model) rebuildSortedIDs() { seen[id] = true } - extra := lo.Filter(lo.Keys(m.snapshots), func(id string, _ int) bool { + extra := lo.Filter(core.SortedStringKeys(m.snapshots), func(id string, _ int) bool { return !seen[id] && m.isProviderEnabled(id) }) - sort.Strings(extra) m.sortedIDs = append(ordered, extra...) if m.cursor >= len(m.sortedIDs) { diff --git a/internal/tui/tiles_gauge.go b/internal/tui/tiles_gauge.go index 4c9ce05..fdbc114 100644 --- a/internal/tui/tiles_gauge.go +++ b/internal/tui/tiles_gauge.go @@ -1,13 +1,11 @@ package tui import ( - "sort" "strconv" "strings" "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) func (m Model) buildTileGaugeLines(snap core.UsageSnapshot, widget core.DashboardWidget, innerW int) []string { @@ -26,8 +24,7 @@ func (m Model) buildTileGaugeLines(snap core.UsageSnapshot, widget core.Dashboar return m.buildGaugeShimmerLines(widget, maxLabelW, gaugeW, maxLines) } - keys := lo.Keys(snap.Metrics) - sort.Strings(keys) + keys := core.SortedStringKeys(snap.Metrics) keys = prioritizeMetricKeys(keys, widget.GaugePriority) // When GaugePriority is set, treat it as an allowlist — only those diff --git a/internal/tui/tiles_metrics.go b/internal/tui/tiles_metrics.go index f296e89..7ea354c 100644 --- a/internal/tui/tiles_metrics.go +++ b/internal/tui/tiles_metrics.go @@ -3,12 +3,10 @@ package tui import ( "fmt" "slices" - "sort" "strings" "github.com/charmbracelet/lipgloss" 
"github.com/janekbaraniewski/openusage/internal/core" - "github.com/samber/lo" ) type compactMetricRowSpec struct { @@ -116,8 +114,7 @@ func collectCompactMetricSegments(spec compactMetricRowSpec, widget core.Dashboa } if spec.match != nil && len(segments) < maxSegments { - keys := lo.Keys(metrics) - sort.Strings(keys) + keys := core.SortedStringKeys(metrics) for _, key := range keys { if len(segments) >= maxSegments { break @@ -310,8 +307,7 @@ func (m Model) buildTileMetricLines(snap core.UsageSnapshot, widget core.Dashboa return nil } - keys := lo.Keys(snap.Metrics) - sort.Strings(keys) + keys := core.SortedStringKeys(snap.Metrics) maxLabel := innerW/2 - 1 if maxLabel < 8 { From 663de119cd99527141a9fbfe5f8db18366368121 Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Tue, 10 Mar 2026 10:51:09 +0100 Subject: [PATCH 31/32] refactor: split tui render surfaces and add runtime hints --- internal/core/provider.go | 39 +- internal/detect/codex.go | 3 + internal/detect/detect.go | 7 + internal/detect/ollama.go | 5 + internal/providers/codex/codex.go | 9 +- internal/providers/codex/live_usage.go | 8 +- internal/providers/gemini_cli/api_usage.go | 4 +- internal/providers/gemini_cli/gemini_cli.go | 5 +- internal/providers/ollama/cloud_api.go | 6 +- internal/providers/ollama/local_paths.go | 31 +- internal/tui/detail.go | 354 ----------- internal/tui/detail_format.go | 219 +++++++ internal/tui/detail_info.go | 119 ++++ internal/tui/model.go | 531 ----------------- internal/tui/model_panels.go | 257 ++++++++ internal/tui/model_view.go | 242 ++++++++ internal/tui/settings_modal.go | 628 -------------------- internal/tui/settings_modal_preferences.go | 270 +++++++++ internal/tui/settings_modal_sections.go | 186 ++++++ 19 files changed, 1368 insertions(+), 1555 deletions(-) create mode 100644 internal/tui/detail_format.go create mode 100644 internal/tui/detail_info.go create mode 100644 internal/tui/model_panels.go create mode 100644 internal/tui/model_view.go create mode 
100644 internal/tui/settings_modal_preferences.go create mode 100644 internal/tui/settings_modal_sections.go diff --git a/internal/core/provider.go b/internal/core/provider.go index 66ab8fe..d962bb8 100644 --- a/internal/core/provider.go +++ b/internal/core/provider.go @@ -32,12 +32,13 @@ type AccountConfig struct { // should use ProviderPaths through Path/SetPath helpers. Paths map[string]string `json:"paths,omitempty"` - Token string `json:"-"` // runtime-only: access token (never persisted) - ExtraData map[string]string `json:"-"` // runtime-only: extra detection data (never persisted) + Token string `json:"-"` // runtime-only: access token (never persisted) + RuntimeHints map[string]string `json:"-"` // runtime-only: non-persisted local/runtime hints + ExtraData map[string]string `json:"-"` // runtime-only: extra detection metadata (never persisted) } // Path returns the named provider-specific path. It checks ProviderPaths first, -// then legacy Paths, then ExtraData (for backward compat with detect), then the fallback. +// then legacy Paths, then runtime hints, then legacy ExtraData fallbacks, then the fallback. 
func (c AccountConfig) Path(key, fallback string) string { if c.ProviderPaths != nil { if v, ok := c.ProviderPaths[key]; ok && v != "" { @@ -49,6 +50,11 @@ func (c AccountConfig) Path(key, fallback string) string { return v } } + if c.RuntimeHints != nil { + if v, ok := c.RuntimeHints[key]; ok && v != "" { + return v + } + } if c.ExtraData != nil { if v, ok := c.ExtraData[key]; ok && v != "" { return v @@ -71,6 +77,33 @@ func (c *AccountConfig) SetPath(key, value string) { c.ProviderPaths[key] = strings.TrimSpace(value) } +func (c AccountConfig) Hint(key, fallback string) string { + if c.RuntimeHints != nil { + if v, ok := c.RuntimeHints[key]; ok && v != "" { + return v + } + } + if c.ExtraData != nil { + if v, ok := c.ExtraData[key]; ok && v != "" { + return v + } + } + if fallback != "" { + return fallback + } + return "" +} + +func (c *AccountConfig) SetHint(key, value string) { + if c == nil || strings.TrimSpace(key) == "" || strings.TrimSpace(value) == "" { + return + } + if c.RuntimeHints == nil { + c.RuntimeHints = make(map[string]string) + } + c.RuntimeHints[strings.TrimSpace(key)] = strings.TrimSpace(value) +} + // PathMap returns a merged copy of provider-local paths, preferring // ProviderPaths over legacy Paths. 
func (c AccountConfig) PathMap() map[string]string { diff --git a/internal/detect/codex.go b/internal/detect/codex.go index 0b0cd9b..6561354 100644 --- a/internal/detect/codex.go +++ b/internal/detect/codex.go @@ -51,13 +51,16 @@ func detectCodex(result *Result) { ExtraData: make(map[string]string), } + acct.SetHint("config_dir", configDir) acct.ExtraData["config_dir"] = configDir if hasSessions { + acct.SetHint("sessions_dir", sessionsDir) acct.ExtraData["sessions_dir"] = sessionsDir } if hasAuth { + acct.SetHint("auth_file", authFile) acct.ExtraData["auth_file"] = authFile email, accountID, planType := extractCodexAuth(authFile) if email != "" { diff --git a/internal/detect/detect.go b/internal/detect/detect.go index f4cda09..77ab650 100644 --- a/internal/detect/detect.go +++ b/internal/detect/detect.go @@ -259,6 +259,10 @@ func detectGHCopilot(result *Result) { "copilot_binary": copilotBin, "config_dir": copilotDir, }, + RuntimeHints: map[string]string{ + "copilot_binary": copilotBin, + "config_dir": copilotDir, + }, }) } @@ -306,6 +310,7 @@ func detectGeminiCLI(result *Result) { Binary: bin, ExtraData: make(map[string]string), } + acct.SetHint("config_dir", configDir) acct.ExtraData["config_dir"] = configDir if hasAccounts { @@ -321,9 +326,11 @@ func detectGeminiCLI(result *Result) { } if v := os.Getenv("GOOGLE_CLOUD_PROJECT"); v != "" { + acct.SetHint("project_id", v) acct.ExtraData["project_id"] = v log.Printf("[detect] Gemini CLI project from GOOGLE_CLOUD_PROJECT: %s", v) } else if v := os.Getenv("GOOGLE_CLOUD_PROJECT_ID"); v != "" { + acct.SetHint("project_id", v) acct.ExtraData["project_id"] = v log.Printf("[detect] Gemini CLI project from GOOGLE_CLOUD_PROJECT_ID: %s", v) } diff --git a/internal/detect/ollama.go b/internal/detect/ollama.go index df8d9cb..1e0edfc 100644 --- a/internal/detect/ollama.go +++ b/internal/detect/ollama.go @@ -45,16 +45,21 @@ func detectOllama(result *Result) { ExtraData: make(map[string]string), } + acct.SetHint("config_dir", 
configDir) + acct.SetHint("cloud_base_url", "https://ollama.com") acct.ExtraData["config_dir"] = configDir acct.ExtraData["cloud_base_url"] = "https://ollama.com" if fileExists(dbPath) { + acct.SetHint("db_path", dbPath) acct.ExtraData["db_path"] = dbPath } if dirExists(logsDir) { + acct.SetHint("logs_dir", logsDir) acct.ExtraData["logs_dir"] = logsDir } if fileExists(serverConfig) { + acct.SetHint("server_config", serverConfig) acct.ExtraData["server_config"] = serverConfig } diff --git a/internal/providers/codex/codex.go b/internal/providers/codex/codex.go index e404788..5d91c8b 100644 --- a/internal/providers/codex/codex.go +++ b/internal/providers/codex/codex.go @@ -198,10 +198,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa DailySeries: make(map[string][]core.TimePoint), } - configDir := "" - if acct.ExtraData != nil { - configDir = acct.ExtraData["config_dir"] - } + configDir := acct.Hint("config_dir", "") if configDir == "" { home, _ := os.UserHomeDir() if home != "" { @@ -218,8 +215,8 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa var hasLocalData bool sessionsDir := filepath.Join(configDir, "sessions") - if acct.ExtraData != nil && acct.ExtraData["sessions_dir"] != "" { - sessionsDir = acct.ExtraData["sessions_dir"] + if override := acct.Hint("sessions_dir", ""); override != "" { + sessionsDir = override } if err := p.readLatestSession(sessionsDir, &snap); err != nil { diff --git a/internal/providers/codex/live_usage.go b/internal/providers/codex/live_usage.go index 4af80f6..2cc5ebf 100644 --- a/internal/providers/codex/live_usage.go +++ b/internal/providers/codex/live_usage.go @@ -19,8 +19,8 @@ import ( func (p *Provider) fetchLiveUsage(ctx context.Context, acct core.AccountConfig, configDir string, snap *core.UsageSnapshot) (bool, error) { authPath := filepath.Join(configDir, "auth.json") - if acct.ExtraData != nil && acct.ExtraData["auth_file"] != "" { - authPath = 
acct.ExtraData["auth_file"] + if override := acct.Hint("auth_file", ""); override != "" { + authPath = override } data, err := os.ReadFile(authPath) @@ -48,8 +48,8 @@ func (p *Provider) fetchLiveUsage(ctx context.Context, acct core.AccountConfig, req.Header.Set("Accept", "application/json") accountID := core.FirstNonEmpty(auth.Tokens.AccountID, auth.AccountID) - if accountID == "" && acct.ExtraData != nil { - accountID = acct.ExtraData["account_id"] + if accountID == "" { + accountID = acct.Hint("account_id", "") } if accountID != "" { req.Header.Set("ChatGPT-Account-Id", accountID) diff --git a/internal/providers/gemini_cli/api_usage.go b/internal/providers/gemini_cli/api_usage.go index 3c074a7..e6f496e 100644 --- a/internal/providers/gemini_cli/api_usage.go +++ b/internal/providers/gemini_cli/api_usage.go @@ -33,8 +33,8 @@ func (p *Provider) fetchUsageFromAPI(ctx context.Context, snap *core.UsageSnapsh } else if v := os.Getenv("GOOGLE_CLOUD_PROJECT_ID"); v != "" { projectID = v } - if projectID == "" && acct.ExtraData != nil { - projectID = acct.ExtraData["project_id"] + if projectID == "" { + projectID = acct.Hint("project_id", "") } loadResp, err := loadCodeAssistDetails(ctx, accessToken, projectID, client) diff --git a/internal/providers/gemini_cli/gemini_cli.go b/internal/providers/gemini_cli/gemini_cli.go index fbbdb93..616a5ee 100644 --- a/internal/providers/gemini_cli/gemini_cli.go +++ b/internal/providers/gemini_cli/gemini_cli.go @@ -242,10 +242,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa DailySeries: make(map[string][]core.TimePoint), } - configDir := "" - if acct.ExtraData != nil { - configDir = acct.ExtraData["config_dir"] - } + configDir := acct.Hint("config_dir", "") if configDir == "" { home, _ := os.UserHomeDir() if home != "" { diff --git a/internal/providers/ollama/cloud_api.go b/internal/providers/ollama/cloud_api.go index 78ddbd0..3959d95 100644 --- a/internal/providers/ollama/cloud_api.go +++ 
b/internal/providers/ollama/cloud_api.go @@ -314,10 +314,8 @@ func resolveCloudBaseURL(acct core.AccountConfig) string { return strings.TrimRight(u.String(), "/") } - if acct.ExtraData != nil { - if v := strings.TrimSpace(acct.ExtraData["cloud_base_url"]); v != "" { - return normalize(v) - } + if v := strings.TrimSpace(acct.Hint("cloud_base_url", "")); v != "" { + return normalize(v) } if strings.HasPrefix(strings.ToLower(acct.BaseURL), "https://") && strings.Contains(strings.ToLower(acct.BaseURL), "ollama.com") { return normalize(acct.BaseURL) diff --git a/internal/providers/ollama/local_paths.go b/internal/providers/ollama/local_paths.go index 7c252df..9f4831a 100644 --- a/internal/providers/ollama/local_paths.go +++ b/internal/providers/ollama/local_paths.go @@ -11,11 +11,9 @@ import ( ) func resolveDesktopDBPath(acct core.AccountConfig) string { - if acct.ExtraData != nil { - for _, key := range []string{"db_path", "app_db"} { - if v := strings.TrimSpace(acct.ExtraData[key]); v != "" { - return v - } + for _, key := range []string{"db_path", "app_db"} { + if v := strings.TrimSpace(acct.Hint(key, "")); v != "" { + return v } } @@ -50,13 +48,11 @@ func resolveDesktopDBPath(acct core.AccountConfig) string { } func resolveServerConfigPath(acct core.AccountConfig) string { - if acct.ExtraData != nil { - if v := strings.TrimSpace(acct.ExtraData["server_config"]); v != "" { - return v - } - if configDir := strings.TrimSpace(acct.ExtraData["config_dir"]); configDir != "" { - return filepath.Join(configDir, "server.json") - } + if v := strings.TrimSpace(acct.Hint("server_config", "")); v != "" { + return v + } + if configDir := strings.TrimSpace(acct.Hint("config_dir", "")); configDir != "" { + return filepath.Join(configDir, "server.json") } home, err := os.UserHomeDir() @@ -67,13 +63,10 @@ func resolveServerConfigPath(acct core.AccountConfig) string { } func resolveServerLogFiles(acct core.AccountConfig) []string { - logDir := "" - if acct.ExtraData != nil { - logDir 
= strings.TrimSpace(acct.ExtraData["logs_dir"]) - if logDir == "" { - if configDir := strings.TrimSpace(acct.ExtraData["config_dir"]); configDir != "" { - logDir = filepath.Join(configDir, "logs") - } + logDir := strings.TrimSpace(acct.Hint("logs_dir", "")) + if logDir == "" { + if configDir := strings.TrimSpace(acct.Hint("config_dir", "")); configDir != "" { + logDir = filepath.Join(configDir, "logs") } } if logDir == "" { diff --git a/internal/tui/detail.go b/internal/tui/detail.go index fc0d7cb..77ad294 100644 --- a/internal/tui/detail.go +++ b/internal/tui/detail.go @@ -2,8 +2,6 @@ package tui import ( "fmt" - "math" - "strconv" "strings" "time" @@ -309,355 +307,3 @@ func renderDetailHeader(sb *strings.Builder, snap core.UsageSnapshot, w int) { sb.WriteString(card) sb.WriteString("\n") } - -func wrapTags(tags []string, maxWidth int) []string { - if len(tags) == 0 { - return nil - } - var rows []string - currentRow := "" - currentW := 0 - sep := " " - sepW := 1 - - for _, tag := range tags { - tagW := lipgloss.Width(tag) - if currentW > 0 && currentW+sepW+tagW > maxWidth { - rows = append(rows, currentRow) - currentRow = tag - currentW = tagW - } else { - if currentW > 0 { - currentRow += sep - currentW += sepW - } - currentRow += tag - currentW += tagW - } - } - if currentRow != "" { - rows = append(rows, currentRow) - } - return rows -} - -func titleCase(s string) string { - if len(s) <= 1 { - return s - } - return strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) -} - -// renderInfoSection renders Attributes, Diagnostics, and Raw as separate sub-sections. 
-func renderInfoSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { - labelW := sectionLabelWidth(w) - maxValW := w - labelW - 6 - if maxValW < 20 { - maxValW = 20 - } - if maxValW > 45 { - maxValW = 45 - } - - if len(snap.Attributes) > 0 { - renderDetailSectionHeader(sb, "Attributes", w) - renderKeyValuePairs(sb, snap.Attributes, labelW, maxValW, valueStyle) - } - - if len(snap.Diagnostics) > 0 { - if len(snap.Attributes) > 0 { - sb.WriteString("\n") - } - renderDetailSectionHeader(sb, "Diagnostics", w) - warnValueStyle := lipgloss.NewStyle().Foreground(colorYellow) - renderKeyValuePairs(sb, snap.Diagnostics, labelW, maxValW, warnValueStyle) - } - - if len(snap.Raw) > 0 { - if len(snap.Attributes) > 0 || len(snap.Diagnostics) > 0 { - sb.WriteString("\n") - } - renderDetailSectionHeader(sb, "Raw Data", w) - renderRawData(sb, snap.Raw, widget, w) - } -} - -// renderKeyValuePairs renders a sorted key-value map with consistent formatting. -func renderKeyValuePairs(sb *strings.Builder, data map[string]string, labelW, maxValW int, vs lipgloss.Style) { - keys := core.SortedStringKeys(data) - - for _, k := range keys { - v := smartFormatValue(data[k]) - if len(v) > maxValW { - v = v[:maxValW-3] + "..." 
- } - sb.WriteString(fmt.Sprintf(" %s %s\n", - labelStyle.Width(labelW).Render(prettifyKey(k)), - vs.Render(v), - )) - } -} - -func renderRawData(sb *strings.Builder, raw map[string]string, widget core.DashboardWidget, w int) { - labelW := sectionLabelWidth(w) - - maxValW := w - labelW - 6 - if maxValW < 20 { - maxValW = 20 - } - if maxValW > 45 { - maxValW = 45 - } - - rendered := make(map[string]bool) - - for _, g := range widget.RawGroups { - hasAny := false - for _, key := range g.Keys { - if v, ok := raw[key]; ok && v != "" { - hasAny = true - _ = v - break - } - } - if !hasAny { - continue - } - for _, key := range g.Keys { - v, ok := raw[key] - if !ok || v == "" { - continue - } - rendered[key] = true - fv := smartFormatValue(v) - if len(fv) > maxValW { - fv = fv[:maxValW-3] + "..." - } - sb.WriteString(fmt.Sprintf(" %s %s\n", - labelStyle.Width(labelW).Render(prettifyKey(key)), - valueStyle.Render(fv), - )) - } - } - - keys := core.SortedStringKeys(raw) - - for _, k := range keys { - if rendered[k] || strings.HasSuffix(k, "_error") { - continue - } - v := smartFormatValue(raw[k]) - if len(v) > maxValW { - v = v[:maxValW-3] + "..." 
- } - sb.WriteString(fmt.Sprintf(" %s %s\n", - labelStyle.Width(labelW).Render(prettifyKey(k)), - dimStyle.Render(v), - )) - } -} - -func smartFormatValue(v string) string { - trimmed := strings.TrimSpace(v) - - if n, err := strconv.ParseInt(trimmed, 10, 64); err == nil && n > 1e12 && n < 2e13 { - t := time.Unix(n/1000, 0) - return t.Format("Jan 02, 2006 15:04") - } - - if n, err := strconv.ParseInt(trimmed, 10, 64); err == nil && n > 1e9 && n < 2e10 { - t := time.Unix(n, 0) - return t.Format("Jan 02, 2006 15:04") - } - - return v -} - -func renderDetailSectionHeader(sb *strings.Builder, title string, w int) { - icon := sectionIcon(title) - sc := sectionColor(title) - - iconStyled := lipgloss.NewStyle().Foreground(sc).Render(icon) - titleStyled := lipgloss.NewStyle().Bold(true).Foreground(sc).Render(" " + title + " ") - left := " " + iconStyled + titleStyled - - lineLen := w - lipgloss.Width(left) - 2 - if lineLen < 4 { - lineLen = 4 - } - line := lipgloss.NewStyle().Foreground(sc).Render(strings.Repeat("─", lineLen)) - sb.WriteString(left + line + "\n") -} - -func sectionIcon(title string) string { - switch title { - case "Usage": - return "⚡" - case "Spending": - return "💰" - case "Tokens": - return "📊" - case "Activity": - return "📈" - case "Timers": - return "⏰" - case "Models": - return "🤖" - case "Languages": - return "🗂" - case "Trends": - return "📈" - case "MCP Usage": - return "🔌" - case "Attributes": - return "📋" - case "Diagnostics": - return "⚠" - case "Raw Data": - return "🔧" - default: - return "›" - } -} - -func sectionColor(title string) lipgloss.Color { - switch title { - case "Usage": - return colorYellow - case "Spending": - return colorTeal - case "Tokens": - return colorSapphire - case "Activity": - return colorGreen - case "Timers": - return colorMaroon - case "Models": - return colorLavender - case "Languages": - return colorPeach - case "Trends": - return colorSapphire - case "MCP Usage": - return colorSky - case "Attributes": - return 
colorBlue - case "Diagnostics": - return colorYellow - case "Raw Data": - return colorDim - default: - return colorBlue - } -} - -func formatUsageDetail(m core.Metric) string { - var parts []string - - if m.Remaining != nil { - parts = append(parts, fmt.Sprintf("%.0f%% remaining", *m.Remaining)) - } else if m.Used != nil && m.Limit != nil { - rem := *m.Limit - *m.Used - parts = append(parts, fmt.Sprintf("%.0f%% remaining", rem)) - } - - if m.Window != "" && m.Window != "all_time" && m.Window != "current_period" { - parts = append(parts, "["+m.Window+"]") - } - - return strings.Join(parts, " ") -} - -func formatMetricDetail(m core.Metric) string { - var parts []string - switch { - case m.Used != nil && m.Limit != nil: - parts = append(parts, fmt.Sprintf("%s / %s %s", - formatNumber(*m.Used), formatNumber(*m.Limit), m.Unit)) - case m.Remaining != nil && m.Limit != nil: - parts = append(parts, fmt.Sprintf("%s / %s %s remaining", - formatNumber(*m.Remaining), formatNumber(*m.Limit), m.Unit)) - case m.Used != nil: - parts = append(parts, fmt.Sprintf("%s %s", formatNumber(*m.Used), m.Unit)) - case m.Remaining != nil: - parts = append(parts, fmt.Sprintf("%s %s remaining", formatNumber(*m.Remaining), m.Unit)) - } - - if m.Window != "" && m.Window != "all_time" && m.Window != "current_period" { - parts = append(parts, "["+m.Window+"]") - } - - return strings.Join(parts, " ") -} - -func formatNumber(n float64) string { - if n == 0 { - return "0" - } - abs := math.Abs(n) - switch { - case abs >= 1_000_000: - return fmt.Sprintf("%.1fM", n/1_000_000) - case abs >= 10_000: - return fmt.Sprintf("%.1fK", n/1_000) - case abs >= 1_000: - return fmt.Sprintf("%.0f", n) - case abs == math.Floor(abs): - return fmt.Sprintf("%.0f", n) - default: - return fmt.Sprintf("%.2f", n) - } -} - -func formatTokens(n float64) string { - if n == 0 { - return "-" - } - return formatNumber(n) -} - -func formatUSD(n float64) string { - if n == 0 { - return "-" - } - if n >= 1000 { - return 
fmt.Sprintf("$%.0f", n) - } - return fmt.Sprintf("$%.2f", n) -} - -func formatDuration(d time.Duration) string { - if d < 0 { - d = 0 - } - switch { - case d < time.Minute: - return fmt.Sprintf("%ds", int(d.Seconds())) - case d < time.Hour: - return fmt.Sprintf("%dm%ds", int(d.Minutes()), int(d.Seconds())%60) - case d < 24*time.Hour: - return fmt.Sprintf("%dh%dm", int(d.Hours()), int(d.Minutes())%60) - default: - return fmt.Sprintf("%dd%dh", int(d.Hours())/24, int(d.Hours())%24) - } -} - -func prettifyKey(key string) string { - return core.PrettifyMetricKey(key) -} - -func prettifyModelName(name string) string { - result := strings.ReplaceAll(name, "_", "-") - - switch strings.ToLower(result) { - case "unattributed": - return "unmapped spend (missing historical mapping)" - case "default": - return "default (auto)" - case "composer-1": - return "composer-1 (agent)" - case "github-bugbot": - return "github-bugbot (auto)" - } - return result -} diff --git a/internal/tui/detail_format.go b/internal/tui/detail_format.go new file mode 100644 index 0000000..07f8ba5 --- /dev/null +++ b/internal/tui/detail_format.go @@ -0,0 +1,219 @@ +package tui + +import ( + "fmt" + "math" + "strings" + "time" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func wrapTags(tags []string, maxWidth int) []string { + if len(tags) == 0 { + return nil + } + var rows []string + currentRow := "" + currentW := 0 + for _, tag := range tags { + tagW := lipgloss.Width(tag) + if currentW > 0 && currentW+1+tagW > maxWidth { + rows = append(rows, currentRow) + currentRow = tag + currentW = tagW + continue + } + if currentW > 0 { + currentRow += " " + currentW++ + } + currentRow += tag + currentW += tagW + } + if currentRow != "" { + rows = append(rows, currentRow) + } + return rows +} + +func titleCase(s string) string { + if len(s) <= 1 { + return s + } + return strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) +} + +func renderDetailSectionHeader(sb 
*strings.Builder, title string, w int) { + color := sectionColor(title) + left := " " + + lipgloss.NewStyle().Foreground(color).Render(sectionIcon(title)) + + lipgloss.NewStyle().Bold(true).Foreground(color).Render(" "+title+" ") + lineLen := w - lipgloss.Width(left) - 2 + if lineLen < 4 { + lineLen = 4 + } + sb.WriteString(left + lipgloss.NewStyle().Foreground(color).Render(strings.Repeat("─", lineLen)) + "\n") +} + +func sectionIcon(title string) string { + switch title { + case "Usage": + return "⚡" + case "Spending": + return "💰" + case "Tokens": + return "📊" + case "Activity", "Trends": + return "📈" + case "Timers": + return "⏰" + case "Models": + return "🤖" + case "Languages": + return "🗂" + case "MCP Usage": + return "🔌" + case "Attributes": + return "📋" + case "Diagnostics": + return "⚠" + case "Raw Data": + return "🔧" + default: + return "›" + } +} + +func sectionColor(title string) lipgloss.Color { + switch title { + case "Usage": + return colorYellow + case "Spending": + return colorTeal + case "Tokens", "Trends": + return colorSapphire + case "Activity": + return colorGreen + case "Timers": + return colorMaroon + case "Models": + return colorLavender + case "Languages": + return colorPeach + case "MCP Usage": + return colorSky + case "Attributes": + return colorBlue + case "Diagnostics": + return colorYellow + case "Raw Data": + return colorDim + default: + return colorBlue + } +} + +func formatUsageDetail(m core.Metric) string { + var parts []string + if m.Remaining != nil { + parts = append(parts, fmt.Sprintf("%.0f%% remaining", *m.Remaining)) + } else if m.Used != nil && m.Limit != nil { + parts = append(parts, fmt.Sprintf("%.0f%% remaining", *m.Limit-*m.Used)) + } + if m.Window != "" && m.Window != "all_time" && m.Window != "current_period" { + parts = append(parts, "["+m.Window+"]") + } + return strings.Join(parts, " ") +} + +func formatMetricDetail(m core.Metric) string { + var parts []string + switch { + case m.Used != nil && m.Limit != nil: + 
parts = append(parts, fmt.Sprintf("%s / %s %s", formatNumber(*m.Used), formatNumber(*m.Limit), m.Unit)) + case m.Remaining != nil && m.Limit != nil: + parts = append(parts, fmt.Sprintf("%s / %s %s remaining", formatNumber(*m.Remaining), formatNumber(*m.Limit), m.Unit)) + case m.Used != nil: + parts = append(parts, fmt.Sprintf("%s %s", formatNumber(*m.Used), m.Unit)) + case m.Remaining != nil: + parts = append(parts, fmt.Sprintf("%s %s remaining", formatNumber(*m.Remaining), m.Unit)) + } + if m.Window != "" && m.Window != "all_time" && m.Window != "current_period" { + parts = append(parts, "["+m.Window+"]") + } + return strings.Join(parts, " ") +} + +func formatNumber(n float64) string { + if n == 0 { + return "0" + } + abs := math.Abs(n) + switch { + case abs >= 1_000_000: + return fmt.Sprintf("%.1fM", n/1_000_000) + case abs >= 10_000: + return fmt.Sprintf("%.1fK", n/1_000) + case abs >= 1_000: + return fmt.Sprintf("%.0f", n) + case abs == math.Floor(abs): + return fmt.Sprintf("%.0f", n) + default: + return fmt.Sprintf("%.2f", n) + } +} + +func formatTokens(n float64) string { + if n == 0 { + return "-" + } + return formatNumber(n) +} + +func formatUSD(n float64) string { + if n == 0 { + return "-" + } + if n >= 1000 { + return fmt.Sprintf("$%.0f", n) + } + return fmt.Sprintf("$%.2f", n) +} + +func formatDuration(d time.Duration) string { + if d < 0 { + d = 0 + } + switch { + case d < time.Minute: + return fmt.Sprintf("%ds", int(d.Seconds())) + case d < time.Hour: + return fmt.Sprintf("%dm%ds", int(d.Minutes()), int(d.Seconds())%60) + case d < 24*time.Hour: + return fmt.Sprintf("%dh%dm", int(d.Hours()), int(d.Minutes())%60) + default: + return fmt.Sprintf("%dd%dh", int(d.Hours())/24, int(d.Hours())%24) + } +} + +func prettifyKey(key string) string { + return core.PrettifyMetricKey(key) +} + +func prettifyModelName(name string) string { + result := strings.ReplaceAll(name, "_", "-") + switch strings.ToLower(result) { + case "unattributed": + return "unmapped spend 
(missing historical mapping)" + case "default": + return "default (auto)" + case "composer-1": + return "composer-1 (agent)" + case "github-bugbot": + return "github-bugbot (auto)" + default: + return result + } +} diff --git a/internal/tui/detail_info.go b/internal/tui/detail_info.go new file mode 100644 index 0000000..15a8aad --- /dev/null +++ b/internal/tui/detail_info.go @@ -0,0 +1,119 @@ +package tui + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func renderInfoSection(sb *strings.Builder, snap core.UsageSnapshot, widget core.DashboardWidget, w int) { + labelW := sectionLabelWidth(w) + maxValW := w - labelW - 6 + if maxValW < 20 { + maxValW = 20 + } + if maxValW > 45 { + maxValW = 45 + } + + if len(snap.Attributes) > 0 { + renderDetailSectionHeader(sb, "Attributes", w) + renderKeyValuePairs(sb, snap.Attributes, labelW, maxValW, valueStyle) + } + if len(snap.Diagnostics) > 0 { + if len(snap.Attributes) > 0 { + sb.WriteString("\n") + } + renderDetailSectionHeader(sb, "Diagnostics", w) + renderKeyValuePairs(sb, snap.Diagnostics, labelW, maxValW, lipgloss.NewStyle().Foreground(colorYellow)) + } + if len(snap.Raw) > 0 { + if len(snap.Attributes) > 0 || len(snap.Diagnostics) > 0 { + sb.WriteString("\n") + } + renderDetailSectionHeader(sb, "Raw Data", w) + renderRawData(sb, snap.Raw, widget, w) + } +} + +func renderKeyValuePairs(sb *strings.Builder, data map[string]string, labelW, maxValW int, valueStyle lipgloss.Style) { + for _, key := range core.SortedStringKeys(data) { + value := smartFormatValue(data[key]) + if len(value) > maxValW { + value = value[:maxValW-3] + "..." 
+ } + sb.WriteString(fmt.Sprintf(" %s %s\n", + labelStyle.Width(labelW).Render(prettifyKey(key)), + valueStyle.Render(value), + )) + } +} + +func renderRawData(sb *strings.Builder, raw map[string]string, widget core.DashboardWidget, w int) { + labelW := sectionLabelWidth(w) + maxValW := w - labelW - 6 + if maxValW < 20 { + maxValW = 20 + } + if maxValW > 45 { + maxValW = 45 + } + + rendered := make(map[string]bool) + for _, group := range widget.RawGroups { + hasAny := false + for _, key := range group.Keys { + if value := strings.TrimSpace(raw[key]); value != "" { + hasAny = true + break + } + } + if !hasAny { + continue + } + for _, key := range group.Keys { + value := strings.TrimSpace(raw[key]) + if value == "" { + continue + } + rendered[key] = true + value = smartFormatValue(value) + if len(value) > maxValW { + value = value[:maxValW-3] + "..." + } + sb.WriteString(fmt.Sprintf(" %s %s\n", + labelStyle.Width(labelW).Render(prettifyKey(key)), + valueStyle.Render(value), + )) + } + } + + for _, key := range core.SortedStringKeys(raw) { + if rendered[key] || strings.HasSuffix(key, "_error") { + continue + } + value := smartFormatValue(raw[key]) + if len(value) > maxValW { + value = value[:maxValW-3] + "..." 
+ } + sb.WriteString(fmt.Sprintf(" %s %s\n", + labelStyle.Width(labelW).Render(prettifyKey(key)), + dimStyle.Render(value), + )) + } +} + +func smartFormatValue(v string) string { + trimmed := strings.TrimSpace(v) + if n, err := strconv.ParseInt(trimmed, 10, 64); err == nil && n > 1e12 && n < 2e13 { + return time.Unix(n/1000, 0).Format("Jan 02, 2006 15:04") + } + if n, err := strconv.ParseInt(trimmed, 10, 64); err == nil && n > 1e9 && n < 2e10 { + return time.Unix(n, 0).Format("Jan 02, 2006 15:04") + } + return v +} diff --git a/internal/tui/model.go b/internal/tui/model.go index fd9bcac..7f65e47 100644 --- a/internal/tui/model.go +++ b/internal/tui/model.go @@ -1,12 +1,10 @@ package tui import ( - "fmt" "strings" "time" tea "github.com/charmbracelet/bubbletea" - "github.com/charmbracelet/lipgloss" "github.com/janekbaraniewski/openusage/internal/config" "github.com/janekbaraniewski/openusage/internal/core" "github.com/janekbaraniewski/openusage/internal/integrations" @@ -341,535 +339,6 @@ func (m Model) shouldUsePanelScroll() bool { return m.tileCols() == 1 } -func (m Model) View() string { - if m.width < 30 || m.height < 8 { - return lipgloss.NewStyle(). - Foreground(colorDim). - Render("\n Terminal too small. 
Resize to at least 30×8.") - } - if !m.hasData { - return m.renderSplash(m.width, m.height) - } - if m.showHelp { - return m.renderHelpOverlay(m.width, m.height) - } - view := m.renderDashboard() - if m.settings.show { - return m.renderSettingsModalOverlay() - } - return view -} - -func (m Model) renderDashboardContent(w, contentH int) string { - if m.mode == modeDetail { - return m.renderDetailPanel(w, contentH) - } - switch m.activeDashboardView() { - case dashboardViewTabs: - return m.renderTilesTabs(w, contentH) - case dashboardViewSplit: - return m.renderSplitPanes(w, contentH) - case dashboardViewCompare: - return m.renderComparePanes(w, contentH) - case dashboardViewStacked: - return m.renderTilesSingleColumn(w, contentH) - default: - return m.renderTiles(w, contentH) - } -} - -func (m Model) renderHeader(w int) string { - bolt := PulseChar( - lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("⚡"), - lipgloss.NewStyle().Foreground(colorDim).Bold(true).Render("⚡"), - m.animFrame, - ) - brandText := RenderGradientText("OpenUsage", m.animFrame) - - tabs := m.renderScreenTabs() - - spinnerStr := "" - if m.refreshing { - frame := m.animFrame % len(SpinnerFrames) - spinnerStr = " " + lipgloss.NewStyle().Foreground(colorAccent).Render(SpinnerFrames[frame]) - } - - ids := m.filteredIDs() - unmappedProviders := m.telemetryUnmappedProviders() - - okCount, warnCount, errCount := 0, 0, 0 - for _, id := range ids { - snap := m.snapshots[id] - switch snap.Status { - case core.StatusOK: - okCount++ - case core.StatusNearLimit: - warnCount++ - case core.StatusLimited, core.StatusError: - errCount++ - } - } - - var info string - - if m.settings.show { - info = m.settingsModalInfo() - } else { - switch m.screen { - case screenAnalytics: - info = dimStyle.Render("spend analysis") - if m.analyticsFilter.text != "" { - info += " (filtered)" - } - default: - info = fmt.Sprintf("⊞ %d providers", len(ids)) - if m.filter.text != "" { - info += " (filtered)" - } - info += 
" · " + m.dashboardViewStatusLabel() - } - } - if !m.settings.show { - twLabel := m.timeWindow.Label() - info += " · " + twLabel - } - if !m.settings.show && len(unmappedProviders) > 0 { - info += " · detected additional providers, check settings" - } - - statusInfo := "" - if okCount > 0 { - dot := PulseChar("●", "◉", m.animFrame) - statusInfo += lipgloss.NewStyle().Foreground(colorGreen).Render(fmt.Sprintf(" %d%s", okCount, dot)) - } - if warnCount > 0 { - dot := PulseChar("◐", "◑", m.animFrame) - statusInfo += lipgloss.NewStyle().Foreground(colorYellow).Render(fmt.Sprintf(" %d%s", warnCount, dot)) - } - if errCount > 0 { - dot := PulseChar("✗", "✕", m.animFrame) - statusInfo += lipgloss.NewStyle().Foreground(colorRed).Render(fmt.Sprintf(" %d%s", errCount, dot)) - } - if len(unmappedProviders) > 0 { - statusInfo += lipgloss.NewStyle(). - Foreground(colorPeach). - Render(fmt.Sprintf(" ⚠ %d unmapped", len(unmappedProviders))) - } - - infoRendered := lipgloss.NewStyle().Foreground(colorSubtext).Render(info) - - left := bolt + " " + brandText + " " + tabs + statusInfo + spinnerStr - gap := w - lipgloss.Width(left) - lipgloss.Width(infoRendered) - if gap < 1 { - gap = 1 - } - - line := left + strings.Repeat(" ", gap) + infoRendered - - sep := m.renderGradientSeparator(w) - - return line + "\n" + sep -} - -func (m Model) renderGradientSeparator(w int) string { - if w <= 0 { - return "" - } - sepStyle := lipgloss.NewStyle().Foreground(colorSurface1) - return sepStyle.Render(strings.Repeat("━", w)) -} - -func (m Model) renderScreenTabs() string { - screens := m.availableScreens() - if len(screens) <= 1 { - return "" - } - var parts []string - for i, screen := range screens { - label := screenLabelByTab[screen] - tabStr := fmt.Sprintf("%d:%s", i+1, label) - if screen == m.screen { - parts = append(parts, screenTabActiveStyle.Render(tabStr)) - } else { - parts = append(parts, screenTabInactiveStyle.Render(tabStr)) - } - } - return strings.Join(parts, "") -} - -func (m 
Model) renderFooter(w int) string { - sep := lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("━", w)) - statusLine := m.renderFooterStatusLine(w) - return sep + "\n" + statusLine -} - -func (m Model) renderFooterStatusLine(w int) string { - searchStyle := lipgloss.NewStyle().Foreground(colorSapphire) - - switch { - case m.settings.show: - if m.settings.status != "" { - return " " + dimStyle.Render(m.settings.status) - } - return " " + helpStyle.Render("? help") - case m.screen == screenAnalytics: - if m.analyticsFilter.active { - cursor := PulseChar("█", "▌", m.animFrame) - return " " + dimStyle.Render("search: ") + searchStyle.Render(m.analyticsFilter.text+cursor) - } - if m.analyticsFilter.text != "" { - return " " + dimStyle.Render("filter: ") + searchStyle.Render(m.analyticsFilter.text) - } - default: - if m.filter.active { - cursor := PulseChar("█", "▌", m.animFrame) - return " " + dimStyle.Render("search: ") + searchStyle.Render(m.filter.text+cursor) - } - if m.filter.text != "" { - return " " + dimStyle.Render("filter: ") + searchStyle.Render(m.filter.text) - } - if m.activeDashboardView() == dashboardViewTabs && m.mode == modeList { - return " " + dimStyle.Render("tabs view · \u2190/\u2192 switch tab · PgUp/PgDn scroll widget · Enter detail") - } - if m.activeDashboardView() == dashboardViewSplit && m.mode == modeList { - return " " + dimStyle.Render("split view · \u2191/\u2193 select provider · PgUp/PgDn scroll pane · Enter detail") - } - if m.activeDashboardView() == dashboardViewCompare && m.mode == modeList { - return " " + dimStyle.Render("compare view · \u2190/\u2192 switch provider · PgUp/PgDn scroll active pane") - } - if m.mode == modeList && m.shouldUseWidgetScroll() && m.tileOffset > 0 { - return " " + dimStyle.Render("widget scroll active · PgUp/PgDn · Ctrl+U/Ctrl+D") - } - if m.mode == modeList && m.shouldUsePanelScroll() && m.tileOffset > 0 { - return " " + dimStyle.Render("panel scroll active · PgUp/PgDn · Home/End") - } - 
} - - if m.hasAppUpdateNotice() { - msg := "Update available: " + m.daemon.appUpdateCurrent + " -> " + m.daemon.appUpdateLatest - if action := m.appUpdateAction(); action != "" { - msg += " · " + action - } - if w > 2 { - msg = truncateToWidth(msg, w-2) - } - return " " + lipgloss.NewStyle().Foreground(colorYellow).Render(msg) - } - - return " " + helpStyle.Render("? help") -} - -func (m Model) hasAppUpdateNotice() bool { - return strings.TrimSpace(m.daemon.appUpdateCurrent) != "" && strings.TrimSpace(m.daemon.appUpdateLatest) != "" -} - -func (m Model) appUpdateHeadline() string { - if !m.hasAppUpdateNotice() { - return "" - } - return "OpenUsage update available: " + m.daemon.appUpdateCurrent + " -> " + m.daemon.appUpdateLatest -} - -func (m Model) appUpdateAction() string { - hint := strings.TrimSpace(m.daemon.appUpdateHint) - if hint == "" { - return "" - } - return "Run: " + hint -} - -func (m Model) renderList(w, h int) string { - ids := m.filteredIDs() - if len(ids) == 0 { - empty := []string{ - "", - dimStyle.Render(" Loading providers…"), - "", - lipgloss.NewStyle().Foreground(colorSubtext).Render(" Fetching usage and spend data."), - } - return padToSize(strings.Join(empty, "\n"), w, h) - } - - itemHeight := 3 // each item is 3 lines (name + summary + separator) - visibleItems := h / itemHeight - if visibleItems < 1 { - visibleItems = 1 - } - - scrollStart := 0 - if m.cursor >= visibleItems { - scrollStart = m.cursor - visibleItems + 1 - } - scrollEnd := scrollStart + visibleItems - if scrollEnd > len(ids) { - scrollEnd = len(ids) - scrollStart = scrollEnd - visibleItems - if scrollStart < 0 { - scrollStart = 0 - } - } - - var lines []string - for i := scrollStart; i < scrollEnd; i++ { - id := ids[i] - snap := m.snapshots[id] - selected := i == m.cursor - item := m.renderListItem(snap, selected, w) - lines = append(lines, item) - } - - if scrollStart > 0 { - arrow := lipgloss.NewStyle().Foreground(colorDim).Render(" ▲ " + fmt.Sprintf("%d more", 
scrollStart)) - lines = append([]string{arrow}, lines...) - } - if scrollEnd < len(ids) { - arrow := lipgloss.NewStyle().Foreground(colorDim).Render(" ▼ " + fmt.Sprintf("%d more", len(ids)-scrollEnd)) - lines = append(lines, arrow) - } - - content := strings.Join(lines, "\n") - out := padToSize(content, w, h) - if len(ids) > visibleItems && h > 0 { - rendered := strings.Split(out, "\n") - if len(rendered) > 0 { - rendered[len(rendered)-1] = renderVerticalScrollBarLine(w, scrollStart, visibleItems, len(ids)) - out = strings.Join(rendered, "\n") - } - } - return out -} - -func (m Model) renderSplitPanes(w, h int) string { - if w < 70 { - return m.renderTilesTabs(w, h) - } - - leftW := w / 3 - if leftW < minLeftWidth { - leftW = minLeftWidth - } - if leftW > maxLeftWidth { - leftW = maxLeftWidth - } - if leftW > w-34 { - leftW = w - 34 - } - if leftW < minLeftWidth || w-leftW-1 < 30 { - return m.renderTilesTabs(w, h) - } - - left := m.renderList(leftW, h) - rightW := w - leftW - 1 - right := m.renderWidgetPanelByIndex(m.cursor, rightW, h, m.tileOffset, true) - sep := renderVerticalSep(h) - - return lipgloss.JoinHorizontal(lipgloss.Top, left, sep, right) -} - -func (m Model) renderComparePanes(w, h int) string { - ids := m.filteredIDs() - if len(ids) == 0 { - return m.renderTiles(w, h) - } - if len(ids) == 1 || w < 72 { - return m.renderWidgetPanelByIndex(m.cursor, w, h, m.tileOffset, true) - } - - gapW := tileGapH - colW := (w - gapW) / 2 - if colW < 30 { - return m.renderWidgetPanelByIndex(m.cursor, w, h, m.tileOffset, true) - } - - primary := clamp(m.cursor, 0, len(ids)-1) - secondary := primary + 1 - if secondary >= len(ids) { - secondary = primary - 1 - } - if secondary < 0 { - secondary = primary - } - - left := m.renderWidgetPanelByIndex(primary, colW, h, m.tileOffset, true) - right := m.renderWidgetPanelByIndex(secondary, colW, h, 0, false) - - row := lipgloss.JoinHorizontal(lipgloss.Top, left, strings.Repeat(" ", gapW), right) - return padToSize(row, w, h) -} 
- -func (m Model) renderWidgetPanelByIndex(index, w, h, bodyOffset int, selected bool) string { - ids := m.filteredIDs() - if len(ids) == 0 || index < 0 || index >= len(ids) { - return padToSize("", w, h) - } - - id := ids[index] - snap := m.snapshots[id] - modelMixExpanded := index == m.cursor && m.expandedModelMixTiles[id] - - tileW := w - 2 - tileBorderH - if tileW < tileMinWidth { - tileW = tileMinWidth - } - contentH := h - tileBorderV - if contentH < tileMinHeight { - contentH = tileMinHeight - } - - rendered := m.renderTile(snap, selected, modelMixExpanded, tileW, contentH, bodyOffset) - return normalizeAnsiBlock(rendered, w, h) -} - -func (m Model) renderListItem(snap core.UsageSnapshot, selected bool, w int) string { - di := computeDisplayInfo(snap, dashboardWidget(snap.ProviderID)) - - icon := StatusIcon(snap.Status) - iconColor := StatusColor(snap.Status) - iconStr := lipgloss.NewStyle().Foreground(iconColor).Render(icon) - - nameStyle := lipgloss.NewStyle().Foreground(colorText) - if selected { - nameStyle = nameStyle.Bold(true).Foreground(colorLavender) - } - - badge := StatusBadge(snap.Status) - var tagRendered string - if di.tagEmoji != "" && di.tagLabel != "" { - tc := tagColor(di.tagLabel) - tagRendered = lipgloss.NewStyle().Foreground(tc).Render(di.tagEmoji+" "+di.tagLabel) + " " - } - rightPart := tagRendered + badge - rightW := lipgloss.Width(rightPart) - - name := snap.AccountID - maxName := w - rightW - 6 // icon + spaces + gap - if maxName < 5 { - maxName = 5 - } - if len(name) > maxName { - name = name[:maxName-1] + "…" - } - - namePart := fmt.Sprintf(" %s %s", iconStr, nameStyle.Render(name)) - nameW := lipgloss.Width(namePart) - gapLen := w - nameW - rightW - 1 - if gapLen < 1 { - gapLen = 1 - } - line1 := namePart + strings.Repeat(" ", gapLen) + rightPart - - summary := di.summary - summaryStyle := lipgloss.NewStyle().Foreground(colorText).Bold(true) - - miniGauge := "" - if di.gaugePercent >= 0 && w > 25 { - gaugeW := 8 - if w < 35 { - 
gaugeW = 5 - } - miniGauge = " " + RenderMiniGauge(di.gaugePercent, gaugeW) - } - - summaryMaxW := w - 5 - lipgloss.Width(miniGauge) - if summaryMaxW < 5 { - summaryMaxW = 5 - } - if len(summary) > summaryMaxW { - summary = summary[:summaryMaxW-1] + "…" - } - - line2 := " " + summaryStyle.Render(summary) + miniGauge - - line3 := " " + lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", w-4)) - - result := line1 + "\n" + line2 + "\n" + line3 - - if selected { - indicator := lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("┃") - rlines := strings.Split(result, "\n") - for i, l := range rlines { - if len(l) > 0 { - rlines[i] = indicator + l[1:] - } - } - result = strings.Join(rlines, "\n") - } - - return result -} - -func (m Model) renderDetailPanel(w, h int) string { - ids := m.filteredIDs() - if len(ids) == 0 || m.cursor >= len(ids) { - return padToSize("", w, h) - } - - snap := m.snapshots[ids[m.cursor]] - - tabs := DetailTabs(snap) - activeTab := m.detailTab - if activeTab >= len(tabs) { - activeTab = len(tabs) - 1 - } - if activeTab < 0 { - activeTab = 0 - } - - content := m.cachedDetailContent(ids[m.cursor], snap, w-2, activeTab) - - lines := strings.Split(content, "\n") - totalLines := len(lines) - - offset := m.detailOffset - if offset > totalLines-h { - offset = totalLines - h - } - if offset < 0 { - offset = 0 - } - - end := offset + h - if end > totalLines { - end = totalLines - } - - visible := lines[offset:end] - - for len(visible) < h { - visible = append(visible, "") - } - - result := strings.Join(visible, "\n") - - if m.mode == modeDetail { - rlines := strings.Split(result, "\n") - if offset > 0 && len(rlines) > 0 { - arrow := lipgloss.NewStyle().Foreground(colorAccent).Render(" ▲ scroll up") - rlines[0] = arrow - } - if len(rlines) > 1 { - if bar := renderVerticalScrollBarLine(w-2, offset, h, totalLines); bar != "" { - rlines[len(rlines)-1] = bar - } else if end < totalLines { - arrow := 
lipgloss.NewStyle().Foreground(colorAccent).Render(" ▼ more below") - rlines[len(rlines)-1] = arrow - } - } - result = strings.Join(rlines, "\n") - } - - return lipgloss.NewStyle().Width(w).Padding(0, 1).Render(result) -} - -func renderVerticalSep(h int) string { - style := lipgloss.NewStyle().Foreground(colorSurface1) - lines := make([]string, h) - for i := range lines { - lines[i] = style.Render("┃") - } - return strings.Join(lines, "\n") -} - func (m *Model) applyDashboardConfig(dashboardCfg config.DashboardConfig, accounts []core.AccountConfig) { m.dashboardView = normalizeDashboardViewMode(dashboardCfg.View) diff --git a/internal/tui/model_panels.go b/internal/tui/model_panels.go new file mode 100644 index 0000000..80c9c0a --- /dev/null +++ b/internal/tui/model_panels.go @@ -0,0 +1,257 @@ +package tui + +import ( + "fmt" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (m Model) renderList(w, h int) string { + ids := m.filteredIDs() + if len(ids) == 0 { + empty := []string{ + "", + dimStyle.Render(" Loading providers…"), + "", + lipgloss.NewStyle().Foreground(colorSubtext).Render(" Fetching usage and spend data."), + } + return padToSize(strings.Join(empty, "\n"), w, h) + } + + itemHeight := 3 + visibleItems := h / itemHeight + if visibleItems < 1 { + visibleItems = 1 + } + + scrollStart := 0 + if m.cursor >= visibleItems { + scrollStart = m.cursor - visibleItems + 1 + } + scrollEnd := scrollStart + visibleItems + if scrollEnd > len(ids) { + scrollEnd = len(ids) + scrollStart = scrollEnd - visibleItems + if scrollStart < 0 { + scrollStart = 0 + } + } + + var lines []string + for i := scrollStart; i < scrollEnd; i++ { + snap := m.snapshots[ids[i]] + lines = append(lines, m.renderListItem(snap, i == m.cursor, w)) + } + + if scrollStart > 0 { + lines = append([]string{lipgloss.NewStyle().Foreground(colorDim).Render(" ▲ " + fmt.Sprintf("%d more", scrollStart))}, lines...) 
+ } + if scrollEnd < len(ids) { + lines = append(lines, lipgloss.NewStyle().Foreground(colorDim).Render(" ▼ "+fmt.Sprintf("%d more", len(ids)-scrollEnd))) + } + + content := strings.Join(lines, "\n") + out := padToSize(content, w, h) + if len(ids) > visibleItems && h > 0 { + rendered := strings.Split(out, "\n") + if len(rendered) > 0 { + rendered[len(rendered)-1] = renderVerticalScrollBarLine(w, scrollStart, visibleItems, len(ids)) + out = strings.Join(rendered, "\n") + } + } + return out +} + +func (m Model) renderSplitPanes(w, h int) string { + if w < 70 { + return m.renderTilesTabs(w, h) + } + + leftW := w / 3 + if leftW < minLeftWidth { + leftW = minLeftWidth + } + if leftW > maxLeftWidth { + leftW = maxLeftWidth + } + if leftW > w-34 { + leftW = w - 34 + } + if leftW < minLeftWidth || w-leftW-1 < 30 { + return m.renderTilesTabs(w, h) + } + + left := m.renderList(leftW, h) + rightW := w - leftW - 1 + right := m.renderWidgetPanelByIndex(m.cursor, rightW, h, m.tileOffset, true) + return lipgloss.JoinHorizontal(lipgloss.Top, left, renderVerticalSep(h), right) +} + +func (m Model) renderComparePanes(w, h int) string { + ids := m.filteredIDs() + if len(ids) == 0 { + return m.renderTiles(w, h) + } + if len(ids) == 1 || w < 72 { + return m.renderWidgetPanelByIndex(m.cursor, w, h, m.tileOffset, true) + } + + gapW := tileGapH + colW := (w - gapW) / 2 + if colW < 30 { + return m.renderWidgetPanelByIndex(m.cursor, w, h, m.tileOffset, true) + } + + primary := clamp(m.cursor, 0, len(ids)-1) + secondary := primary + 1 + if secondary >= len(ids) { + secondary = primary - 1 + } + if secondary < 0 { + secondary = primary + } + + left := m.renderWidgetPanelByIndex(primary, colW, h, m.tileOffset, true) + right := m.renderWidgetPanelByIndex(secondary, colW, h, 0, false) + return padToSize(lipgloss.JoinHorizontal(lipgloss.Top, left, strings.Repeat(" ", gapW), right), w, h) +} + +func (m Model) renderWidgetPanelByIndex(index, w, h, bodyOffset int, selected bool) string { + ids := 
m.filteredIDs() + if len(ids) == 0 || index < 0 || index >= len(ids) { + return padToSize("", w, h) + } + + id := ids[index] + snap := m.snapshots[id] + modelMixExpanded := index == m.cursor && m.expandedModelMixTiles[id] + + tileW := w - 2 - tileBorderH + if tileW < tileMinWidth { + tileW = tileMinWidth + } + contentH := h - tileBorderV + if contentH < tileMinHeight { + contentH = tileMinHeight + } + + rendered := m.renderTile(snap, selected, modelMixExpanded, tileW, contentH, bodyOffset) + return normalizeAnsiBlock(rendered, w, h) +} + +func (m Model) renderListItem(snap core.UsageSnapshot, selected bool, w int) string { + di := computeDisplayInfo(snap, dashboardWidget(snap.ProviderID)) + + iconStr := lipgloss.NewStyle().Foreground(StatusColor(snap.Status)).Render(StatusIcon(snap.Status)) + nameStyle := lipgloss.NewStyle().Foreground(colorText) + if selected { + nameStyle = nameStyle.Bold(true).Foreground(colorLavender) + } + + badge := StatusBadge(snap.Status) + tagRendered := "" + if di.tagEmoji != "" && di.tagLabel != "" { + tagRendered = lipgloss.NewStyle().Foreground(tagColor(di.tagLabel)).Render(di.tagEmoji+" "+di.tagLabel) + " " + } + rightPart := tagRendered + badge + rightW := lipgloss.Width(rightPart) + + name := snap.AccountID + maxName := w - rightW - 6 + if maxName < 5 { + maxName = 5 + } + if len(name) > maxName { + name = name[:maxName-1] + "…" + } + + namePart := fmt.Sprintf(" %s %s", iconStr, nameStyle.Render(name)) + gapLen := w - lipgloss.Width(namePart) - rightW - 1 + if gapLen < 1 { + gapLen = 1 + } + line1 := namePart + strings.Repeat(" ", gapLen) + rightPart + + summary := di.summary + miniGauge := "" + if di.gaugePercent >= 0 && w > 25 { + gaugeW := 8 + if w < 35 { + gaugeW = 5 + } + miniGauge = " " + RenderMiniGauge(di.gaugePercent, gaugeW) + } + summaryMaxW := w - 5 - lipgloss.Width(miniGauge) + if summaryMaxW < 5 { + summaryMaxW = 5 + } + if len(summary) > summaryMaxW { + summary = summary[:summaryMaxW-1] + "…" + } + + result := line1 + 
"\n" + + " " + lipgloss.NewStyle().Foreground(colorText).Bold(true).Render(summary) + miniGauge + "\n" + + " " + lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", w-4)) + + if !selected { + return result + } + + indicator := lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("┃") + lines := strings.Split(result, "\n") + for i, line := range lines { + if len(line) > 0 { + lines[i] = indicator + line[1:] + } + } + return strings.Join(lines, "\n") +} + +func (m Model) renderDetailPanel(w, h int) string { + ids := m.filteredIDs() + if len(ids) == 0 || m.cursor >= len(ids) { + return padToSize("", w, h) + } + + snap := m.snapshots[ids[m.cursor]] + activeTab := clamp(m.detailTab, 0, len(DetailTabs(snap))-1) + content := m.cachedDetailContent(ids[m.cursor], snap, w-2, activeTab) + + lines := strings.Split(content, "\n") + totalLines := len(lines) + offset := clamp(m.detailOffset, 0, max(0, totalLines-h)) + end := min(offset+h, totalLines) + visible := append([]string(nil), lines[offset:end]...) 
+ for len(visible) < h { + visible = append(visible, "") + } + + result := strings.Join(visible, "\n") + if m.mode == modeDetail { + rendered := strings.Split(result, "\n") + if offset > 0 && len(rendered) > 0 { + rendered[0] = lipgloss.NewStyle().Foreground(colorAccent).Render(" ▲ scroll up") + } + if len(rendered) > 1 { + if bar := renderVerticalScrollBarLine(w-2, offset, h, totalLines); bar != "" { + rendered[len(rendered)-1] = bar + } else if end < totalLines { + rendered[len(rendered)-1] = lipgloss.NewStyle().Foreground(colorAccent).Render(" ▼ more below") + } + } + result = strings.Join(rendered, "\n") + } + + return lipgloss.NewStyle().Width(w).Padding(0, 1).Render(result) +} + +func renderVerticalSep(h int) string { + style := lipgloss.NewStyle().Foreground(colorSurface1) + lines := make([]string, h) + for i := range lines { + lines[i] = style.Render("┃") + } + return strings.Join(lines, "\n") +} diff --git a/internal/tui/model_view.go b/internal/tui/model_view.go new file mode 100644 index 0000000..d019950 --- /dev/null +++ b/internal/tui/model_view.go @@ -0,0 +1,242 @@ +package tui + +import ( + "fmt" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (m Model) View() string { + if m.width < 30 || m.height < 8 { + return lipgloss.NewStyle(). + Foreground(colorDim). + Render("\n Terminal too small. 
Resize to at least 30×8.") + } + if !m.hasData { + return m.renderSplash(m.width, m.height) + } + if m.showHelp { + return m.renderHelpOverlay(m.width, m.height) + } + view := m.renderDashboard() + if m.settings.show { + return m.renderSettingsModalOverlay() + } + return view +} + +func (m Model) renderDashboardContent(w, contentH int) string { + if m.mode == modeDetail { + return m.renderDetailPanel(w, contentH) + } + switch m.activeDashboardView() { + case dashboardViewTabs: + return m.renderTilesTabs(w, contentH) + case dashboardViewSplit: + return m.renderSplitPanes(w, contentH) + case dashboardViewCompare: + return m.renderComparePanes(w, contentH) + case dashboardViewStacked: + return m.renderTilesSingleColumn(w, contentH) + default: + return m.renderTiles(w, contentH) + } +} + +func (m Model) renderHeader(w int) string { + bolt := PulseChar( + lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("⚡"), + lipgloss.NewStyle().Foreground(colorDim).Bold(true).Render("⚡"), + m.animFrame, + ) + brandText := RenderGradientText("OpenUsage", m.animFrame) + + tabs := m.renderScreenTabs() + + spinnerStr := "" + if m.refreshing { + frame := m.animFrame % len(SpinnerFrames) + spinnerStr = " " + lipgloss.NewStyle().Foreground(colorAccent).Render(SpinnerFrames[frame]) + } + + ids := m.filteredIDs() + unmappedProviders := m.telemetryUnmappedProviders() + + okCount, warnCount, errCount := 0, 0, 0 + for _, id := range ids { + snap := m.snapshots[id] + switch snap.Status { + case core.StatusOK: + okCount++ + case core.StatusNearLimit: + warnCount++ + case core.StatusLimited, core.StatusError: + errCount++ + } + } + + var info string + + if m.settings.show { + info = m.settingsModalInfo() + } else { + switch m.screen { + case screenAnalytics: + info = dimStyle.Render("spend analysis") + if m.analyticsFilter.text != "" { + info += " (filtered)" + } + default: + info = fmt.Sprintf("⊞ %d providers", len(ids)) + if m.filter.text != "" { + info += " (filtered)" + } + info += 
" · " + m.dashboardViewStatusLabel() + } + } + if !m.settings.show { + info += " · " + m.timeWindow.Label() + } + if !m.settings.show && len(unmappedProviders) > 0 { + info += " · detected additional providers, check settings" + } + + statusInfo := "" + if okCount > 0 { + dot := PulseChar("●", "◉", m.animFrame) + statusInfo += lipgloss.NewStyle().Foreground(colorGreen).Render(fmt.Sprintf(" %d%s", okCount, dot)) + } + if warnCount > 0 { + dot := PulseChar("◐", "◑", m.animFrame) + statusInfo += lipgloss.NewStyle().Foreground(colorYellow).Render(fmt.Sprintf(" %d%s", warnCount, dot)) + } + if errCount > 0 { + dot := PulseChar("✗", "✕", m.animFrame) + statusInfo += lipgloss.NewStyle().Foreground(colorRed).Render(fmt.Sprintf(" %d%s", errCount, dot)) + } + if len(unmappedProviders) > 0 { + statusInfo += lipgloss.NewStyle(). + Foreground(colorPeach). + Render(fmt.Sprintf(" ⚠ %d unmapped", len(unmappedProviders))) + } + + infoRendered := lipgloss.NewStyle().Foreground(colorSubtext).Render(info) + + left := bolt + " " + brandText + " " + tabs + statusInfo + spinnerStr + gap := w - lipgloss.Width(left) - lipgloss.Width(infoRendered) + if gap < 1 { + gap = 1 + } + + line := left + strings.Repeat(" ", gap) + infoRendered + return line + "\n" + m.renderGradientSeparator(w) +} + +func (m Model) renderGradientSeparator(w int) string { + if w <= 0 { + return "" + } + sepStyle := lipgloss.NewStyle().Foreground(colorSurface1) + return sepStyle.Render(strings.Repeat("━", w)) +} + +func (m Model) renderScreenTabs() string { + screens := m.availableScreens() + if len(screens) <= 1 { + return "" + } + var parts []string + for i, screen := range screens { + label := screenLabelByTab[screen] + tabStr := fmt.Sprintf("%d:%s", i+1, label) + if screen == m.screen { + parts = append(parts, screenTabActiveStyle.Render(tabStr)) + } else { + parts = append(parts, screenTabInactiveStyle.Render(tabStr)) + } + } + return strings.Join(parts, "") +} + +func (m Model) renderFooter(w int) string { + sep 
:= lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("━", w)) + statusLine := m.renderFooterStatusLine(w) + return sep + "\n" + statusLine +} + +func (m Model) renderFooterStatusLine(w int) string { + searchStyle := lipgloss.NewStyle().Foreground(colorSapphire) + + switch { + case m.settings.show: + if m.settings.status != "" { + return " " + dimStyle.Render(m.settings.status) + } + return " " + helpStyle.Render("? help") + case m.screen == screenAnalytics: + if m.analyticsFilter.active { + cursor := PulseChar("█", "▌", m.animFrame) + return " " + dimStyle.Render("search: ") + searchStyle.Render(m.analyticsFilter.text+cursor) + } + if m.analyticsFilter.text != "" { + return " " + dimStyle.Render("filter: ") + searchStyle.Render(m.analyticsFilter.text) + } + default: + if m.filter.active { + cursor := PulseChar("█", "▌", m.animFrame) + return " " + dimStyle.Render("search: ") + searchStyle.Render(m.filter.text+cursor) + } + if m.filter.text != "" { + return " " + dimStyle.Render("filter: ") + searchStyle.Render(m.filter.text) + } + if m.activeDashboardView() == dashboardViewTabs && m.mode == modeList { + return " " + dimStyle.Render("tabs view · ←/→ switch tab · PgUp/PgDn scroll widget · Enter detail") + } + if m.activeDashboardView() == dashboardViewSplit && m.mode == modeList { + return " " + dimStyle.Render("split view · ↑/↓ select provider · PgUp/PgDn scroll pane · Enter detail") + } + if m.activeDashboardView() == dashboardViewCompare && m.mode == modeList { + return " " + dimStyle.Render("compare view · ←/→ switch provider · PgUp/PgDn scroll active pane") + } + if m.mode == modeList && m.shouldUseWidgetScroll() && m.tileOffset > 0 { + return " " + dimStyle.Render("widget scroll active · PgUp/PgDn · Ctrl+U/Ctrl+D") + } + if m.mode == modeList && m.shouldUsePanelScroll() && m.tileOffset > 0 { + return " " + dimStyle.Render("panel scroll active · PgUp/PgDn · Home/End") + } + } + + if m.hasAppUpdateNotice() { + msg := "Update available: " + 
m.daemon.appUpdateCurrent + " -> " + m.daemon.appUpdateLatest + if action := m.appUpdateAction(); action != "" { + msg += " · " + action + } + if w > 2 { + msg = truncateToWidth(msg, w-2) + } + return " " + lipgloss.NewStyle().Foreground(colorYellow).Render(msg) + } + + return " " + helpStyle.Render("? help") +} + +func (m Model) hasAppUpdateNotice() bool { + return strings.TrimSpace(m.daemon.appUpdateCurrent) != "" && strings.TrimSpace(m.daemon.appUpdateLatest) != "" +} + +func (m Model) appUpdateHeadline() string { + if !m.hasAppUpdateNotice() { + return "" + } + return "OpenUsage update available: " + m.daemon.appUpdateCurrent + " -> " + m.daemon.appUpdateLatest +} + +func (m Model) appUpdateAction() string { + hint := strings.TrimSpace(m.daemon.appUpdateHint) + if hint == "" { + return "" + } + return "Run: " + hint +} diff --git a/internal/tui/settings_modal.go b/internal/tui/settings_modal.go index 66154af..0ecb084 100644 --- a/internal/tui/settings_modal.go +++ b/internal/tui/settings_modal.go @@ -2,11 +2,6 @@ package tui import ( "fmt" - "os" - "strings" - - "github.com/charmbracelet/lipgloss" - "github.com/janekbaraniewski/openusage/internal/core" ) type settingsModalTab int @@ -91,626 +86,3 @@ func (m Model) settingsModalInfo() string { } return info } - -func (m Model) renderSettingsProvidersBody(w, h int) string { - ids := m.settingsIDs() - - enabledCount := 0 - for _, id := range ids { - if m.isProviderEnabled(id) { - enabledCount++ - } - } - - lines := settingsBodyHeaderLines( - "Provider Visibility & Order", - fmt.Sprintf("%d/%d enabled · Shift+J/K reorder · Enter toggle", enabledCount, len(ids)), - ) - accountW := 26 - providerW := w - accountW - 16 - if providerW < 10 { - providerW = 10 - accountW = w - providerW - 16 - } - if accountW < 12 { - accountW = 12 - } - lines = append(lines, dimStyle.Render(fmt.Sprintf(" %-3s %-3s %-*s %-*s", "#", "ON", accountW, "ACCOUNT", providerW, "PROVIDER"))) - lines = append(lines, settingsBodyRule(w)) - if 
len(ids) == 0 { - lines = append(lines, dimStyle.Render("No providers available.")) - return padToSize(strings.Join(lines, "\n"), w, h) - } - - cursor := clamp(m.settings.cursor, 0, len(ids)-1) - listHeight := h - len(lines) - if listHeight < 1 { - listHeight = 1 - } - start, end := listWindow(len(ids), cursor, listHeight) - - for i := start; i < end; i++ { - id := ids[i] - providerID := m.accountProviders[id] - if snap, ok := m.snapshots[id]; ok && snap.ProviderID != "" { - providerID = snap.ProviderID - } - if providerID == "" { - providerID = "unknown" - } - - onText := "OFF" - onStyle := lipgloss.NewStyle().Foreground(colorRed) - if m.isProviderEnabled(id) { - onText = "ON " - onStyle = lipgloss.NewStyle().Foreground(colorGreen) - } - - prefix := " " - if i == cursor { - prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") - } - account := truncateToWidth(id, accountW) - provider := truncateToWidth(providerID, providerW) - line := fmt.Sprintf("%s%-3d %s %-*s %-*s", prefix, i+1, onStyle.Render(onText), accountW, account, providerW, provider) - lines = append(lines, line) - } - - return padToSize(strings.Join(lines, "\n"), w, h) -} - -func (m Model) renderSettingsWidgetSectionsBody(w, h int) string { - return m.renderSettingsWidgetSectionsList(w, h) -} - -func (m Model) renderSettingsWidgetSectionsList(w, h int) string { - entries := m.widgetSectionEntries() - - visibleCount := 0 - for _, entry := range entries { - if entry.Enabled { - visibleCount++ - } - } - - lines := settingsBodyHeaderLines( - "Global Widget Sections", - fmt.Sprintf("%d/%d sections visible · applies to all providers", visibleCount, len(entries)), - ) - hideBox := "☐" - hideBoxStyle := lipgloss.NewStyle().Foreground(colorRed) - if m.hideSectionsWithNoData { - hideBox = "☑" - hideBoxStyle = lipgloss.NewStyle().Foreground(colorGreen) - } - lines = append(lines, fmt.Sprintf("Hide sections with no data: %s %s", hideBoxStyle.Render(hideBox), dimStyle.Render("press h to 
toggle"))) - lines = append(lines, "") - nameW := w - 24 - if nameW < 12 { - nameW = 12 - } - lines = append(lines, dimStyle.Render(fmt.Sprintf(" %-3s %-3s %-*s %s", "#", "ON", nameW, "SECTION", "ID"))) - lines = append(lines, settingsBodyRule(w)) - if len(entries) == 0 { - lines = append(lines, dimStyle.Render("No dashboard sections available.")) - return padToSize(strings.Join(lines, "\n"), w, h) - } - - cursor := clamp(m.settings.sectionRowCursor, 0, len(entries)-1) - listHeight := h - len(lines) - if listHeight < 1 { - listHeight = 1 - } - start, end := listWindow(len(entries), cursor, listHeight) - - for i := start; i < end; i++ { - entry := entries[i] - prefix := " " - if i == cursor { - prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") - } - - onText := "OFF" - onStyle := lipgloss.NewStyle().Foreground(colorRed) - if entry.Enabled { - onText = "ON " - onStyle = lipgloss.NewStyle().Foreground(colorGreen) - } - - name := settingsSectionLabel(entry.ID) - name = truncateToWidth(name, nameW) - line := fmt.Sprintf("%s%-3d %s %-*s %s", prefix, i+1, onStyle.Render(onText), nameW, name, dimStyle.Render(string(entry.ID))) - lines = append(lines, line) - } - - return padToSize(strings.Join(lines, "\n"), w, h) -} - -func (m Model) renderSettingsWidgetSectionsPreview(w, h int) string { - if w < 24 || h < 5 { - return padToSize(dimStyle.Render("Live preview unavailable at this size."), w, h) - } - - title := lipgloss.NewStyle().Foreground(colorTeal).Bold(true).Render("Live Preview") - hint := dimStyle.Render("Claude Code preset · synthetic data · PgUp/PgDn scroll") - lines := []string{title, hint, ""} - - tileW := w - if tileW > 2 { - tileW -= 2 - } - if tileW < tileMinWidth { - tileW = tileMinWidth - } - - // Render full tile content to avoid nested-scroll artifacts inside the preview panel. 
- previewTile := m.renderTile(settingsWidgetSectionsPreviewSnapshot(), false, false, tileW, 0, 0) - all := append(lines, strings.Split(previewTile, "\n")...) - maxOffset := len(all) - h - if maxOffset < 0 { - maxOffset = 0 - } - offset := clamp(m.settings.previewOffset, 0, maxOffset) - visible := all - if len(visible) > h { - visible = visible[offset:] - if len(visible) > h { - visible = visible[:h] - } - } - if len(visible) > 0 && offset > 0 { - visible[0] = dimStyle.Render(" ▲ preview above") - } - if len(visible) > 0 && offset+h < len(all) { - visible[len(visible)-1] = dimStyle.Render(" ▼ preview below") - } - return padToSize(strings.Join(visible, "\n"), w, h) -} - -func (m Model) renderSettingsWidgetPreviewPanel(contentW, contentH int) string { - innerW := contentW - 4 - if innerW < 24 { - innerW = contentW - } - bodyH := contentH - 1 - if bodyH < 4 { - bodyH = 4 - } - title := lipgloss.NewStyle().Bold(true).Foreground(colorRosewater).Render("Widget Preview") - body := m.renderSettingsWidgetSectionsPreview(innerW, bodyH) - lines := []string{ - title, - lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", innerW)), - body, - } - return lipgloss.NewStyle(). - Border(lipgloss.RoundedBorder()). - BorderForeground(colorAccent). - Background(colorBase). - Padding(1, 2, 0, 2). - Width(contentW). - Render(strings.Join(lines, "\n")) -} - -func (m Model) settingsWidgetPreviewBodyHeight(contentW, contentH int, sideBySide bool) int { - minBodyH := settingsWidgetPreviewMinBodyH - maxBodyH := contentH - if sideBySide { - // Keep breathing room around the combined modal while allowing growth. - maxBodyH = m.height - 12 - } else { - // Stacked layout should stay balanced and avoid dominating the viewport. 
- maxBodyH = (m.height - 12) / 2 - } - if maxBodyH < minBodyH { - maxBodyH = minBodyH - } - - innerW := contentW - 4 - if innerW < 24 { - innerW = 24 - } - targetBodyH := m.settingsWidgetPreviewContentLineCount(innerW) - if targetBodyH < minBodyH { - targetBodyH = minBodyH - } - if targetBodyH > maxBodyH { - targetBodyH = maxBodyH - } - - // renderSettingsWidgetPreviewPanel reserves one line for panel internals. - return targetBodyH + 1 -} - -func (m Model) settingsWidgetPreviewContentLineCount(innerW int) int { - if innerW < 24 { - return 4 - } - tileW := innerW - if tileW > 2 { - tileW -= 2 - } - if tileW < tileMinWidth { - tileW = tileMinWidth - } - previewTile := m.renderTile(settingsWidgetSectionsPreviewSnapshot(), false, false, tileW, 0, 0) - // Includes preview title line, hint line, and spacing line. - return 3 + len(strings.Split(previewTile, "\n")) -} - -func centerPanelVertically(panel string, targetHeight int) string { - current := lipgloss.Height(panel) - if current >= targetHeight { - return panel - } - diff := targetHeight - current - top := diff / 2 - bottom := diff - top - return strings.Repeat("\n", top) + panel + strings.Repeat("\n", bottom) -} - -func (m Model) renderSettingsThemeBody(w, h int) string { - themes := AvailableThemes() - activeThemeIdx := ActiveThemeIndex() - activeThemeName := "none" - if activeThemeIdx >= 0 && activeThemeIdx < len(themes) { - activeThemeName = themes[activeThemeIdx].Name - } - lines := settingsBodyHeaderLines( - "Theme Selection", - fmt.Sprintf("%d themes available · active: %s", len(themes), activeThemeName), - ) - nameW := w - 16 - if nameW < 12 { - nameW = 12 - } - lines = append(lines, dimStyle.Render(fmt.Sprintf(" %-3s %-3s %-3s %-*s", "#", "CUR", "ACT", nameW, "THEME"))) - lines = append(lines, settingsBodyRule(w)) - if len(themes) == 0 { - lines = append(lines, dimStyle.Render("No themes available.")) - return padToSize(strings.Join(lines, "\n"), w, h) - } - - cursor := clamp(m.settings.themeCursor, 0, 
len(themes)-1) - listHeight := h - len(lines) - if listHeight < 1 { - listHeight = 1 - } - start, end := listWindow(len(themes), cursor, listHeight) - - for i := start; i < end; i++ { - theme := themes[i] - prefix := " " - if i == cursor { - prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") - } - - current := "." - if i == activeThemeIdx { - current = "*" - } - selected := "." - if i == cursor { - selected = ">" - } - name := truncateToWidth(theme.Name, nameW) - lines = append(lines, fmt.Sprintf("%s%-3d %-3s %-3s %-*s", prefix, i+1, selected, current, nameW, name)) - } - - return padToSize(strings.Join(lines, "\n"), w, h) -} - -func (m Model) renderSettingsViewBody(w, h int) string { - configured := m.configuredDashboardView() - active := m.activeDashboardView() - lines := settingsBodyHeaderLines( - "Dashboard View Mode", - fmt.Sprintf("configured: %s · active: %s", configured, active), - ) - lines = append(lines, dimStyle.Render(" CUR MODE")) - lines = append(lines, settingsBodyRule(w)) - if len(dashboardViewOptions) == 0 { - lines = append(lines, dimStyle.Render("No dashboard views available.")) - return padToSize(strings.Join(lines, "\n"), w, h) - } - - cursor := clamp(m.settings.viewCursor, 0, len(dashboardViewOptions)-1) - listHeight := h - len(lines) - if listHeight < 1 { - listHeight = 1 - } - start, end := listWindow(len(dashboardViewOptions), cursor, listHeight) - - for i := start; i < end; i++ { - option := dashboardViewOptions[i] - - prefix := " " - if i == cursor { - prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") - } - - current := " " - if option.ID == configured { - current = lipgloss.NewStyle().Foreground(colorGreen).Bold(true).Render("● ") - } - - label := option.Label - if option.ID == active && option.ID != configured { - label += " (auto)" - } - - lines = append(lines, fmt.Sprintf("%s%s%s", prefix, current, label)) - lines = append(lines, " "+dimStyle.Render(option.Description)) - } - - 
return padToSize(strings.Join(lines, "\n"), w, h) -} - -// apiKeysTabIDs returns account IDs for the API Keys tab, including -// unregistered API-key providers that the user can configure. -func (m Model) apiKeysTabIDs() []string { - registeredProviders := make(map[string]bool) - var ids []string - for _, id := range m.providerOrder { - providerID := m.accountProviders[id] - if isAPIKeyProvider(providerID) { - ids = append(ids, id) - registeredProviders[providerID] = true - } - } - for _, entry := range apiKeyProviderEntries() { - if registeredProviders[entry.ProviderID] { - continue - } - ids = append(ids, entry.AccountID) - } - return ids -} - -// providerForAccountID looks up the provider ID for an account, falling back -// to the default API-key account mapping for unregistered providers. -func providerForAccountID(accountID string, accountProviders map[string]string) string { - if p, ok := accountProviders[accountID]; ok && p != "" { - return p - } - for _, entry := range apiKeyProviderEntries() { - if entry.AccountID == accountID { - return entry.ProviderID - } - } - return "" -} - -func maskAPIKey(key string) string { - if len(key) <= 12 { - return key - } - return key[:8] + "..." 
+ key[len(key)-4:] -} - -func (m Model) renderSettingsAPIKeysBody(w, h int) string { - ids := m.apiKeysTabIDs() - - configuredCount := 0 - for _, id := range ids { - providerID := providerForAccountID(id, m.accountProviders) - if !isAPIKeyProvider(providerID) { - continue - } - if envVar := envVarForProvider(providerID); envVar != "" && os.Getenv(envVar) != "" { - configuredCount++ - continue - } - if snap, ok := m.snapshots[id]; ok && snap.Status == core.StatusOK { - configuredCount++ - } - } - - lines := settingsBodyHeaderLines( - "API Key Management", - fmt.Sprintf("%d/%d configured (env or validated)", configuredCount, len(ids)), - ) - accountW := 20 - envW := w - accountW - 18 - if envW < 10 { - envW = 10 - accountW = w - envW - 18 - } - if accountW < 10 { - accountW = 10 - } - lines = append(lines, dimStyle.Render(fmt.Sprintf(" %-3s %-5s %-*s %-*s", "#", "STAT", accountW, "ACCOUNT", envW, "ENV VAR"))) - lines = append(lines, settingsBodyRule(w)) - if len(ids) == 0 { - lines = append(lines, dimStyle.Render("No API-key providers available.")) - return padToSize(strings.Join(lines, "\n"), w, h) - } - - cursor := clamp(m.settings.cursor, 0, len(ids)-1) - listHeight := h - len(lines) - if listHeight < 1 { - listHeight = 1 - } - start, end := listWindow(len(ids), cursor, listHeight) - - for i := start; i < end; i++ { - id := ids[i] - providerID := providerForAccountID(id, m.accountProviders) - if snap, ok := m.snapshots[id]; ok && snap.ProviderID != "" { - providerID = snap.ProviderID - } - if providerID == "" { - providerID = "unknown" - } - - prefix := " " - if i == cursor { - prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") - } - - if !isAPIKeyProvider(providerID) { - line := fmt.Sprintf("%s%-3d %-5s %-*s %-*s", prefix, i+1, "N/A", accountW, truncateToWidth(id, accountW), envW, "-") - lines = append(lines, line) - continue - } - - envVar := envVarForProvider(providerID) - - var statusText string - if snap, ok := m.snapshots[id]; ok && 
snap.Status == core.StatusOK { - statusText = "OK" - } else if envVar != "" && os.Getenv(envVar) != "" { - statusText = "ENV" - } else { - statusText = "MISS" - } - - account := truncateToWidth(id, accountW) - envLabel := "-" - if envVar != "" { - envLabel = envVar - } - envLabel = truncateToWidth(envLabel, envW) - - if m.settings.apiKeyEditing && i == cursor { - masked := maskAPIKey(m.settings.apiKeyInput) - inputStyle := lipgloss.NewStyle().Foreground(colorSapphire) - cursorChar := PulseChar("█", "▌", m.animFrame) - line := fmt.Sprintf("%s%-3d %-5s %-*s %-*s", prefix, i+1, statusText, accountW, account, envW, envLabel) - lines = append(lines, line) - keyLine := fmt.Sprintf(" key: %s", inputStyle.Render(masked+cursorChar)) - if m.settings.apiKeyStatus != "" { - keyLine += " " + dimStyle.Render(m.settings.apiKeyStatus) - } - lines = append(lines, keyLine) - } else { - line := fmt.Sprintf("%s%-3d %-5s %-*s %-*s", prefix, i+1, statusText, accountW, account, envW, envLabel) - lines = append(lines, line) - } - } - - return padToSize(strings.Join(lines, "\n"), w, h) -} - -func (m Model) renderSettingsTelemetryBody(w, h int) string { - lines := settingsBodyHeaderLines( - "Telemetry & Time Window", - "Choose aggregation window and map raw telemetry providers", - ) - lines = append(lines, settingsBodyRule(w)) - lines = append(lines, "") - - // Time window selector - lines = append(lines, lipgloss.NewStyle().Foreground(colorTeal).Bold(true).Render("Time Window")+" "+dimStyle.Render("press w or select below")) - lines = append(lines, "") - for i, tw := range core.ValidTimeWindows { - prefix := " " - if i == m.settings.cursor { - prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") - } - current := " " - if tw == m.timeWindow { - current = lipgloss.NewStyle().Foreground(colorGreen).Bold(true).Render("● ") - } - lines = append(lines, fmt.Sprintf("%s%s%s", prefix, current, tw.Label())) - } - lines = append(lines, "") - - // Telemetry provider mapping 
section - unmapped := m.telemetryUnmappedProviders() - hints := m.telemetryProviderLinkHints() - configured := m.configuredProviderIDs() - - if len(unmapped) == 0 { - lines = append(lines, lipgloss.NewStyle().Foreground(colorGreen).Render("All telemetry providers are mapped.")) - } else { - lines = append(lines, lipgloss.NewStyle().Foreground(colorPeach).Bold(true).Render("Detected additional telemetry providers:")) - for _, providerID := range unmapped { - lines = append(lines, " - "+providerID) - } - lines = append(lines, "") - lines = append(lines, "Map them in settings.json under telemetry.provider_links:") - lines = append(lines, " =") - if len(hints) > 0 { - lines = append(lines, "") - lines = append(lines, "Hint:") - lines = append(lines, " "+hints[0]) - } - if len(configured) > 0 { - lines = append(lines, "") - lines = append(lines, "Configured provider IDs:") - lines = append(lines, " "+strings.Join(configured, ", ")) - } - } - - start, end := listWindow(len(lines), m.settings.bodyOffset, h) - return padToSize(strings.Join(lines[start:end], "\n"), w, h) -} - -func (m Model) renderSettingsIntegrationsBody(w, h int) string { - statuses := m.settings.integrationStatus - ready := 0 - outdated := 0 - for _, entry := range statuses { - if entry.State == "ready" { - ready++ - } - if entry.NeedsUpgrade || entry.State == "outdated" { - outdated++ - } - } - lines := settingsBodyHeaderLines( - "Integrations", - fmt.Sprintf("%d total · %d ready · %d need attention", len(statuses), ready, outdated), - ) - lines = append(lines, settingsBodyRule(w)) - if len(statuses) == 0 { - lines = append(lines, dimStyle.Render("No integration status available yet. 
Press r to refresh.")) - return padToSize(strings.Join(lines, "\n"), w, h) - } - - cursor := clamp(m.settings.cursor, 0, len(statuses)-1) - listHeight := h - len(lines) - 4 - if listHeight < 1 { - listHeight = 1 - } - start, end := listWindow(len(statuses), cursor, listHeight) - - for i := start; i < end; i++ { - entry := statuses[i] - prefix := " " - if i == cursor { - prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") - } - - stateColor := colorRed - switch entry.State { - case "ready": - stateColor = colorGreen - case "outdated": - stateColor = colorYellow - case "partial": - stateColor = colorPeach - } - - versionText := entry.DesiredVersion - if strings.TrimSpace(entry.InstalledVersion) != "" { - versionText = entry.InstalledVersion - } - stateText := lipgloss.NewStyle().Foreground(stateColor).Render(strings.ToUpper(entry.State)) - line := fmt.Sprintf("%s%s %s %s", prefix, entry.Name, stateText, dimStyle.Render("v"+versionText)) - lines = append(lines, line) - lines = append(lines, " "+dimStyle.Render(entry.Summary)) - } - - selected := statuses[cursor] - lines = append(lines, "") - lines = append(lines, "Selected:") - lines = append(lines, fmt.Sprintf(" %s · installed=%t configured=%t", selected.Name, selected.Installed, selected.Configured)) - if selected.NeedsUpgrade { - lines = append(lines, " "+lipgloss.NewStyle().Foreground(colorYellow).Render("Upgrade recommended: installed version differs from current integration version")) - } - lines = append(lines, " Install/configure command writes plugin/hook files and updates tool configs automatically.") - - return padToSize(strings.Join(lines, "\n"), w, h) -} diff --git a/internal/tui/settings_modal_preferences.go b/internal/tui/settings_modal_preferences.go new file mode 100644 index 0000000..4468f69 --- /dev/null +++ b/internal/tui/settings_modal_preferences.go @@ -0,0 +1,270 @@ +package tui + +import ( + "fmt" + "os" + "strings" + + "github.com/charmbracelet/lipgloss" + 
"github.com/janekbaraniewski/openusage/internal/core" +) + +func (m Model) renderSettingsThemeBody(w, h int) string { + themes := AvailableThemes() + activeThemeIdx := ActiveThemeIndex() + activeThemeName := "none" + if activeThemeIdx >= 0 && activeThemeIdx < len(themes) { + activeThemeName = themes[activeThemeIdx].Name + } + lines := settingsBodyHeaderLines("Theme Selection", fmt.Sprintf("%d themes available · active: %s", len(themes), activeThemeName)) + nameW := max(12, w-16) + lines = append(lines, dimStyle.Render(fmt.Sprintf(" %-3s %-3s %-3s %-*s", "#", "CUR", "ACT", nameW, "THEME")), settingsBodyRule(w)) + if len(themes) == 0 { + lines = append(lines, dimStyle.Render("No themes available.")) + return padToSize(strings.Join(lines, "\n"), w, h) + } + + cursor := clamp(m.settings.themeCursor, 0, len(themes)-1) + start, end := listWindow(len(themes), cursor, max(1, h-len(lines))) + for i := start; i < end; i++ { + prefix := " " + if i == cursor { + prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") + } + current := "." + if i == activeThemeIdx { + current = "*" + } + selected := "." 
+ if i == cursor { + selected = ">" + } + lines = append(lines, fmt.Sprintf("%s%-3d %-3s %-3s %-*s", prefix, i+1, selected, current, nameW, truncateToWidth(themes[i].Name, nameW))) + } + return padToSize(strings.Join(lines, "\n"), w, h) +} + +func (m Model) renderSettingsViewBody(w, h int) string { + configured := m.configuredDashboardView() + active := m.activeDashboardView() + lines := settingsBodyHeaderLines("Dashboard View Mode", fmt.Sprintf("configured: %s · active: %s", configured, active)) + lines = append(lines, dimStyle.Render(" CUR MODE"), settingsBodyRule(w)) + if len(dashboardViewOptions) == 0 { + lines = append(lines, dimStyle.Render("No dashboard views available.")) + return padToSize(strings.Join(lines, "\n"), w, h) + } + + cursor := clamp(m.settings.viewCursor, 0, len(dashboardViewOptions)-1) + start, end := listWindow(len(dashboardViewOptions), cursor, max(1, h-len(lines))) + for i := start; i < end; i++ { + option := dashboardViewOptions[i] + prefix := " " + if i == cursor { + prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") + } + current := " " + if option.ID == configured { + current = lipgloss.NewStyle().Foreground(colorGreen).Bold(true).Render("● ") + } + label := option.Label + if option.ID == active && option.ID != configured { + label += " (auto)" + } + lines = append(lines, fmt.Sprintf("%s%s%s", prefix, current, label), " "+dimStyle.Render(option.Description)) + } + return padToSize(strings.Join(lines, "\n"), w, h) +} + +func (m Model) apiKeysTabIDs() []string { + registered := make(map[string]bool) + var ids []string + for _, id := range m.providerOrder { + providerID := m.accountProviders[id] + if isAPIKeyProvider(providerID) { + ids = append(ids, id) + registered[providerID] = true + } + } + for _, entry := range apiKeyProviderEntries() { + if !registered[entry.ProviderID] { + ids = append(ids, entry.AccountID) + } + } + return ids +} + +func providerForAccountID(accountID string, accountProviders 
map[string]string) string { + if providerID := strings.TrimSpace(accountProviders[accountID]); providerID != "" { + return providerID + } + for _, entry := range apiKeyProviderEntries() { + if entry.AccountID == accountID { + return entry.ProviderID + } + } + return "" +} + +func maskAPIKey(key string) string { + if len(key) <= 12 { + return key + } + return key[:8] + "..." + key[len(key)-4:] +} + +func (m Model) renderSettingsAPIKeysBody(w, h int) string { + ids := m.apiKeysTabIDs() + configuredCount := 0 + for _, id := range ids { + providerID := providerForAccountID(id, m.accountProviders) + if !isAPIKeyProvider(providerID) { + continue + } + if envVar := envVarForProvider(providerID); envVar != "" && os.Getenv(envVar) != "" { + configuredCount++ + continue + } + if snap, ok := m.snapshots[id]; ok && snap.Status == core.StatusOK { + configuredCount++ + } + } + + lines := settingsBodyHeaderLines("API Key Management", fmt.Sprintf("%d/%d configured (env or validated)", configuredCount, len(ids))) + accountW := 20 + envW := max(10, w-accountW-18) + if accountW = max(10, w-envW-18); accountW < 10 { + accountW = 10 + } + lines = append(lines, dimStyle.Render(fmt.Sprintf(" %-3s %-5s %-*s %-*s", "#", "STAT", accountW, "ACCOUNT", envW, "ENV VAR")), settingsBodyRule(w)) + if len(ids) == 0 { + lines = append(lines, dimStyle.Render("No API-key providers available.")) + return padToSize(strings.Join(lines, "\n"), w, h) + } + + cursor := clamp(m.settings.cursor, 0, len(ids)-1) + start, end := listWindow(len(ids), cursor, max(1, h-len(lines))) + for i := start; i < end; i++ { + id := ids[i] + providerID := providerForAccountID(id, m.accountProviders) + if snap, ok := m.snapshots[id]; ok && snap.ProviderID != "" { + providerID = snap.ProviderID + } + if providerID == "" { + providerID = "unknown" + } + prefix := " " + if i == cursor { + prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") + } + if !isAPIKeyProvider(providerID) { + lines = append(lines, 
fmt.Sprintf("%s%-3d %-5s %-*s %-*s", prefix, i+1, "N/A", accountW, truncateToWidth(id, accountW), envW, "-")) + continue + } + + envLabel := truncateToWidth(core.FirstNonEmpty(envVarForProvider(providerID), "-"), envW) + statusText := "MISS" + if snap, ok := m.snapshots[id]; ok && snap.Status == core.StatusOK { + statusText = "OK" + } else if envVar := envVarForProvider(providerID); envVar != "" && os.Getenv(envVar) != "" { + statusText = "ENV" + } + lines = append(lines, fmt.Sprintf("%s%-3d %-5s %-*s %-*s", prefix, i+1, statusText, accountW, truncateToWidth(id, accountW), envW, envLabel)) + if m.settings.apiKeyEditing && i == cursor { + cursorChar := PulseChar("█", "▌", m.animFrame) + keyLine := fmt.Sprintf(" key: %s", lipgloss.NewStyle().Foreground(colorSapphire).Render(maskAPIKey(m.settings.apiKeyInput)+cursorChar)) + if m.settings.apiKeyStatus != "" { + keyLine += " " + dimStyle.Render(m.settings.apiKeyStatus) + } + lines = append(lines, keyLine) + } + } + return padToSize(strings.Join(lines, "\n"), w, h) +} + +func (m Model) renderSettingsTelemetryBody(w, h int) string { + lines := settingsBodyHeaderLines("Telemetry & Time Window", "Choose aggregation window and map raw telemetry providers") + lines = append(lines, settingsBodyRule(w), "", lipgloss.NewStyle().Foreground(colorTeal).Bold(true).Render("Time Window")+" "+dimStyle.Render("press w or select below"), "") + for i, tw := range core.ValidTimeWindows { + prefix := " " + if i == m.settings.cursor { + prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") + } + current := " " + if tw == m.timeWindow { + current = lipgloss.NewStyle().Foreground(colorGreen).Bold(true).Render("● ") + } + lines = append(lines, fmt.Sprintf("%s%s%s", prefix, current, tw.Label())) + } + lines = append(lines, "") + + unmapped := m.telemetryUnmappedProviders() + if len(unmapped) == 0 { + lines = append(lines, lipgloss.NewStyle().Foreground(colorGreen).Render("All telemetry providers are mapped.")) + } else { + 
lines = append(lines, lipgloss.NewStyle().Foreground(colorPeach).Bold(true).Render("Detected additional telemetry providers:")) + for _, providerID := range unmapped { + lines = append(lines, " - "+providerID) + } + lines = append(lines, "", "Map them in settings.json under telemetry.provider_links:", " =") + if hints := m.telemetryProviderLinkHints(); len(hints) > 0 { + lines = append(lines, "", "Hint:", " "+hints[0]) + } + if configured := m.configuredProviderIDs(); len(configured) > 0 { + lines = append(lines, "", "Configured provider IDs:", " "+strings.Join(configured, ", ")) + } + } + start, end := listWindow(len(lines), m.settings.bodyOffset, h) + return padToSize(strings.Join(lines[start:end], "\n"), w, h) +} + +func (m Model) renderSettingsIntegrationsBody(w, h int) string { + statuses := m.settings.integrationStatus + ready := 0 + outdated := 0 + for _, entry := range statuses { + if entry.State == "ready" { + ready++ + } + if entry.NeedsUpgrade || entry.State == "outdated" { + outdated++ + } + } + lines := settingsBodyHeaderLines("Integrations", fmt.Sprintf("%d total · %d ready · %d need attention", len(statuses), ready, outdated)) + lines = append(lines, settingsBodyRule(w)) + if len(statuses) == 0 { + lines = append(lines, dimStyle.Render("No integration status available yet. 
Press r to refresh.")) + return padToSize(strings.Join(lines, "\n"), w, h) + } + + cursor := clamp(m.settings.cursor, 0, len(statuses)-1) + start, end := listWindow(len(statuses), cursor, max(1, h-len(lines)-4)) + for i := start; i < end; i++ { + entry := statuses[i] + prefix := " " + if i == cursor { + prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") + } + stateColor := colorRed + switch entry.State { + case "ready": + stateColor = colorGreen + case "outdated": + stateColor = colorYellow + case "partial": + stateColor = colorPeach + } + versionText := core.FirstNonEmpty(strings.TrimSpace(entry.InstalledVersion), entry.DesiredVersion) + lines = append(lines, + fmt.Sprintf("%s%s %s %s", prefix, entry.Name, lipgloss.NewStyle().Foreground(stateColor).Render(strings.ToUpper(entry.State)), dimStyle.Render("v"+versionText)), + " "+dimStyle.Render(entry.Summary), + ) + } + + selected := statuses[cursor] + lines = append(lines, "", "Selected:", fmt.Sprintf(" %s · installed=%t configured=%t", selected.Name, selected.Installed, selected.Configured)) + if selected.NeedsUpgrade { + lines = append(lines, " "+lipgloss.NewStyle().Foreground(colorYellow).Render("Upgrade recommended: installed version differs from current integration version")) + } + lines = append(lines, " Install/configure command writes plugin/hook files and updates tool configs automatically.") + return padToSize(strings.Join(lines, "\n"), w, h) +} diff --git a/internal/tui/settings_modal_sections.go b/internal/tui/settings_modal_sections.go new file mode 100644 index 0000000..5f4b2fd --- /dev/null +++ b/internal/tui/settings_modal_sections.go @@ -0,0 +1,186 @@ +package tui + +import ( + "fmt" + "strings" + + "github.com/charmbracelet/lipgloss" +) + +func (m Model) renderSettingsProvidersBody(w, h int) string { + ids := m.settingsIDs() + enabledCount := 0 + for _, id := range ids { + if m.isProviderEnabled(id) { + enabledCount++ + } + } + + lines := settingsBodyHeaderLines( + 
"Provider Visibility & Order", + fmt.Sprintf("%d/%d enabled · Shift+J/K reorder · Enter toggle", enabledCount, len(ids)), + ) + accountW := 26 + providerW := max(10, w-accountW-16) + if accountW = max(12, w-providerW-16); accountW < 12 { + accountW = 12 + } + lines = append(lines, dimStyle.Render(fmt.Sprintf(" %-3s %-3s %-*s %-*s", "#", "ON", accountW, "ACCOUNT", providerW, "PROVIDER"))) + lines = append(lines, settingsBodyRule(w)) + if len(ids) == 0 { + lines = append(lines, dimStyle.Render("No providers available.")) + return padToSize(strings.Join(lines, "\n"), w, h) + } + + cursor := clamp(m.settings.cursor, 0, len(ids)-1) + start, end := listWindow(len(ids), cursor, max(1, h-len(lines))) + for i := start; i < end; i++ { + id := ids[i] + providerID := m.accountProviders[id] + if snap, ok := m.snapshots[id]; ok && snap.ProviderID != "" { + providerID = snap.ProviderID + } + if providerID == "" { + providerID = "unknown" + } + onText := "OFF" + onStyle := lipgloss.NewStyle().Foreground(colorRed) + if m.isProviderEnabled(id) { + onText = "ON " + onStyle = lipgloss.NewStyle().Foreground(colorGreen) + } + prefix := " " + if i == cursor { + prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") + } + lines = append(lines, fmt.Sprintf("%s%-3d %s %-*s %-*s", + prefix, i+1, onStyle.Render(onText), accountW, truncateToWidth(id, accountW), providerW, truncateToWidth(providerID, providerW))) + } + return padToSize(strings.Join(lines, "\n"), w, h) +} + +func (m Model) renderSettingsWidgetSectionsBody(w, h int) string { + return m.renderSettingsWidgetSectionsList(w, h) +} + +func (m Model) renderSettingsWidgetSectionsList(w, h int) string { + entries := m.widgetSectionEntries() + visibleCount := 0 + for _, entry := range entries { + if entry.Enabled { + visibleCount++ + } + } + + lines := settingsBodyHeaderLines( + "Global Widget Sections", + fmt.Sprintf("%d/%d sections visible · applies to all providers", visibleCount, len(entries)), + ) + hideBox := 
"☐" + hideStyle := lipgloss.NewStyle().Foreground(colorRed) + if m.hideSectionsWithNoData { + hideBox = "☑" + hideStyle = lipgloss.NewStyle().Foreground(colorGreen) + } + lines = append(lines, fmt.Sprintf("Hide sections with no data: %s %s", hideStyle.Render(hideBox), dimStyle.Render("press h to toggle")), "") + + nameW := max(12, w-24) + lines = append(lines, dimStyle.Render(fmt.Sprintf(" %-3s %-3s %-*s %s", "#", "ON", nameW, "SECTION", "ID"))) + lines = append(lines, settingsBodyRule(w)) + if len(entries) == 0 { + lines = append(lines, dimStyle.Render("No dashboard sections available.")) + return padToSize(strings.Join(lines, "\n"), w, h) + } + + cursor := clamp(m.settings.sectionRowCursor, 0, len(entries)-1) + start, end := listWindow(len(entries), cursor, max(1, h-len(lines))) + for i := start; i < end; i++ { + entry := entries[i] + prefix := " " + if i == cursor { + prefix = lipgloss.NewStyle().Foreground(colorAccent).Bold(true).Render("➤ ") + } + onText := "OFF" + onStyle := lipgloss.NewStyle().Foreground(colorRed) + if entry.Enabled { + onText = "ON " + onStyle = lipgloss.NewStyle().Foreground(colorGreen) + } + lines = append(lines, fmt.Sprintf("%s%-3d %s %-*s %s", + prefix, i+1, onStyle.Render(onText), nameW, truncateToWidth(settingsSectionLabel(entry.ID), nameW), dimStyle.Render(string(entry.ID)))) + } + return padToSize(strings.Join(lines, "\n"), w, h) +} + +func (m Model) renderSettingsWidgetSectionsPreview(w, h int) string { + if w < 24 || h < 5 { + return padToSize(dimStyle.Render("Live preview unavailable at this size."), w, h) + } + + lines := []string{ + lipgloss.NewStyle().Foreground(colorTeal).Bold(true).Render("Live Preview"), + dimStyle.Render("Claude Code preset · synthetic data · PgUp/PgDn scroll"), + "", + } + tileW := max(tileMinWidth, w-2) + all := append(lines, strings.Split(m.renderTile(settingsWidgetSectionsPreviewSnapshot(), false, false, tileW, 0, 0), "\n")...) 
+ maxOffset := max(0, len(all)-h) + offset := clamp(m.settings.previewOffset, 0, maxOffset) + visible := all + if len(visible) > h { + visible = visible[offset:min(offset+h, len(visible))] + } + if len(visible) > 0 && offset > 0 { + visible[0] = dimStyle.Render(" ▲ preview above") + } + if len(visible) > 0 && offset+h < len(all) { + visible[len(visible)-1] = dimStyle.Render(" ▼ preview below") + } + return padToSize(strings.Join(visible, "\n"), w, h) +} + +func (m Model) renderSettingsWidgetPreviewPanel(contentW, contentH int) string { + innerW := max(24, contentW-4) + bodyH := max(4, contentH-1) + lines := []string{ + lipgloss.NewStyle().Bold(true).Foreground(colorRosewater).Render("Widget Preview"), + lipgloss.NewStyle().Foreground(colorSurface1).Render(strings.Repeat("─", innerW)), + m.renderSettingsWidgetSectionsPreview(innerW, bodyH), + } + return lipgloss.NewStyle(). + Border(lipgloss.RoundedBorder()). + BorderForeground(colorAccent). + Background(colorBase). + Padding(1, 2, 0, 2). + Width(contentW). 
+ Render(strings.Join(lines, "\n")) +} + +func (m Model) settingsWidgetPreviewBodyHeight(contentW, contentH int, sideBySide bool) int { + maxBodyH := contentH + if sideBySide { + maxBodyH = m.height - 12 + } else { + maxBodyH = (m.height - 12) / 2 + } + maxBodyH = max(settingsWidgetPreviewMinBodyH, maxBodyH) + targetBodyH := max(settingsWidgetPreviewMinBodyH, m.settingsWidgetPreviewContentLineCount(max(24, contentW-4))) + return min(targetBodyH, maxBodyH) + 1 +} + +func (m Model) settingsWidgetPreviewContentLineCount(innerW int) int { + if innerW < 24 { + return 4 + } + tileW := max(tileMinWidth, innerW-2) + return 3 + len(strings.Split(m.renderTile(settingsWidgetSectionsPreviewSnapshot(), false, false, tileW, 0, 0), "\n")) +} + +func centerPanelVertically(panel string, targetHeight int) string { + current := lipgloss.Height(panel) + if current >= targetHeight { + return panel + } + diff := targetHeight - current + return strings.Repeat("\n", diff/2) + panel + strings.Repeat("\n", diff-diff/2) +} From 0105a59b2bda5b56b426e5e3c18cd40e1f5e168d Mon Sep 17 00:00:00 2001 From: Jan Baraniewski Date: Tue, 10 Mar 2026 11:53:28 +0100 Subject: [PATCH 32/32] refactor: finish provider cleanup and close audit follow-ups --- .../CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md | 14 +- ...W_DUPLICATION_AND_RESPONSIBILITY_REPORT.md | 17 +- internal/detect/cursor.go | 2 + internal/providers/claude_code/claude_code.go | 2 +- .../providers/claude_code/claude_code_test.go | 30 +- .../claude_code/conversation_usage.go | 515 +---- .../conversation_usage_projection.go | 410 ++++ .../claude_code/test_helpers_test.go | 18 + internal/providers/codex/live_usage.go | 4 +- internal/providers/copilot/copilot.go | 20 +- .../providers/copilot/copilot_metrics_test.go | 327 +++ .../copilot/copilot_sessions_test.go | 671 ++++++ internal/providers/copilot/copilot_test.go | 994 -------- internal/providers/copilot/local_config.go | 29 + internal/providers/copilot/local_data.go | 182 -- 
internal/providers/copilot/local_logs.go | 120 + internal/providers/copilot/local_types.go | 55 + .../copilot/telemetry_session_file.go | 429 ---- .../copilot/telemetry_session_helpers.go | 439 ++++ .../providers/copilot/test_helpers_test.go | 25 + .../providers/cursor/cursor_local_test.go | 689 ++++++ internal/providers/cursor/cursor_test.go | 697 +----- internal/providers/cursor/fetch.go | 2 + .../providers/cursor/test_helpers_test.go | 21 + .../providers/gemini_cli/gemini_cli_test.go | 28 +- .../providers/gemini_cli/session_usage.go | 847 ------- .../gemini_cli/session_usage_helpers.go | 470 ++++ .../providers/gemini_cli/session_usage_io.go | 72 + .../gemini_cli/session_usage_metrics.go | 336 +++ .../providers/gemini_cli/test_helpers_test.go | 13 + .../providers/ollama/ollama_details_test.go | 439 ++++ internal/providers/ollama/ollama_test.go | 424 ---- internal/providers/ollama/request_helpers.go | 8 +- .../openrouter/openrouter_activity_test.go | 968 ++++++++ .../openrouter_analytics_rollups_test.go | 534 +++++ .../openrouter/openrouter_analytics_test.go | 579 +++++ .../providers/openrouter/openrouter_test.go | 2038 ----------------- internal/providers/zai/usage_projection.go | 398 ++++ internal/providers/zai/zai.go | 412 +--- 39 files changed, 6739 insertions(+), 6539 deletions(-) create mode 100644 internal/providers/claude_code/conversation_usage_projection.go create mode 100644 internal/providers/claude_code/test_helpers_test.go create mode 100644 internal/providers/copilot/copilot_metrics_test.go create mode 100644 internal/providers/copilot/copilot_sessions_test.go create mode 100644 internal/providers/copilot/local_config.go create mode 100644 internal/providers/copilot/local_logs.go create mode 100644 internal/providers/copilot/local_types.go create mode 100644 internal/providers/copilot/telemetry_session_helpers.go create mode 100644 internal/providers/copilot/test_helpers_test.go create mode 100644 internal/providers/cursor/cursor_local_test.go 
create mode 100644 internal/providers/cursor/test_helpers_test.go create mode 100644 internal/providers/gemini_cli/session_usage_helpers.go create mode 100644 internal/providers/gemini_cli/session_usage_io.go create mode 100644 internal/providers/gemini_cli/session_usage_metrics.go create mode 100644 internal/providers/gemini_cli/test_helpers_test.go create mode 100644 internal/providers/ollama/ollama_details_test.go create mode 100644 internal/providers/openrouter/openrouter_activity_test.go create mode 100644 internal/providers/openrouter/openrouter_analytics_rollups_test.go create mode 100644 internal/providers/openrouter/openrouter_analytics_test.go create mode 100644 internal/providers/zai/usage_projection.go diff --git a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md index 43aeff4..7aea4ef 100644 --- a/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md +++ b/docs/CODEBASE_AUDIT_ACTION_TABLE_2026-03-09.md @@ -15,19 +15,15 @@ Branch: `feat/dashboard-race-parser-cleanups` | R61 | Fixed | Gemini CLI provider decomposition | `internal/providers/gemini_cli/gemini_cli.go`, `internal/providers/gemini_cli/api_usage.go`, `internal/providers/gemini_cli/session_usage.go` | API/quota/account flows and local session aggregation are split out of the coordinator file. The main provider file is now mostly wiring plus fetch orchestration. | Keep future Gemini changes inside the matching helper unit. | | R62 | Fixed | Ollama provider decomposition follow-through | `internal/providers/ollama/ollama.go`, `internal/providers/ollama/local_api.go`, `internal/providers/ollama/cloud_api.go`, `internal/providers/ollama/desktop_db.go`, `internal/providers/ollama/desktop_db_settings.go`, `internal/providers/ollama/desktop_db_tokens.go`, `internal/providers/ollama/desktop_db_breakdowns.go` | Ollama’s coordinator, local API, cloud API, and desktop SQLite flows are now separated by concern. 
The remaining large desktop DB path is split into settings/schema helpers, token estimation, and usage breakdown/daily series helpers. | Keep future SQLite-specific work inside the dedicated desktop DB helper files. | | R63 | Fixed | Telemetry and config fixture cleanup | `internal/telemetry/test_helpers_test.go`, `internal/telemetry/usage_view_test.go`, `internal/config/test_helpers_test.go` | Shared store/file helpers now cover the repeated setup patterns in the telemetry and config suites, and `usage_view_test.go` is reduced below the previous monolith threshold. | Apply the same helper pattern to other large suites when they next change. | +| R64 | Fixed | Runtime-hint rollout follow-through | `internal/core/provider.go`, `internal/detect/codex.go`, `internal/detect/cursor.go`, `internal/detect/ollama.go`, `internal/providers/codex/live_usage.go`, `internal/providers/copilot/copilot.go`, `internal/providers/ollama/request_helpers.go` | Remaining runtime-only config/account hints now flow through `RuntimeHints` and `Hint`/`SetHint` helpers instead of direct provider code reaching into ad hoc `ExtraData` keys for local paths and overrides. | Keep new runtime-only provider hints behind `Hint`/`SetHint` rather than adding more direct map reads. | +| R65 | Fixed | Provider/session and test-suite decomposition follow-through | `internal/providers/claude_code/conversation_usage.go`, `internal/providers/claude_code/conversation_usage_projection.go`, `internal/providers/copilot/local_data.go`, `internal/providers/copilot/telemetry_session_file.go`, `internal/providers/copilot/copilot_test.go`, `internal/providers/openrouter/openrouter_analytics_test.go`, `internal/providers/openrouter/openrouter_analytics_rollups_test.go`, `internal/providers/zai/zai.go` | The remaining long provider/session paths are now split by parser/projection/aggregation concern, and the last oversized high-churn test suites are divided by scenario family with shared helpers extracted. 
| Split again only when a specific family regrows into another mixed-responsibility file. | -## Residual Non-Blocking Follow-Up +## Remaining Review State -These are no longer review blockers or known correctness issues. They are explicit maintenance opportunities left after the main cleanup. - -| ID | Priority | Area | Evidence | Current state | Optional follow-up | -| --- | --- | --- | --- | --- | --- | -| A6 | P3 | Telemetry usage-view orchestration | `internal/telemetry/usage_view.go`, `internal/telemetry/usage_view_projection.go`, `internal/telemetry/usage_view_materialize.go`, `internal/telemetry/usage_view_aggregate.go` | The usage-view path is materially decomposed and validated. The remaining top-level coordinator is acceptable and no longer a review issue. | Split further only if future telemetry changes start re-coupling query planning, cache application, and projection. | -| A7 | P3 | Daemon service follow-through | `internal/daemon/server.go`, `internal/daemon/server_collect.go`, `internal/daemon/server_poll.go`, `internal/daemon/server_spool.go`, `internal/daemon/server_http.go`, `internal/daemon/server_read_model.go` | Daemon loops and HTTP/read-model flows are already separated, and no new race or lifecycle bug was found in the follow-up review. | Add extra worker abstractions only if future concurrency pressure justifies them. | -| A8 | P3 | Ambiguous local-source attribution | `internal/daemon/source_collectors.go`, `internal/daemon/server_http.go`, `cmd/openusage/telemetry.go` | Ambiguous shared-path local sources still intentionally require explicit user disambiguation instead of silent guessing. This is a product decision, not a hidden bug. | Add persisted source/account aliasing only if multi-account shared-path workflows become common. | +No active `P1`, `P2`, or `P3` review items remain from this audit. 
The earlier follow-up rows were either resolved in this branch or explicitly reclassified as optional future design choices rather than outstanding issues. ## Summary - The original high-risk review items `A1`, `A2`, `A3`, `A4`, `A12`, `A14`, and `A15` are addressed in this branch. +- The remaining provider/session decomposition, runtime-hint rollout, and large-suite cleanup work is also addressed in this branch. - No additional high-confidence correctness bug was found during the follow-up review after the dashboard timeframe race fix. -- Remaining entries are intentional tradeoffs or low-priority structural opportunities, not outstanding breakages. diff --git a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md index 77e3dec..6dff33b 100644 --- a/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md +++ b/docs/SYSTEM_REVIEW_DUPLICATION_AND_RESPONSIBILITY_REPORT.md @@ -6,7 +6,7 @@ Branch: `feat/dashboard-race-parser-cleanups` ## Scope -This report reflects the tree after the dashboard timeframe-race fix, parser consolidation work, daemon/read-model cleanup, provider decomposition, TUI decomposition, render-cache follow-through, and the final `A1`/`A2`/`A3`/`A4`/`A12`/`A14`/`A15` cleanup pass. +This report reflects the tree after the dashboard timeframe-race fix, parser consolidation work, daemon/read-model cleanup, provider decomposition, TUI decomposition, render-cache follow-through, runtime-hint cleanup, large-suite splitting, and the final `A1`/`A2`/`A3`/`A4`/`A12`/`A14`/`A15` cleanup pass. It replaces the earlier “remaining gaps” snapshot. The goal now is to document the actual post-cleanup state, not to preserve stale open items. @@ -24,7 +24,9 @@ The following earlier review themes are materially closed in this branch: - Remaining detail/analytics metric-prefix parsing pockets that were still living in renderer code. - Tile/detail/analytics render-path recomputation on every frame. 
- Account-config runtime-path overload in the hot path. -- Repeated telemetry/config test setup boilerplate in the most actively changed suites. +- Repeated telemetry/config/provider test setup boilerplate in the most actively changed suites. +- Remaining runtime-only provider overrides reaching directly into ad hoc `ExtraData` fields. +- The last oversized high-churn Copilot/OpenRouter test suites. ## Current Findings @@ -49,15 +51,8 @@ The most change-prone areas are no longer concentrated the way they were at the This reduces review blast radius and makes future concurrency/data-flow work easier to reason about. -### 3. Residual items are explicit, low-risk follow-up opportunities - -There are still a few non-blocking areas worth keeping in mind: - -- `usage_view.go` still owns top-level orchestration, but it is no longer a monolith and does not currently hide a correctness issue. -- The daemon could be pushed into more formal worker abstractions later, but present lifecycle/context handling is consistent in the active paths. -- Ambiguous shared-path local account attribution still requires explicit user disambiguation by design; the code now avoids silent guessing. - -These are not “unfinished fixes”. They are optional future design work. +### 3. No active audit-priority items remain +The earlier follow-up list is now closed for the purposes of this review. What remains in the repo are ordinary future refactor options, not unresolved `P1`/`P2`/`P3` findings from this audit. 
## References diff --git a/internal/detect/cursor.go b/internal/detect/cursor.go index cad6aec..7be1b0f 100644 --- a/internal/detect/cursor.go +++ b/internal/detect/cursor.go @@ -53,10 +53,12 @@ func detectCursor(result *Result) { if hasTracking { acct.SetPath("tracking_db", trackingDB) + acct.SetHint("tracking_db", trackingDB) acct.ExtraData["tracking_db"] = trackingDB } if hasState { acct.SetPath("state_db", stateDB) + acct.SetHint("state_db", stateDB) acct.ExtraData["state_db"] = stateDB } diff --git a/internal/providers/claude_code/claude_code.go b/internal/providers/claude_code/claude_code.go index 5cac350..e172398 100644 --- a/internal/providers/claude_code/claude_code.go +++ b/internal/providers/claude_code/claude_code.go @@ -280,7 +280,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa home, _ := os.UserHomeDir() claudeDir := filepath.Join(home, ".claude") - if override, ok := acct.ExtraData["claude_dir"]; ok && override != "" { + if override := acct.Hint("claude_dir", ""); override != "" { claudeDir = override home = filepath.Dir(claudeDir) // derive "home" from the override } diff --git a/internal/providers/claude_code/claude_code_test.go b/internal/providers/claude_code/claude_code_test.go index 7f2b15f..6940801 100644 --- a/internal/providers/claude_code/claude_code_test.go +++ b/internal/providers/claude_code/claude_code_test.go @@ -78,11 +78,7 @@ func TestProvider_Fetch_WithStatsFile(t *testing.T) { os.WriteFile(accountPath, []byte(acctData), 0644) p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "test-claude", - Binary: statsPath, - BaseURL: accountPath, - }) + snap, err := p.Fetch(context.Background(), testClaudeAccount("test-claude", statsPath, accountPath)) if err != nil { t.Fatalf("Fetch failed: %v", err) } @@ -111,14 +107,15 @@ func TestProvider_Fetch_WithStatsFile(t *testing.T) { func TestProvider_Fetch_NoData(t *testing.T) { tmpDir := t.TempDir() p := New() - snap, err := 
p.Fetch(context.Background(), core.AccountConfig{ - ID: "test-claude", - Binary: filepath.Join(tmpDir, "nonexistent-stats.json"), - BaseURL: filepath.Join(tmpDir, "nonexistent-account.json"), - ExtraData: map[string]string{ - "claude_dir": filepath.Join(tmpDir, ".claude"), - }, - }) + snap, err := p.Fetch( + context.Background(), + testClaudeAccountWithDir( + "test-claude", + filepath.Join(tmpDir, "nonexistent-stats.json"), + filepath.Join(tmpDir, "nonexistent-account.json"), + filepath.Join(tmpDir, ".claude"), + ), + ) if err != nil { t.Fatalf("Fetch should not error, got: %v", err) } @@ -570,12 +567,7 @@ func TestProviderFetch_UsesBackupStatsPath(t *testing.T) { } p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "claude-backup-path", - ExtraData: map[string]string{ - "claude_dir": claudeDir, - }, - }) + snap, err := p.Fetch(context.Background(), testClaudeAccountWithDir("claude-backup-path", "", "", claudeDir)) if err != nil { t.Fatalf("fetch failed: %v", err) } diff --git a/internal/providers/claude_code/conversation_usage.go b/internal/providers/claude_code/conversation_usage.go index 58d12dd..a2661b5 100644 --- a/internal/providers/claude_code/conversation_usage.go +++ b/internal/providers/claude_code/conversation_usage.go @@ -2,7 +2,6 @@ package claude_code import ( "fmt" - "math" "path/filepath" "sort" "strings" @@ -27,7 +26,6 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna snap.Raw["jsonl_files_found"] = fmt.Sprintf("%d", len(jsonlFiles)) now := time.Now() - today := now.Format("2006-01-02") todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) weekStart := now.Add(-7 * 24 * time.Hour) @@ -380,447 +378,76 @@ func (p *Provider) readConversationJSONL(projectsDir, altProjectsDir string, sna } } - for model, totals := range modelTotals { - modelPrefix := "model_" + model - setMetricMax(snap, modelPrefix+"_input_tokens", totals.input, "tokens", "all-time 
estimate") - setMetricMax(snap, modelPrefix+"_output_tokens", totals.output, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cached_tokens", totals.cached, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cache_creation_tokens", totals.cacheCreate, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cache_creation_5m_tokens", totals.cache5m, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cache_creation_1h_tokens", totals.cache1h, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_reasoning_tokens", totals.reasoning, "tokens", "all-time estimate") - setMetricMax(snap, modelPrefix+"_web_search_requests", totals.webSearch, "requests", "all-time estimate") - setMetricMax(snap, modelPrefix+"_web_fetch_requests", totals.webFetch, "requests", "all-time estimate") - setMetricMax(snap, modelPrefix+"_cost_usd", totals.cost, "USD", "all-time estimate") - } - - for client, totals := range clientTotals { - key := "client_" + client - setMetricMax(snap, key+"_input_tokens", totals.input, "tokens", "all-time") - setMetricMax(snap, key+"_output_tokens", totals.output, "tokens", "all-time") - setMetricMax(snap, key+"_cached_tokens", totals.cached, "tokens", "all-time") - setMetricMax(snap, key+"_reasoning_tokens", totals.reasoning, "tokens", "all-time") - setMetricMax(snap, key+"_total_tokens", totals.input+totals.output+totals.cached+totals.cacheCreate+totals.reasoning, "tokens", "all-time") - setMetricMax(snap, key+"_sessions", totals.sessions, "sessions", "all-time") - } - - if snap.DailySeries == nil { - snap.DailySeries = make(map[string][]core.TimePoint) - } - dates := core.SortedStringKeys(dailyTokenTotals) - - if len(snap.DailySeries["messages"]) == 0 && len(dates) > 0 { - for _, d := range dates { - snap.DailySeries["messages"] = append(snap.DailySeries["messages"], core.TimePoint{Date: d, Value: float64(dailyMessages[d])}) - snap.DailySeries["tokens_total"] = 
append(snap.DailySeries["tokens_total"], core.TimePoint{Date: d, Value: float64(dailyTokenTotals[d])}) - snap.DailySeries["cost"] = append(snap.DailySeries["cost"], core.TimePoint{Date: d, Value: dailyCost[d]}) - } - - allModels := make(map[string]int64) - for _, dm := range dailyModelTokens { - for model, tokens := range dm { - allModels[model] += int64(tokens) - } - } - type mVol struct { - name string - total int64 - } - var mv []mVol - for m, t := range allModels { - mv = append(mv, mVol{m, t}) - } - sort.Slice(mv, func(i, j int) bool { return mv[i].total > mv[j].total }) - limit := 5 - if len(mv) < limit { - limit = len(mv) - } - for i := 0; i < limit; i++ { - model := mv[i].name - key := fmt.Sprintf("tokens_%s", sanitizeModelName(model)) - for _, d := range dates { - tokens := dailyModelTokens[d][model] - snap.DailySeries[key] = append(snap.DailySeries[key], - core.TimePoint{Date: d, Value: float64(tokens)}) - } - } - } - - if len(dates) > 0 { - clientNames := make(map[string]bool) - for _, byClient := range dailyClientTokens { - for client := range byClient { - clientNames[client] = true - } - } - for client := range clientNames { - key := "tokens_client_" + client - for _, d := range dates { - snap.DailySeries[key] = append(snap.DailySeries[key], core.TimePoint{ - Date: d, - Value: dailyClientTokens[d][client], - }) - } - } - } - - if todayCostUSD > 0 { - snap.Metrics["today_api_cost"] = core.Metric{ - Used: core.Float64Ptr(todayCostUSD), - Unit: "USD", - Window: "since midnight", - } - } - if todayInputTokens > 0 { - in := float64(todayInputTokens) - snap.Metrics["today_input_tokens"] = core.Metric{ - Used: &in, - Unit: "tokens", - Window: "since midnight", - } - } - if todayOutputTokens > 0 { - out := float64(todayOutputTokens) - snap.Metrics["today_output_tokens"] = core.Metric{ - Used: &out, - Unit: "tokens", - Window: "since midnight", - } - } - if todayCacheRead > 0 { - cacheRead := float64(todayCacheRead) - snap.Metrics["today_cache_read_tokens"] = 
core.Metric{ - Used: &cacheRead, - Unit: "tokens", - Window: "since midnight", - } - } - if todayCacheCreate > 0 { - cacheCreate := float64(todayCacheCreate) - snap.Metrics["today_cache_create_tokens"] = core.Metric{ - Used: &cacheCreate, - Unit: "tokens", - Window: "since midnight", - } - } - if todayMessages > 0 { - msgs := float64(todayMessages) - setMetricMax(snap, "messages_today", msgs, "messages", "since midnight") - } - if len(todaySessions) > 0 { - setMetricMax(snap, "sessions_today", float64(len(todaySessions)), "sessions", "since midnight") - } - if todayToolCalls > 0 { - setMetricMax(snap, "tool_calls_today", float64(todayToolCalls), "calls", "since midnight") - } - if todayReasoning > 0 { - v := float64(todayReasoning) - snap.Metrics["today_reasoning_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "since midnight", - } - } - if todayCacheCreate5m > 0 { - v := float64(todayCacheCreate5m) - snap.Metrics["today_cache_create_5m_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "since midnight", - } - } - if todayCacheCreate1h > 0 { - v := float64(todayCacheCreate1h) - snap.Metrics["today_cache_create_1h_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "since midnight", - } - } - if todayWebSearch > 0 { - v := float64(todayWebSearch) - snap.Metrics["today_web_search_requests"] = core.Metric{ - Used: &v, - Unit: "requests", - Window: "since midnight", - } - } - if todayWebFetch > 0 { - v := float64(todayWebFetch) - snap.Metrics["today_web_fetch_requests"] = core.Metric{ - Used: &v, - Unit: "requests", - Window: "since midnight", - } - } - - if weeklyCostUSD > 0 { - snap.Metrics["7d_api_cost"] = core.Metric{ - Used: core.Float64Ptr(weeklyCostUSD), - Unit: "USD", - Window: "rolling 7 days", - } - } - if weeklyMessages > 0 { - wm := float64(weeklyMessages) - snap.Metrics["7d_messages"] = core.Metric{ - Used: &wm, - Unit: "messages", - Window: "rolling 7 days", - } - wIn := float64(weeklyInputTokens) - 
snap.Metrics["7d_input_tokens"] = core.Metric{ - Used: &wIn, - Unit: "tokens", - Window: "rolling 7 days", - } - wOut := float64(weeklyOutputTokens) - snap.Metrics["7d_output_tokens"] = core.Metric{ - Used: &wOut, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyCacheRead > 0 { - v := float64(weeklyCacheRead) - snap.Metrics["7d_cache_read_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyCacheCreate > 0 { - v := float64(weeklyCacheCreate) - snap.Metrics["7d_cache_create_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyCacheCreate5m > 0 { - v := float64(weeklyCacheCreate5m) - snap.Metrics["7d_cache_create_5m_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyCacheCreate1h > 0 { - v := float64(weeklyCacheCreate1h) - snap.Metrics["7d_cache_create_1h_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyReasoning > 0 { - v := float64(weeklyReasoning) - snap.Metrics["7d_reasoning_tokens"] = core.Metric{ - Used: &v, - Unit: "tokens", - Window: "rolling 7 days", - } - } - if weeklyToolCalls > 0 { - setMetricMax(snap, "7d_tool_calls", float64(weeklyToolCalls), "calls", "rolling 7 days") - } - if weeklyWebSearch > 0 { - v := float64(weeklyWebSearch) - snap.Metrics["7d_web_search_requests"] = core.Metric{ - Used: &v, - Unit: "requests", - Window: "rolling 7 days", - } - } - if weeklyWebFetch > 0 { - v := float64(weeklyWebFetch) - snap.Metrics["7d_web_fetch_requests"] = core.Metric{ - Used: &v, - Unit: "requests", - Window: "rolling 7 days", - } - } - if len(weeklySessions) > 0 { - setMetricMax(snap, "7d_sessions", float64(len(weeklySessions)), "sessions", "rolling 7 days") - } - - if todayMessages > 0 { - snap.Raw["jsonl_today_date"] = today - snap.Raw["jsonl_today_messages"] = fmt.Sprintf("%d", todayMessages) - snap.Raw["jsonl_today_input_tokens"] = 
fmt.Sprintf("%d", todayInputTokens) - snap.Raw["jsonl_today_output_tokens"] = fmt.Sprintf("%d", todayOutputTokens) - snap.Raw["jsonl_today_cache_read_tokens"] = fmt.Sprintf("%d", todayCacheRead) - snap.Raw["jsonl_today_cache_create_tokens"] = fmt.Sprintf("%d", todayCacheCreate) - snap.Raw["jsonl_today_reasoning_tokens"] = fmt.Sprintf("%d", todayReasoning) - snap.Raw["jsonl_today_web_search_requests"] = fmt.Sprintf("%d", todayWebSearch) - snap.Raw["jsonl_today_web_fetch_requests"] = fmt.Sprintf("%d", todayWebFetch) - - models := core.SortedStringKeys(todayModels) - snap.Raw["jsonl_today_models"] = strings.Join(models, ", ") - } - - if inCurrentBlock { - snap.Metrics["5h_block_cost"] = core.Metric{ - Used: core.Float64Ptr(blockCostUSD), - Unit: "USD", - Window: fmt.Sprintf("%s – %s", currentBlockStart.Format("15:04"), currentBlockEnd.Format("15:04")), - } - - blockIn := float64(blockInputTokens) - snap.Metrics["5h_block_input"] = core.Metric{ - Used: &blockIn, - Unit: "tokens", - Window: "current 5h block", - } - - blockOut := float64(blockOutputTokens) - snap.Metrics["5h_block_output"] = core.Metric{ - Used: &blockOut, - Unit: "tokens", - Window: "current 5h block", - } - - blockMsgs := float64(blockMessages) - snap.Metrics["5h_block_msgs"] = core.Metric{ - Used: &blockMsgs, - Unit: "messages", - Window: "current 5h block", - } - if blockCacheRead > 0 { - setMetricMax(snap, "5h_block_cache_read_tokens", float64(blockCacheRead), "tokens", "current 5h block") - } - if blockCacheCreate > 0 { - setMetricMax(snap, "5h_block_cache_create_tokens", float64(blockCacheCreate), "tokens", "current 5h block") - } - - remaining := currentBlockEnd.Sub(now) - if remaining > 0 { - snap.Resets["billing_block"] = currentBlockEnd - snap.Raw["block_time_remaining"] = fmt.Sprintf("%s", remaining.Round(time.Minute)) - - elapsed := now.Sub(currentBlockStart) - progress := math.Min(elapsed.Seconds()/billingBlockDuration.Seconds()*100, 100) - snap.Raw["block_progress_pct"] = 
fmt.Sprintf("%.0f", progress) - } - - snap.Raw["block_start"] = currentBlockStart.Format(time.RFC3339) - snap.Raw["block_end"] = currentBlockEnd.Format(time.RFC3339) - - blockModelList := core.SortedStringKeys(blockModels) - snap.Raw["block_models"] = strings.Join(blockModelList, ", ") - - elapsed := now.Sub(currentBlockStart) - if elapsed > time.Minute && blockCostUSD > 0 { - burnRate := blockCostUSD / elapsed.Hours() - snap.Metrics["burn_rate"] = core.Metric{ - Used: core.Float64Ptr(burnRate), - Unit: "USD/h", - Window: "current 5h block", - } - snap.Raw["burn_rate"] = fmt.Sprintf("$%.2f/hour", burnRate) - } - } - - if allTimeCostUSD > 0 { - snap.Metrics["all_time_api_cost"] = core.Metric{ - Used: core.Float64Ptr(allTimeCostUSD), - Unit: "USD", - Window: "all-time estimate", - } - } - if allTimeInputTokens > 0 { - setMetricMax(snap, "all_time_input_tokens", float64(allTimeInputTokens), "tokens", "all-time estimate") - } - if allTimeOutputTokens > 0 { - setMetricMax(snap, "all_time_output_tokens", float64(allTimeOutputTokens), "tokens", "all-time estimate") - } - if allTimeCacheRead > 0 { - setMetricMax(snap, "all_time_cache_read_tokens", float64(allTimeCacheRead), "tokens", "all-time estimate") - } - if allTimeCacheCreate > 0 { - setMetricMax(snap, "all_time_cache_create_tokens", float64(allTimeCacheCreate), "tokens", "all-time estimate") - } - if allTimeCacheCreate5m > 0 { - setMetricMax(snap, "all_time_cache_create_5m_tokens", float64(allTimeCacheCreate5m), "tokens", "all-time estimate") - } - if allTimeCacheCreate1h > 0 { - setMetricMax(snap, "all_time_cache_create_1h_tokens", float64(allTimeCacheCreate1h), "tokens", "all-time estimate") - } - if allTimeReasoning > 0 { - setMetricMax(snap, "all_time_reasoning_tokens", float64(allTimeReasoning), "tokens", "all-time estimate") - } - if allTimeToolCalls > 0 { - setMetricMax(snap, "all_time_tool_calls", float64(allTimeToolCalls), "calls", "all-time estimate") - setMetricMax(snap, "tool_calls_total", 
float64(allTimeToolCalls), "calls", "all-time estimate") - setMetricMax(snap, "tool_completed", float64(allTimeToolCalls), "calls", "all-time estimate") - setMetricMax(snap, "tool_success_rate", 100.0, "%", "all-time estimate") - } - if len(seenUsageKeys) > 0 { - setMetricMax(snap, "total_prompts", float64(len(seenUsageKeys)), "prompts", "all-time estimate") - } - if len(changedFiles) > 0 { - setMetricMax(snap, "composer_files_changed", float64(len(changedFiles)), "files", "all-time estimate") - } - if allTimeLinesAdded > 0 { - setMetricMax(snap, "composer_lines_added", float64(allTimeLinesAdded), "lines", "all-time estimate") - } - if allTimeLinesRemoved > 0 { - setMetricMax(snap, "composer_lines_removed", float64(allTimeLinesRemoved), "lines", "all-time estimate") - } - if allTimeCommitCount > 0 { - setMetricMax(snap, "scored_commits", float64(allTimeCommitCount), "commits", "all-time estimate") - } - if allTimeLinesAdded > 0 || allTimeLinesRemoved > 0 { - hundred := 100.0 - zero := 0.0 - snap.Metrics["ai_code_percentage"] = core.Metric{ - Used: &hundred, - Remaining: &zero, - Limit: &hundred, - Unit: "%", - Window: "all-time estimate", - } - } - for lang, count := range languageUsageCounts { - if count <= 0 { - continue - } - setMetricMax(snap, "lang_"+sanitizeModelName(lang), float64(count), "requests", "all-time estimate") - } - for toolName, count := range toolUsageCounts { - if count <= 0 { - continue - } - setMetricMax(snap, "tool_"+sanitizeModelName(toolName), float64(count), "calls", "all-time estimate") - } - if allTimeWebSearch > 0 { - setMetricMax(snap, "all_time_web_search_requests", float64(allTimeWebSearch), "requests", "all-time estimate") - } - if allTimeWebFetch > 0 { - setMetricMax(snap, "all_time_web_fetch_requests", float64(allTimeWebFetch), "requests", "all-time estimate") - } - - snap.Raw["tool_usage"] = summarizeCountMap(toolUsageCounts, 6) - snap.Raw["language_usage"] = summarizeCountMap(languageUsageCounts, 8) - snap.Raw["project_usage"] 
= summarizeTotalsMap(projectTotals, true, 6) - snap.Raw["agent_usage"] = summarizeTotalsMap(agentTotals, false, 4) - snap.Raw["service_tier_usage"] = summarizeFloatMap(serviceTierTotals, "tok", 4) - snap.Raw["inference_geo_usage"] = summarizeFloatMap(inferenceGeoTotals, "tok", 4) - if allTimeCacheRead > 0 || allTimeCacheCreate > 0 { - snap.Raw["cache_usage"] = fmt.Sprintf("read %s · create %s (1h %s, 5m %s)", - shortTokenCount(float64(allTimeCacheRead)), - shortTokenCount(float64(allTimeCacheCreate)), - shortTokenCount(float64(allTimeCacheCreate1h)), - shortTokenCount(float64(allTimeCacheCreate5m)), - ) - } - snap.Raw["project_count"] = fmt.Sprintf("%d", len(projectTotals)) - snap.Raw["tool_count"] = fmt.Sprintf("%d", len(toolUsageCounts)) - - snap.Raw["jsonl_total_entries"] = fmt.Sprintf("%d", allTimeEntries) - snap.Raw["jsonl_total_blocks"] = fmt.Sprintf("%d", len(blockStartCandidates)) - snap.Raw["jsonl_unique_requests"] = fmt.Sprintf("%d", len(seenUsageKeys)) - buildModelUsageSummaryRaw(snap) - + applyConversationUsageProjection(snap, conversationUsageProjection{ + now: now, + inCurrentBlock: inCurrentBlock, + currentBlockStart: currentBlockStart, + currentBlockEnd: currentBlockEnd, + blockCostUSD: blockCostUSD, + blockInputTokens: blockInputTokens, + blockOutputTokens: blockOutputTokens, + blockCacheRead: blockCacheRead, + blockCacheCreate: blockCacheCreate, + blockMessages: blockMessages, + blockModels: blockModels, + blockStartCandidates: blockStartCandidates, + todayCostUSD: todayCostUSD, + todayInputTokens: todayInputTokens, + todayOutputTokens: todayOutputTokens, + todayCacheRead: todayCacheRead, + todayCacheCreate: todayCacheCreate, + todayMessages: todayMessages, + todayModels: todayModels, + todaySessions: todaySessions, + todayCacheCreate5m: todayCacheCreate5m, + todayCacheCreate1h: todayCacheCreate1h, + todayReasoning: todayReasoning, + todayToolCalls: todayToolCalls, + todayWebSearch: todayWebSearch, + todayWebFetch: todayWebFetch, + weeklyCostUSD: 
weeklyCostUSD, + weeklyInputTokens: weeklyInputTokens, + weeklyOutputTokens: weeklyOutputTokens, + weeklyMessages: weeklyMessages, + weeklySessions: weeklySessions, + weeklyCacheRead: weeklyCacheRead, + weeklyCacheCreate: weeklyCacheCreate, + weeklyCacheCreate5m: weeklyCacheCreate5m, + weeklyCacheCreate1h: weeklyCacheCreate1h, + weeklyReasoning: weeklyReasoning, + weeklyToolCalls: weeklyToolCalls, + weeklyWebSearch: weeklyWebSearch, + weeklyWebFetch: weeklyWebFetch, + allTimeCostUSD: allTimeCostUSD, + allTimeEntries: allTimeEntries, + allTimeInputTokens: allTimeInputTokens, + allTimeOutputTokens: allTimeOutputTokens, + allTimeCacheRead: allTimeCacheRead, + allTimeCacheCreate: allTimeCacheCreate, + allTimeCacheCreate5m: allTimeCacheCreate5m, + allTimeCacheCreate1h: allTimeCacheCreate1h, + allTimeReasoning: allTimeReasoning, + allTimeToolCalls: allTimeToolCalls, + allTimeWebSearch: allTimeWebSearch, + allTimeWebFetch: allTimeWebFetch, + allTimeLinesAdded: allTimeLinesAdded, + allTimeLinesRemoved: allTimeLinesRemoved, + allTimeCommitCount: allTimeCommitCount, + modelTotals: modelTotals, + clientTotals: clientTotals, + projectTotals: projectTotals, + agentTotals: agentTotals, + serviceTierTotals: serviceTierTotals, + inferenceGeoTotals: inferenceGeoTotals, + toolUsageCounts: toolUsageCounts, + languageUsageCounts: languageUsageCounts, + changedFiles: changedFiles, + seenUsageKeys: seenUsageKeys, + dailyClientTokens: dailyClientTokens, + dailyTokenTotals: dailyTokenTotals, + dailyMessages: dailyMessages, + dailyCost: dailyCost, + dailyModelTokens: dailyModelTokens, + }) return nil } diff --git a/internal/providers/claude_code/conversation_usage_projection.go b/internal/providers/claude_code/conversation_usage_projection.go new file mode 100644 index 0000000..d06a913 --- /dev/null +++ b/internal/providers/claude_code/conversation_usage_projection.go @@ -0,0 +1,410 @@ +package claude_code + +import ( + "fmt" + "math" + "sort" + "strings" + "time" + + 
"github.com/janekbaraniewski/openusage/internal/core" +) + +type conversationUsageProjection struct { + now time.Time + inCurrentBlock bool + currentBlockStart time.Time + currentBlockEnd time.Time + blockCostUSD float64 + blockInputTokens int + blockOutputTokens int + blockCacheRead int + blockCacheCreate int + blockMessages int + blockModels map[string]bool + blockStartCandidates []time.Time + + todayCostUSD float64 + todayInputTokens int + todayOutputTokens int + todayCacheRead int + todayCacheCreate int + todayMessages int + todayModels map[string]bool + todaySessions map[string]bool + todayCacheCreate5m int + todayCacheCreate1h int + todayReasoning int + todayToolCalls int + todayWebSearch int + todayWebFetch int + + weeklyCostUSD float64 + weeklyInputTokens int + weeklyOutputTokens int + weeklyMessages int + weeklySessions map[string]bool + weeklyCacheRead int + weeklyCacheCreate int + weeklyCacheCreate5m int + weeklyCacheCreate1h int + weeklyReasoning int + weeklyToolCalls int + weeklyWebSearch int + weeklyWebFetch int + + allTimeCostUSD float64 + allTimeEntries int + allTimeInputTokens int + allTimeOutputTokens int + allTimeCacheRead int + allTimeCacheCreate int + allTimeCacheCreate5m int + allTimeCacheCreate1h int + allTimeReasoning int + allTimeToolCalls int + allTimeWebSearch int + allTimeWebFetch int + allTimeLinesAdded int + allTimeLinesRemoved int + allTimeCommitCount int + + modelTotals map[string]*modelUsageTotals + clientTotals map[string]*modelUsageTotals + projectTotals map[string]*modelUsageTotals + agentTotals map[string]*modelUsageTotals + serviceTierTotals map[string]float64 + inferenceGeoTotals map[string]float64 + + toolUsageCounts map[string]int + languageUsageCounts map[string]int + changedFiles map[string]bool + seenUsageKeys map[string]bool + + dailyClientTokens map[string]map[string]float64 + dailyTokenTotals map[string]int + dailyMessages map[string]int + dailyCost map[string]float64 + dailyModelTokens map[string]map[string]int +} + 
+func applyConversationUsageProjection(snap *core.UsageSnapshot, p conversationUsageProjection) { + for model, totals := range p.modelTotals { + modelPrefix := "model_" + model + setMetricMax(snap, modelPrefix+"_input_tokens", totals.input, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_output_tokens", totals.output, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cached_tokens", totals.cached, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cache_creation_tokens", totals.cacheCreate, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cache_creation_5m_tokens", totals.cache5m, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cache_creation_1h_tokens", totals.cache1h, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_reasoning_tokens", totals.reasoning, "tokens", "all-time estimate") + setMetricMax(snap, modelPrefix+"_web_search_requests", totals.webSearch, "requests", "all-time estimate") + setMetricMax(snap, modelPrefix+"_web_fetch_requests", totals.webFetch, "requests", "all-time estimate") + setMetricMax(snap, modelPrefix+"_cost_usd", totals.cost, "USD", "all-time estimate") + } + + for client, totals := range p.clientTotals { + key := "client_" + client + setMetricMax(snap, key+"_input_tokens", totals.input, "tokens", "all-time") + setMetricMax(snap, key+"_output_tokens", totals.output, "tokens", "all-time") + setMetricMax(snap, key+"_cached_tokens", totals.cached, "tokens", "all-time") + setMetricMax(snap, key+"_reasoning_tokens", totals.reasoning, "tokens", "all-time") + setMetricMax(snap, key+"_total_tokens", totals.input+totals.output+totals.cached+totals.cacheCreate+totals.reasoning, "tokens", "all-time") + setMetricMax(snap, key+"_sessions", totals.sessions, "sessions", "all-time") + } + + if snap.DailySeries == nil { + snap.DailySeries = make(map[string][]core.TimePoint) + } + dates := core.SortedStringKeys(p.dailyTokenTotals) + + if 
len(snap.DailySeries["messages"]) == 0 && len(dates) > 0 { + for _, d := range dates { + snap.DailySeries["messages"] = append(snap.DailySeries["messages"], core.TimePoint{Date: d, Value: float64(p.dailyMessages[d])}) + snap.DailySeries["tokens_total"] = append(snap.DailySeries["tokens_total"], core.TimePoint{Date: d, Value: float64(p.dailyTokenTotals[d])}) + snap.DailySeries["cost"] = append(snap.DailySeries["cost"], core.TimePoint{Date: d, Value: p.dailyCost[d]}) + } + + allModels := make(map[string]int64) + for _, dm := range p.dailyModelTokens { + for model, tokens := range dm { + allModels[model] += int64(tokens) + } + } + type modelVolume struct { + name string + total int64 + } + var ranked []modelVolume + for model, total := range allModels { + ranked = append(ranked, modelVolume{name: model, total: total}) + } + sort.Slice(ranked, func(i, j int) bool { return ranked[i].total > ranked[j].total }) + limit := min(5, len(ranked)) + for i := 0; i < limit; i++ { + model := ranked[i].name + key := fmt.Sprintf("tokens_%s", sanitizeModelName(model)) + for _, d := range dates { + snap.DailySeries[key] = append(snap.DailySeries[key], core.TimePoint{ + Date: d, + Value: float64(p.dailyModelTokens[d][model]), + }) + } + } + } + + if len(dates) > 0 { + clientNames := make(map[string]bool) + for _, byClient := range p.dailyClientTokens { + for client := range byClient { + clientNames[client] = true + } + } + for client := range clientNames { + key := "tokens_client_" + client + for _, d := range dates { + snap.DailySeries[key] = append(snap.DailySeries[key], core.TimePoint{ + Date: d, + Value: p.dailyClientTokens[d][client], + }) + } + } + } + + if p.todayCostUSD > 0 { + snap.Metrics["today_api_cost"] = core.Metric{Used: core.Float64Ptr(p.todayCostUSD), Unit: "USD", Window: "since midnight"} + } + if p.todayInputTokens > 0 { + in := float64(p.todayInputTokens) + snap.Metrics["today_input_tokens"] = core.Metric{Used: &in, Unit: "tokens", Window: "since midnight"} + } + if 
p.todayOutputTokens > 0 { + out := float64(p.todayOutputTokens) + snap.Metrics["today_output_tokens"] = core.Metric{Used: &out, Unit: "tokens", Window: "since midnight"} + } + if p.todayCacheRead > 0 { + value := float64(p.todayCacheRead) + snap.Metrics["today_cache_read_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "since midnight"} + } + if p.todayCacheCreate > 0 { + value := float64(p.todayCacheCreate) + snap.Metrics["today_cache_create_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "since midnight"} + } + if p.todayMessages > 0 { + setMetricMax(snap, "messages_today", float64(p.todayMessages), "messages", "since midnight") + } + if len(p.todaySessions) > 0 { + setMetricMax(snap, "sessions_today", float64(len(p.todaySessions)), "sessions", "since midnight") + } + if p.todayToolCalls > 0 { + setMetricMax(snap, "tool_calls_today", float64(p.todayToolCalls), "calls", "since midnight") + } + if p.todayReasoning > 0 { + value := float64(p.todayReasoning) + snap.Metrics["today_reasoning_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "since midnight"} + } + if p.todayCacheCreate5m > 0 { + value := float64(p.todayCacheCreate5m) + snap.Metrics["today_cache_create_5m_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "since midnight"} + } + if p.todayCacheCreate1h > 0 { + value := float64(p.todayCacheCreate1h) + snap.Metrics["today_cache_create_1h_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "since midnight"} + } + if p.todayWebSearch > 0 { + value := float64(p.todayWebSearch) + snap.Metrics["today_web_search_requests"] = core.Metric{Used: &value, Unit: "requests", Window: "since midnight"} + } + if p.todayWebFetch > 0 { + value := float64(p.todayWebFetch) + snap.Metrics["today_web_fetch_requests"] = core.Metric{Used: &value, Unit: "requests", Window: "since midnight"} + } + + if p.weeklyCostUSD > 0 { + snap.Metrics["7d_api_cost"] = core.Metric{Used: core.Float64Ptr(p.weeklyCostUSD), Unit: "USD", Window: 
"rolling 7 days"} + } + if p.weeklyMessages > 0 { + wm := float64(p.weeklyMessages) + snap.Metrics["7d_messages"] = core.Metric{Used: &wm, Unit: "messages", Window: "rolling 7 days"} + in := float64(p.weeklyInputTokens) + out := float64(p.weeklyOutputTokens) + snap.Metrics["7d_input_tokens"] = core.Metric{Used: &in, Unit: "tokens", Window: "rolling 7 days"} + snap.Metrics["7d_output_tokens"] = core.Metric{Used: &out, Unit: "tokens", Window: "rolling 7 days"} + } + if p.weeklyCacheRead > 0 { + value := float64(p.weeklyCacheRead) + snap.Metrics["7d_cache_read_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "rolling 7 days"} + } + if p.weeklyCacheCreate > 0 { + value := float64(p.weeklyCacheCreate) + snap.Metrics["7d_cache_create_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "rolling 7 days"} + } + if p.weeklyCacheCreate5m > 0 { + value := float64(p.weeklyCacheCreate5m) + snap.Metrics["7d_cache_create_5m_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "rolling 7 days"} + } + if p.weeklyCacheCreate1h > 0 { + value := float64(p.weeklyCacheCreate1h) + snap.Metrics["7d_cache_create_1h_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "rolling 7 days"} + } + if p.weeklyReasoning > 0 { + value := float64(p.weeklyReasoning) + snap.Metrics["7d_reasoning_tokens"] = core.Metric{Used: &value, Unit: "tokens", Window: "rolling 7 days"} + } + if p.weeklyToolCalls > 0 { + setMetricMax(snap, "7d_tool_calls", float64(p.weeklyToolCalls), "calls", "rolling 7 days") + } + if p.weeklyWebSearch > 0 { + value := float64(p.weeklyWebSearch) + snap.Metrics["7d_web_search_requests"] = core.Metric{Used: &value, Unit: "requests", Window: "rolling 7 days"} + } + if p.weeklyWebFetch > 0 { + value := float64(p.weeklyWebFetch) + snap.Metrics["7d_web_fetch_requests"] = core.Metric{Used: &value, Unit: "requests", Window: "rolling 7 days"} + } + if len(p.weeklySessions) > 0 { + setMetricMax(snap, "7d_sessions", float64(len(p.weeklySessions)), 
"sessions", "rolling 7 days") + } + + if p.todayMessages > 0 { + today := p.now.Format("2006-01-02") + snap.Raw["jsonl_today_date"] = today + snap.Raw["jsonl_today_messages"] = fmt.Sprintf("%d", p.todayMessages) + snap.Raw["jsonl_today_input_tokens"] = fmt.Sprintf("%d", p.todayInputTokens) + snap.Raw["jsonl_today_output_tokens"] = fmt.Sprintf("%d", p.todayOutputTokens) + snap.Raw["jsonl_today_cache_read_tokens"] = fmt.Sprintf("%d", p.todayCacheRead) + snap.Raw["jsonl_today_cache_create_tokens"] = fmt.Sprintf("%d", p.todayCacheCreate) + snap.Raw["jsonl_today_reasoning_tokens"] = fmt.Sprintf("%d", p.todayReasoning) + snap.Raw["jsonl_today_web_search_requests"] = fmt.Sprintf("%d", p.todayWebSearch) + snap.Raw["jsonl_today_web_fetch_requests"] = fmt.Sprintf("%d", p.todayWebFetch) + snap.Raw["jsonl_today_models"] = strings.Join(core.SortedStringKeys(p.todayModels), ", ") + } + + if p.inCurrentBlock { + snap.Metrics["5h_block_cost"] = core.Metric{ + Used: core.Float64Ptr(p.blockCostUSD), + Unit: "USD", + Window: fmt.Sprintf("%s – %s", p.currentBlockStart.Format("15:04"), p.currentBlockEnd.Format("15:04")), + } + blockIn := float64(p.blockInputTokens) + blockOut := float64(p.blockOutputTokens) + blockMsgs := float64(p.blockMessages) + snap.Metrics["5h_block_input"] = core.Metric{Used: &blockIn, Unit: "tokens", Window: "current 5h block"} + snap.Metrics["5h_block_output"] = core.Metric{Used: &blockOut, Unit: "tokens", Window: "current 5h block"} + snap.Metrics["5h_block_msgs"] = core.Metric{Used: &blockMsgs, Unit: "messages", Window: "current 5h block"} + if p.blockCacheRead > 0 { + setMetricMax(snap, "5h_block_cache_read_tokens", float64(p.blockCacheRead), "tokens", "current 5h block") + } + if p.blockCacheCreate > 0 { + setMetricMax(snap, "5h_block_cache_create_tokens", float64(p.blockCacheCreate), "tokens", "current 5h block") + } + + remaining := p.currentBlockEnd.Sub(p.now) + if remaining > 0 { + snap.Resets["billing_block"] = p.currentBlockEnd + 
snap.Raw["block_time_remaining"] = fmt.Sprintf("%s", remaining.Round(time.Minute)) + elapsed := p.now.Sub(p.currentBlockStart) + progress := math.Min(elapsed.Seconds()/billingBlockDuration.Seconds()*100, 100) + snap.Raw["block_progress_pct"] = fmt.Sprintf("%.0f", progress) + } + + snap.Raw["block_start"] = p.currentBlockStart.Format(time.RFC3339) + snap.Raw["block_end"] = p.currentBlockEnd.Format(time.RFC3339) + snap.Raw["block_models"] = strings.Join(core.SortedStringKeys(p.blockModels), ", ") + + elapsed := p.now.Sub(p.currentBlockStart) + if elapsed > time.Minute && p.blockCostUSD > 0 { + burnRate := p.blockCostUSD / elapsed.Hours() + snap.Metrics["burn_rate"] = core.Metric{Used: core.Float64Ptr(burnRate), Unit: "USD/h", Window: "current 5h block"} + snap.Raw["burn_rate"] = fmt.Sprintf("$%.2f/hour", burnRate) + } + } + + if p.allTimeCostUSD > 0 { + snap.Metrics["all_time_api_cost"] = core.Metric{Used: core.Float64Ptr(p.allTimeCostUSD), Unit: "USD", Window: "all-time estimate"} + } + if p.allTimeInputTokens > 0 { + setMetricMax(snap, "all_time_input_tokens", float64(p.allTimeInputTokens), "tokens", "all-time estimate") + } + if p.allTimeOutputTokens > 0 { + setMetricMax(snap, "all_time_output_tokens", float64(p.allTimeOutputTokens), "tokens", "all-time estimate") + } + if p.allTimeCacheRead > 0 { + setMetricMax(snap, "all_time_cache_read_tokens", float64(p.allTimeCacheRead), "tokens", "all-time estimate") + } + if p.allTimeCacheCreate > 0 { + setMetricMax(snap, "all_time_cache_create_tokens", float64(p.allTimeCacheCreate), "tokens", "all-time estimate") + } + if p.allTimeCacheCreate5m > 0 { + setMetricMax(snap, "all_time_cache_create_5m_tokens", float64(p.allTimeCacheCreate5m), "tokens", "all-time estimate") + } + if p.allTimeCacheCreate1h > 0 { + setMetricMax(snap, "all_time_cache_create_1h_tokens", float64(p.allTimeCacheCreate1h), "tokens", "all-time estimate") + } + if p.allTimeReasoning > 0 { + setMetricMax(snap, "all_time_reasoning_tokens", 
float64(p.allTimeReasoning), "tokens", "all-time estimate") + } + if p.allTimeToolCalls > 0 { + setMetricMax(snap, "all_time_tool_calls", float64(p.allTimeToolCalls), "calls", "all-time estimate") + setMetricMax(snap, "tool_calls_total", float64(p.allTimeToolCalls), "calls", "all-time estimate") + setMetricMax(snap, "tool_completed", float64(p.allTimeToolCalls), "calls", "all-time estimate") + setMetricMax(snap, "tool_success_rate", 100.0, "%", "all-time estimate") + } + if len(p.seenUsageKeys) > 0 { + setMetricMax(snap, "total_prompts", float64(len(p.seenUsageKeys)), "prompts", "all-time estimate") + } + if len(p.changedFiles) > 0 { + setMetricMax(snap, "composer_files_changed", float64(len(p.changedFiles)), "files", "all-time estimate") + } + if p.allTimeLinesAdded > 0 { + setMetricMax(snap, "composer_lines_added", float64(p.allTimeLinesAdded), "lines", "all-time estimate") + } + if p.allTimeLinesRemoved > 0 { + setMetricMax(snap, "composer_lines_removed", float64(p.allTimeLinesRemoved), "lines", "all-time estimate") + } + if p.allTimeCommitCount > 0 { + setMetricMax(snap, "scored_commits", float64(p.allTimeCommitCount), "commits", "all-time estimate") + } + if p.allTimeLinesAdded > 0 || p.allTimeLinesRemoved > 0 { + hundred := 100.0 + zero := 0.0 + snap.Metrics["ai_code_percentage"] = core.Metric{Used: &hundred, Remaining: &zero, Limit: &hundred, Unit: "%", Window: "all-time estimate"} + } + for lang, count := range p.languageUsageCounts { + if count > 0 { + setMetricMax(snap, "lang_"+sanitizeModelName(lang), float64(count), "requests", "all-time estimate") + } + } + for toolName, count := range p.toolUsageCounts { + if count > 0 { + setMetricMax(snap, "tool_"+sanitizeModelName(toolName), float64(count), "calls", "all-time estimate") + } + } + if p.allTimeWebSearch > 0 { + setMetricMax(snap, "all_time_web_search_requests", float64(p.allTimeWebSearch), "requests", "all-time estimate") + } + if p.allTimeWebFetch > 0 { + setMetricMax(snap, 
"all_time_web_fetch_requests", float64(p.allTimeWebFetch), "requests", "all-time estimate") + } + + snap.Raw["tool_usage"] = summarizeCountMap(p.toolUsageCounts, 6) + snap.Raw["language_usage"] = summarizeCountMap(p.languageUsageCounts, 8) + snap.Raw["project_usage"] = summarizeTotalsMap(p.projectTotals, true, 6) + snap.Raw["agent_usage"] = summarizeTotalsMap(p.agentTotals, false, 4) + snap.Raw["service_tier_usage"] = summarizeFloatMap(p.serviceTierTotals, "tok", 4) + snap.Raw["inference_geo_usage"] = summarizeFloatMap(p.inferenceGeoTotals, "tok", 4) + if p.allTimeCacheRead > 0 || p.allTimeCacheCreate > 0 { + snap.Raw["cache_usage"] = fmt.Sprintf("read %s · create %s (1h %s, 5m %s)", + shortTokenCount(float64(p.allTimeCacheRead)), + shortTokenCount(float64(p.allTimeCacheCreate)), + shortTokenCount(float64(p.allTimeCacheCreate1h)), + shortTokenCount(float64(p.allTimeCacheCreate5m)), + ) + } + snap.Raw["project_count"] = fmt.Sprintf("%d", len(p.projectTotals)) + snap.Raw["tool_count"] = fmt.Sprintf("%d", len(p.toolUsageCounts)) + snap.Raw["jsonl_total_entries"] = fmt.Sprintf("%d", p.allTimeEntries) + snap.Raw["jsonl_total_blocks"] = fmt.Sprintf("%d", len(p.blockStartCandidates)) + snap.Raw["jsonl_unique_requests"] = fmt.Sprintf("%d", len(p.seenUsageKeys)) + buildModelUsageSummaryRaw(snap) +} diff --git a/internal/providers/claude_code/test_helpers_test.go b/internal/providers/claude_code/test_helpers_test.go new file mode 100644 index 0000000..09c066b --- /dev/null +++ b/internal/providers/claude_code/test_helpers_test.go @@ -0,0 +1,18 @@ +package claude_code + +import "github.com/janekbaraniewski/openusage/internal/core" + +func testClaudeAccount(id, statsPath, accountPath string) core.AccountConfig { + return core.AccountConfig{ + ID: id, + Binary: statsPath, + BaseURL: accountPath, + } +} + +func testClaudeAccountWithDir(id, statsPath, accountPath, claudeDir string) core.AccountConfig { + acct := testClaudeAccount(id, statsPath, accountPath) + acct.ExtraData = 
map[string]string{"claude_dir": claudeDir} + acct.SetHint("claude_dir", claudeDir) + return acct +} diff --git a/internal/providers/codex/live_usage.go b/internal/providers/codex/live_usage.go index 2cc5ebf..ec21c10 100644 --- a/internal/providers/codex/live_usage.go +++ b/internal/providers/codex/live_usage.go @@ -313,8 +313,8 @@ func resolveChatGPTBaseURL(acct core.AccountConfig, configDir string) string { switch { case strings.TrimSpace(acct.BaseURL) != "": return normalizeChatGPTBaseURL(acct.BaseURL) - case acct.ExtraData != nil && strings.TrimSpace(acct.ExtraData["chatgpt_base_url"]) != "": - return normalizeChatGPTBaseURL(acct.ExtraData["chatgpt_base_url"]) + case strings.TrimSpace(acct.Hint("chatgpt_base_url", "")) != "": + return normalizeChatGPTBaseURL(acct.Hint("chatgpt_base_url", "")) default: if fromConfig := readChatGPTBaseURLFromConfig(configDir); fromConfig != "" { return normalizeChatGPTBaseURL(fromConfig) diff --git a/internal/providers/copilot/copilot.go b/internal/providers/copilot/copilot.go index ac819f7..2bfe882 100644 --- a/internal/providers/copilot/copilot.go +++ b/internal/providers/copilot/copilot.go @@ -245,7 +245,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa if configuredBinary == "" { configuredBinary = "gh" } - ghBinary, copilotBinary := resolveCopilotBinaries(configuredBinary, acct.ExtraData) + ghBinary, copilotBinary := resolveCopilotBinaries(configuredBinary, acct) if ghBinary == "" && copilotBinary == "" { return core.UsageSnapshot{ ProviderID: p.ID(), @@ -308,7 +308,7 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa return snap, nil } -func resolveCopilotBinaries(configuredBinary string, extraData map[string]string) (string, string) { +func resolveCopilotBinaries(configuredBinary string, acct core.AccountConfig) (string, string) { ghBinary := "" copilotBinary := "" @@ -322,8 +322,8 @@ func resolveCopilotBinaries(configuredBinary string, extraData 
map[string]string ghBinary = resolveBinaryPath("gh") } - if copilotBinary == "" && extraData != nil { - copilotBinary = resolveBinaryPath(extraData["copilot_binary"]) + if copilotBinary == "" { + copilotBinary = resolveBinaryPath(acct.Hint("copilot_binary", "")) } if copilotBinary == "" { copilotBinary = resolveBinaryPath("copilot") @@ -366,13 +366,11 @@ func detectCopilotVersion(ctx context.Context, ghBinary, copilotBinary string) ( } func (p *Provider) fetchLocalData(acct core.AccountConfig, snap *core.UsageSnapshot) { - if acct.ExtraData != nil { - if dir := strings.TrimSpace(acct.ExtraData["config_dir"]); dir != "" { - p.readConfig(dir, snap) - logData := p.readLogs(dir, snap) - p.readSessions(dir, snap, logData) - return - } + if dir := strings.TrimSpace(acct.Hint("config_dir", "")); dir != "" { + p.readConfig(dir, snap) + logData := p.readLogs(dir, snap) + p.readSessions(dir, snap, logData) + return } home, err := os.UserHomeDir() diff --git a/internal/providers/copilot/copilot_metrics_test.go b/internal/providers/copilot/copilot_metrics_test.go new file mode 100644 index 0000000..874c4da --- /dev/null +++ b/internal/providers/copilot/copilot_metrics_test.go @@ -0,0 +1,327 @@ +package copilot + +import ( + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestReadSessions_EmitsModelTokenMetrics(t *testing.T) { + p := New() + tmp := t.TempDir() + copilotDir := filepath.Join(tmp, ".copilot") + logDir := filepath.Join(copilotDir, "logs") + sessionDir := filepath.Join(copilotDir, "session-state") + if err := os.MkdirAll(logDir, 0o755); err != nil { + t.Fatalf("mkdir logs: %v", err) + } + if err := os.MkdirAll(sessionDir, 0o755); err != nil { + t.Fatalf("mkdir sessions: %v", err) + } + + logContent := strings.Join([]string{ + "2026-02-20T01:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", + "2026-02-20T01:00:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 
tokens) below threshold 80%", + "2026-02-20T01:00:02.000Z [INFO] CompactionProcessor: Utilization 1.4% (1800/128000 tokens) below threshold 80%", + "2026-02-20T02:00:00.000Z [INFO] Workspace initialized: s2 (checkpoints: 0)", + "2026-02-20T02:00:01.000Z [INFO] CompactionProcessor: Utilization 0.7% (900/128000 tokens) below threshold 80%", + }, "\n") + if err := os.WriteFile(filepath.Join(logDir, "process-test.log"), []byte(logContent), 0o644); err != nil { + t.Fatalf("write log: %v", err) + } + + mkSession := func(id, model, created, updated string) { + dir := filepath.Join(sessionDir, id) + if err := os.MkdirAll(dir, 0o755); err != nil { + t.Fatalf("mkdir %s: %v", id, err) + } + ws := strings.Join([]string{ + "id: " + id, + "repository: owner/repo", + "branch: main", + "created_at: " + created, + "updated_at: " + updated, + }, "\n") + if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { + t.Fatalf("write workspace %s: %v", id, err) + } + events := strings.Join([]string{ + `{"type":"session.model_change","timestamp":"` + created + `","data":{"newModel":"` + model + `"}}`, + `{"type":"user.message","timestamp":"` + created + `","data":{"content":"hello"}}`, + `{"type":"assistant.turn_start","timestamp":"` + created + `","data":{"turnId":"0"}}`, + `{"type":"assistant.message","timestamp":"` + updated + `","data":{"content":"world","reasoningText":"r","toolRequests":[{"name":"read_file"}]}}`, + }, "\n") + if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(events), 0o644); err != nil { + t.Fatalf("write events %s: %v", id, err) + } + } + + mkSession("s1", "gpt-5-mini", "2026-02-20T01:00:00Z", "2026-02-20T01:10:00Z") + mkSession("s2", "claude-sonnet-4.6", "2026-02-20T02:00:00Z", "2026-02-20T02:10:00Z") + + snap := &core.UsageSnapshot{ + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + DailySeries: make(map[string][]core.TimePoint), + } + + logs := 
p.readLogs(copilotDir, snap) + p.readSessions(copilotDir, snap, logs) + + if m := snap.Metrics["model_gpt_5_mini_input_tokens"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("model_gpt_5_mini_input_tokens missing/zero: %+v", m) + } + if m := snap.Metrics["model_claude_sonnet_4_6_input_tokens"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("model_claude_sonnet_4_6_input_tokens missing/zero: %+v", m) + } + if _, ok := snap.DailySeries["tokens_gpt_5_mini"]; !ok { + t.Fatal("missing tokens_gpt_5_mini series") + } + if m := snap.Metrics["cli_input_tokens"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("cli_input_tokens missing/zero: %+v", m) + } + if m := snap.Metrics["client_owner_repo_total_tokens"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("client_owner_repo_total_tokens missing/zero: %+v", m) + } + if m := snap.Metrics["client_owner_repo_sessions"]; m.Used == nil || *m.Used != 2 { + t.Fatalf("client_owner_repo_sessions = %+v, want 2", m) + } + if _, ok := snap.DailySeries["tokens_client_owner_repo"]; !ok { + t.Fatal("missing tokens_client_owner_repo series") + } + if got := snap.Raw["client_usage"]; !strings.Contains(got, "owner/repo") { + t.Fatalf("client_usage = %q, want owner/repo", got) + } + if m := snap.Metrics["messages_today"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("messages_today missing/zero: %+v", m) + } + if m := snap.Metrics["tool_read_file"]; m.Used == nil || *m.Used != 2 { + t.Fatalf("tool_read_file = %+v, want 2 calls", m) + } + if _, ok := snap.Metrics["context_window"]; !ok { + t.Fatal("missing context_window metric") + } +} + +func TestReadLogs_UsesNewestTokenEntryByTimestamp(t *testing.T) { + p := New() + tmp := t.TempDir() + copilotDir := filepath.Join(tmp, ".copilot") + logDir := filepath.Join(copilotDir, "logs") + if err := os.MkdirAll(logDir, 0o755); err != nil { + t.Fatalf("mkdir logs: %v", err) + } + + newer := strings.Join([]string{ + "2026-02-21T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", + "2026-02-21T10:00:01.000Z 
[INFO] CompactionProcessor: Utilization 3.9% (5000/128000 tokens) below threshold 80%", + }, "\n") + older := strings.Join([]string{ + "2026-02-20T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", + "2026-02-20T10:00:01.000Z [INFO] CompactionProcessor: Utilization 0.8% (1000/128000 tokens) below threshold 80%", + }, "\n") + // Lexicographic order is intentionally opposite to timestamp order. + if err := os.WriteFile(filepath.Join(logDir, "a-new.log"), []byte(newer), 0o644); err != nil { + t.Fatalf("write newer log: %v", err) + } + if err := os.WriteFile(filepath.Join(logDir, "z-old.log"), []byte(older), 0o644); err != nil { + t.Fatalf("write older log: %v", err) + } + + snap := &core.UsageSnapshot{ + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + } + logs := p.readLogs(copilotDir, snap) + + if got := snap.Raw["context_window_tokens"]; got != "5000/128000" { + t.Fatalf("context_window_tokens = %q, want %q", got, "5000/128000") + } + if got := logs.SessionTokens["s1"].Used; got != 5000 { + t.Fatalf("session s1 used = %d, want 5000", got) + } +} + +func TestReadSessions_UsesLatestEventTimestampForRecency(t *testing.T) { + p := New() + tmp := t.TempDir() + copilotDir := filepath.Join(tmp, ".copilot") + logDir := filepath.Join(copilotDir, "logs") + sessionDir := filepath.Join(copilotDir, "session-state") + if err := os.MkdirAll(logDir, 0o755); err != nil { + t.Fatalf("mkdir logs: %v", err) + } + if err := os.MkdirAll(sessionDir, 0o755); err != nil { + t.Fatalf("mkdir sessions: %v", err) + } + + logContent := strings.Join([]string{ + "2026-02-21T13:05:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", + "2026-02-21T13:05:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", + "2026-02-21T15:00:00.000Z [INFO] Workspace initialized: s2 (checkpoints: 0)", + "2026-02-21T15:00:01.000Z [INFO] CompactionProcessor: Utilization 1.7% (2200/128000 
tokens) below threshold 80%", + }, "\n") + if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { + t.Fatalf("write log: %v", err) + } + + mkSession := func(id, model, wsCreated, wsUpdated, evtTs string) { + dir := filepath.Join(sessionDir, id) + if err := os.MkdirAll(dir, 0o755); err != nil { + t.Fatalf("mkdir %s: %v", id, err) + } + ws := strings.Join([]string{ + "id: " + id, + "repository: owner/repo", + "branch: main", + "created_at: " + wsCreated, + "updated_at: " + wsUpdated, + }, "\n") + if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { + t.Fatalf("write workspace %s: %v", id, err) + } + events := strings.Join([]string{ + `{"type":"session.model_change","timestamp":"` + evtTs + `","data":{"newModel":"` + model + `"}}`, + `{"type":"user.message","timestamp":"` + evtTs + `","data":{"content":"hello"}}`, + `{"type":"assistant.turn_start","timestamp":"` + evtTs + `","data":{"turnId":"0"}}`, + }, "\n") + if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(events), 0o644); err != nil { + t.Fatalf("write events %s: %v", id, err) + } + } + + // Workspace metadata claims s1 is newer, but session events show s2 is latest. 
+ mkSession("s1", "model-s1", "2026-02-21T10:00:00Z", "2026-02-21T13:00:00Z", "2026-02-21T13:05:00Z") + mkSession("s2", "model-s2", "2026-02-21T10:00:00Z", "2026-02-21T12:00:00Z", "2026-02-21T15:00:00Z") + + snap := &core.UsageSnapshot{ + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + DailySeries: make(map[string][]core.TimePoint), + } + logs := p.readLogs(copilotDir, snap) + p.readSessions(copilotDir, snap, logs) + + if got := snap.Raw["last_session_model"]; got != "model-s2" { + t.Fatalf("last_session_model = %q, want model-s2", got) + } + if got := snap.Raw["last_session_tokens"]; got != "2200/128000" { + t.Fatalf("last_session_tokens = %q, want 2200/128000", got) + } + if got := snap.Raw["last_session_time"]; got != "2026-02-21T15:00:01Z" { + t.Fatalf("last_session_time = %q, want 2026-02-21T15:00:01Z", got) + } +} + +func TestSessionShutdownDataParsing(t *testing.T) { + body := `{ + "shutdownType": "user_exit", + "totalPremiumRequests": 12, + "totalApiDurationMs": 45000, + "sessionStartTime": "2026-02-24T10:00:00Z", + "codeChanges": {"linesAdded": 150, "linesRemoved": 30, "filesModified": 5}, + "modelMetrics": { + "claude-sonnet-4.5": { + "requests": {"count": 10, "cost": 0.35}, + "usage": {"inputTokens": 52000, "outputTokens": 18000, "cacheReadTokens": 30000, "cacheWriteTokens": 5000} + }, + "gpt-5-mini": { + "requests": {"count": 2, "cost": 0.05}, + "usage": {"inputTokens": 3000, "outputTokens": 1000, "cacheReadTokens": 0, "cacheWriteTokens": 0} + } + } + }` + + var shutdown sessionShutdownData + if err := unmarshalJSON(body, &shutdown); err != nil { + t.Fatalf("unmarshal failed: %v", err) + } + + if shutdown.ShutdownType != "user_exit" { + t.Errorf("ShutdownType = %q, want %q", shutdown.ShutdownType, "user_exit") + } + if shutdown.TotalPremiumRequests != 12 { + t.Errorf("TotalPremiumRequests = %d, want 12", shutdown.TotalPremiumRequests) + } + if shutdown.TotalAPIDurationMs != 45000 { + 
t.Errorf("TotalAPIDurationMs = %d, want 45000", shutdown.TotalAPIDurationMs) + } + if shutdown.SessionStartTime != "2026-02-24T10:00:00Z" { + t.Errorf("SessionStartTime = %q", shutdown.SessionStartTime) + } + if shutdown.CodeChanges.LinesAdded != 150 { + t.Errorf("CodeChanges.LinesAdded = %d, want 150", shutdown.CodeChanges.LinesAdded) + } + if shutdown.CodeChanges.LinesRemoved != 30 { + t.Errorf("CodeChanges.LinesRemoved = %d, want 30", shutdown.CodeChanges.LinesRemoved) + } + if shutdown.CodeChanges.FilesModified != 5 { + t.Errorf("CodeChanges.FilesModified = %d, want 5", shutdown.CodeChanges.FilesModified) + } + if len(shutdown.ModelMetrics) != 2 { + t.Fatalf("expected 2 model metrics, got %d", len(shutdown.ModelMetrics)) + } + + claude := shutdown.ModelMetrics["claude-sonnet-4.5"] + if claude.Requests.Count != 10 { + t.Errorf("claude requests count = %d, want 10", claude.Requests.Count) + } + if claude.Requests.Cost != 0.35 { + t.Errorf("claude requests cost = %f, want 0.35", claude.Requests.Cost) + } + if claude.Usage.InputTokens != 52000 { + t.Errorf("claude input tokens = %f, want 52000", claude.Usage.InputTokens) + } + if claude.Usage.OutputTokens != 18000 { + t.Errorf("claude output tokens = %f, want 18000", claude.Usage.OutputTokens) + } + if claude.Usage.CacheReadTokens != 30000 { + t.Errorf("claude cache read tokens = %f, want 30000", claude.Usage.CacheReadTokens) + } + if claude.Usage.CacheWriteTokens != 5000 { + t.Errorf("claude cache write tokens = %f, want 5000", claude.Usage.CacheWriteTokens) + } + + gpt := shutdown.ModelMetrics["gpt-5-mini"] + if gpt.Requests.Count != 2 { + t.Errorf("gpt requests count = %d, want 2", gpt.Requests.Count) + } + if gpt.Requests.Cost != 0.05 { + t.Errorf("gpt requests cost = %f, want 0.05", gpt.Requests.Cost) + } +} + +func TestSessionShutdownDataParsing_Empty(t *testing.T) { + body := `{ + "shutdownType": "timeout", + "totalPremiumRequests": 0, + "totalApiDurationMs": 0, + "codeChanges": {}, + "modelMetrics": {} + }` 
+ + var shutdown sessionShutdownData + if err := unmarshalJSON(body, &shutdown); err != nil { + t.Fatalf("unmarshal failed: %v", err) + } + + if shutdown.ShutdownType != "timeout" { + t.Errorf("ShutdownType = %q, want %q", shutdown.ShutdownType, "timeout") + } + if shutdown.TotalPremiumRequests != 0 { + t.Errorf("TotalPremiumRequests = %d, want 0", shutdown.TotalPremiumRequests) + } + if shutdown.CodeChanges.LinesAdded != 0 { + t.Errorf("CodeChanges.LinesAdded = %d, want 0", shutdown.CodeChanges.LinesAdded) + } + if len(shutdown.ModelMetrics) != 0 { + t.Errorf("expected 0 model metrics, got %d", len(shutdown.ModelMetrics)) + } +} diff --git a/internal/providers/copilot/copilot_sessions_test.go b/internal/providers/copilot/copilot_sessions_test.go new file mode 100644 index 0000000..a53be2c --- /dev/null +++ b/internal/providers/copilot/copilot_sessions_test.go @@ -0,0 +1,671 @@ +package copilot + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestReadSessions_AccumulatesShutdownEvents(t *testing.T) { + p := New() + tmp := t.TempDir() + copilotDir := filepath.Join(tmp, ".copilot") + logDir := filepath.Join(copilotDir, "logs") + sessionDir := filepath.Join(copilotDir, "session-state") + if err := os.MkdirAll(logDir, 0o755); err != nil { + t.Fatalf("mkdir logs: %v", err) + } + if err := os.MkdirAll(sessionDir, 0o755); err != nil { + t.Fatalf("mkdir sessions: %v", err) + } + + logContent := strings.Join([]string{ + "2026-02-24T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", + "2026-02-24T10:00:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", + "2026-02-24T12:00:00.000Z [INFO] Workspace initialized: s2 (checkpoints: 0)", + "2026-02-24T12:00:01.000Z [INFO] CompactionProcessor: Utilization 0.7% (900/128000 tokens) below threshold 80%", + }, "\n") + if err := 
os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { + t.Fatalf("write log: %v", err) + } + + mkSessionWithShutdown := func(id, created, updated string, shutdownJSON string) { + dir := filepath.Join(sessionDir, id) + if err := os.MkdirAll(dir, 0o755); err != nil { + t.Fatalf("mkdir %s: %v", id, err) + } + ws := strings.Join([]string{ + "id: " + id, + "repository: owner/repo", + "branch: main", + "created_at: " + created, + "updated_at: " + updated, + }, "\n") + if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { + t.Fatalf("write workspace %s: %v", id, err) + } + events := strings.Join([]string{ + `{"type":"session.model_change","timestamp":"` + created + `","data":{"newModel":"claude-sonnet-4.5"}}`, + `{"type":"user.message","timestamp":"` + created + `","data":{"content":"hello"}}`, + `{"type":"assistant.turn_start","timestamp":"` + created + `","data":{"turnId":"0"}}`, + `{"type":"assistant.message","timestamp":"` + updated + `","data":{"content":"world","reasoningText":"r","toolRequests":[]}}`, + `{"type":"session.shutdown","timestamp":"` + updated + `","data":` + shutdownJSON + `}`, + }, "\n") + if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(events), 0o644); err != nil { + t.Fatalf("write events %s: %v", id, err) + } + } + + shutdown1 := `{ + "shutdownType": "user_exit", + "totalPremiumRequests": 8, + "totalApiDurationMs": 30000, + "sessionStartTime": "2026-02-24T10:00:00Z", + "codeChanges": {"linesAdded": 100, "linesRemoved": 20, "filesModified": 3}, + "modelMetrics": { + "claude-sonnet-4.5": { + "requests": {"count": 6, "cost": 0.25}, + "usage": {"inputTokens": 40000, "outputTokens": 12000, "cacheReadTokens": 20000, "cacheWriteTokens": 3000} + }, + "gpt-5-mini": { + "requests": {"count": 2, "cost": 0.04}, + "usage": {"inputTokens": 2000, "outputTokens": 800, "cacheReadTokens": 0, "cacheWriteTokens": 0} + } + } + }` + + shutdown2 := `{ + "shutdownType": 
"user_exit", + "totalPremiumRequests": 4, + "totalApiDurationMs": 15000, + "sessionStartTime": "2026-02-24T12:00:00Z", + "codeChanges": {"linesAdded": 50, "linesRemoved": 10, "filesModified": 2}, + "modelMetrics": { + "claude-sonnet-4.5": { + "requests": {"count": 4, "cost": 0.10}, + "usage": {"inputTokens": 12000, "outputTokens": 6000, "cacheReadTokens": 10000, "cacheWriteTokens": 2000} + } + } + }` + + mkSessionWithShutdown("s1", "2026-02-24T10:00:00Z", "2026-02-24T11:00:00Z", shutdown1) + mkSessionWithShutdown("s2", "2026-02-24T12:00:00Z", "2026-02-24T13:00:00Z", shutdown2) + + snap := &core.UsageSnapshot{ + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + DailySeries: make(map[string][]core.TimePoint), + } + + logs := p.readLogs(copilotDir, snap) + p.readSessions(copilotDir, snap, logs) + + // Verify that the session data is still correctly parsed (existing behavior). + if m := snap.Metrics["cli_messages"]; m.Used == nil || *m.Used != 2 { + t.Fatalf("cli_messages = %+v, want 2", m) + } + + // Verify total_sessions raw value accounts for both sessions. 
+ if got := snap.Raw["total_sessions"]; got != "2" { + t.Fatalf("total_sessions = %q, want 2", got) + } +} + +func TestSessionShutdownDataParsing_NoModelMetrics(t *testing.T) { + body := `{ + "shutdownType": "crash", + "totalPremiumRequests": 3, + "totalApiDurationMs": 5000, + "codeChanges": {"linesAdded": 10, "linesRemoved": 2, "filesModified": 1} + }` + + var shutdown sessionShutdownData + if err := unmarshalJSON(body, &shutdown); err != nil { + t.Fatalf("unmarshal failed: %v", err) + } + + if shutdown.ShutdownType != "crash" { + t.Errorf("ShutdownType = %q, want %q", shutdown.ShutdownType, "crash") + } + if shutdown.TotalPremiumRequests != 3 { + t.Errorf("TotalPremiumRequests = %d, want 3", shutdown.TotalPremiumRequests) + } + if shutdown.CodeChanges.LinesAdded != 10 { + t.Errorf("CodeChanges.LinesAdded = %d, want 10", shutdown.CodeChanges.LinesAdded) + } + if shutdown.ModelMetrics != nil { + t.Errorf("expected nil ModelMetrics, got %v", shutdown.ModelMetrics) + } +} + +func TestAssistantUsageDataParsing(t *testing.T) { + body := `{ + "model": "claude-sonnet-4.5", + "inputTokens": 5200, + "outputTokens": 1800, + "cacheReadTokens": 3000, + "cacheWriteTokens": 500, + "cost": 0.042, + "duration": 2500, + "quotaSnapshots": { + "premium_interactions": { + "entitlementRequests": 300, + "usedRequests": 158, + "remainingPercentage": 47.3, + "resetDate": "2026-03-01T00:00:00Z" + } + } + }` + + var usage assistantUsageData + if err := unmarshalJSON(body, &usage); err != nil { + t.Fatalf("unmarshal failed: %v", err) + } + + if usage.Model != "claude-sonnet-4.5" { + t.Errorf("Model = %q, want %q", usage.Model, "claude-sonnet-4.5") + } + if usage.InputTokens != 5200 { + t.Errorf("InputTokens = %f, want 5200", usage.InputTokens) + } + if usage.OutputTokens != 1800 { + t.Errorf("OutputTokens = %f, want 1800", usage.OutputTokens) + } + if usage.CacheReadTokens != 3000 { + t.Errorf("CacheReadTokens = %f, want 3000", usage.CacheReadTokens) + } + if usage.CacheWriteTokens != 500 { 
+ t.Errorf("CacheWriteTokens = %f, want 500", usage.CacheWriteTokens) + } + if usage.Cost != 0.042 { + t.Errorf("Cost = %f, want 0.042", usage.Cost) + } + if usage.Duration != 2500 { + t.Errorf("Duration = %d, want 2500", usage.Duration) + } + if len(usage.QuotaSnapshots) != 1 { + t.Fatalf("expected 1 quota snapshot, got %d", len(usage.QuotaSnapshots)) + } + + premium := usage.QuotaSnapshots["premium_interactions"] + if premium.EntitlementRequests != 300 { + t.Errorf("EntitlementRequests = %d, want 300", premium.EntitlementRequests) + } + if premium.UsedRequests != 158 { + t.Errorf("UsedRequests = %d, want 158", premium.UsedRequests) + } + if premium.RemainingPercentage != 47.3 { + t.Errorf("RemainingPercentage = %f, want 47.3", premium.RemainingPercentage) + } + if premium.ResetDate != "2026-03-01T00:00:00Z" { + t.Errorf("ResetDate = %q, want %q", premium.ResetDate, "2026-03-01T00:00:00Z") + } +} + +func TestAssistantUsageDataParsing_NoQuota(t *testing.T) { + body := `{ + "model": "gpt-5-mini", + "inputTokens": 1000, + "outputTokens": 500, + "cacheReadTokens": 0, + "cacheWriteTokens": 0, + "cost": 0.01, + "duration": 800 + }` + + var usage assistantUsageData + if err := unmarshalJSON(body, &usage); err != nil { + t.Fatalf("unmarshal failed: %v", err) + } + + if usage.Model != "gpt-5-mini" { + t.Errorf("Model = %q", usage.Model) + } + if usage.InputTokens != 1000 { + t.Errorf("InputTokens = %f, want 1000", usage.InputTokens) + } + if usage.OutputTokens != 500 { + t.Errorf("OutputTokens = %f, want 500", usage.OutputTokens) + } + if len(usage.QuotaSnapshots) != 0 { + t.Errorf("expected 0 quota snapshots, got %d", len(usage.QuotaSnapshots)) + } +} + +func TestReadSessions_AccumulatesUsageEvents(t *testing.T) { + p := New() + tmp := t.TempDir() + copilotDir := filepath.Join(tmp, ".copilot") + logDir := filepath.Join(copilotDir, "logs") + sessionDir := filepath.Join(copilotDir, "session-state") + if err := os.MkdirAll(logDir, 0o755); err != nil { + t.Fatalf("mkdir logs: 
%v", err) + } + if err := os.MkdirAll(sessionDir, 0o755); err != nil { + t.Fatalf("mkdir sessions: %v", err) + } + + logContent := strings.Join([]string{ + "2026-02-25T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", + "2026-02-25T10:00:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", + }, "\n") + if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { + t.Fatalf("write log: %v", err) + } + + mkSessionWithUsage := func(id, created, updated string, usageEvents []string) { + dir := filepath.Join(sessionDir, id) + if err := os.MkdirAll(dir, 0o755); err != nil { + t.Fatalf("mkdir %s: %v", id, err) + } + ws := strings.Join([]string{ + "id: " + id, + "repository: owner/repo", + "branch: main", + "created_at: " + created, + "updated_at: " + updated, + }, "\n") + if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { + t.Fatalf("write workspace %s: %v", id, err) + } + + baseEvents := []string{ + `{"type":"session.model_change","timestamp":"` + created + `","data":{"newModel":"claude-sonnet-4.5"}}`, + `{"type":"user.message","timestamp":"` + created + `","data":{"content":"hello"}}`, + `{"type":"assistant.turn_start","timestamp":"` + created + `","data":{"turnId":"0"}}`, + `{"type":"assistant.message","timestamp":"` + updated + `","data":{"content":"world","reasoningText":"r","toolRequests":[]}}`, + } + allEvents := append(baseEvents, usageEvents...) 
+ if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(strings.Join(allEvents, "\n")), 0o644); err != nil { + t.Fatalf("write events %s: %v", id, err) + } + } + + usageEvent1 := `{"type":"assistant.usage","timestamp":"2026-02-25T10:05:00Z","data":{` + + `"model":"claude-sonnet-4.5","inputTokens":5200,"outputTokens":1800,` + + `"cacheReadTokens":3000,"cacheWriteTokens":500,"cost":0.042,"duration":2500,` + + `"quotaSnapshots":{"premium_interactions":{"entitlementRequests":300,"usedRequests":150,"remainingPercentage":50.0,"resetDate":"2026-03-01T00:00:00Z"}}}}` + + usageEvent2 := `{"type":"assistant.usage","timestamp":"2026-02-25T10:10:00Z","data":{` + + `"model":"claude-sonnet-4.5","inputTokens":3000,"outputTokens":1200,` + + `"cacheReadTokens":2000,"cacheWriteTokens":300,"cost":0.028,"duration":1800,` + + `"quotaSnapshots":{"premium_interactions":{"entitlementRequests":300,"usedRequests":152,"remainingPercentage":49.3,"resetDate":"2026-03-01T00:00:00Z"}}}}` + + usageEvent3 := `{"type":"assistant.usage","timestamp":"2026-02-25T10:15:00Z","data":{` + + `"model":"gpt-5-mini","inputTokens":1000,"outputTokens":500,` + + `"cacheReadTokens":0,"cacheWriteTokens":0,"cost":0.01,"duration":800}}` + + mkSessionWithUsage("s1", "2026-02-25T10:00:00Z", "2026-02-25T10:20:00Z", + []string{usageEvent1, usageEvent2, usageEvent3}) + + snap := &core.UsageSnapshot{ + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + DailySeries: make(map[string][]core.TimePoint), + } + + logs := p.readLogs(copilotDir, snap) + p.readSessions(copilotDir, snap, logs) + + // Verify that existing session behavior still works. 
+ if m := snap.Metrics["cli_messages"]; m.Used == nil || *m.Used != 1 { + t.Fatalf("cli_messages = %+v, want 1", m) + } + if got := snap.Raw["total_sessions"]; got != "1" { + t.Fatalf("total_sessions = %q, want 1", got) + } + + // The usage data is accumulated internally but not yet emitted as metrics + // (that is Task 5). This test verifies the parsing does not break existing + // behavior and that the events are parsed without errors. + // We verify by checking the session still has correct model and timestamps. + if got := snap.Raw["last_session_model"]; got != "claude-sonnet-4.5" { + t.Fatalf("last_session_model = %q, want claude-sonnet-4.5", got) + } +} + +func TestReadSessions_UsageEventsMultipleSessions(t *testing.T) { + p := New() + tmp := t.TempDir() + copilotDir := filepath.Join(tmp, ".copilot") + logDir := filepath.Join(copilotDir, "logs") + sessionDir := filepath.Join(copilotDir, "session-state") + if err := os.MkdirAll(logDir, 0o755); err != nil { + t.Fatalf("mkdir logs: %v", err) + } + if err := os.MkdirAll(sessionDir, 0o755); err != nil { + t.Fatalf("mkdir sessions: %v", err) + } + + logContent := strings.Join([]string{ + "2026-02-25T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", + "2026-02-25T10:00:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", + "2026-02-25T14:00:00.000Z [INFO] Workspace initialized: s2 (checkpoints: 0)", + "2026-02-25T14:00:01.000Z [INFO] CompactionProcessor: Utilization 0.7% (900/128000 tokens) below threshold 80%", + }, "\n") + if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { + t.Fatalf("write log: %v", err) + } + + mkSession := func(id, model, created, updated string, usageEvents []string) { + dir := filepath.Join(sessionDir, id) + if err := os.MkdirAll(dir, 0o755); err != nil { + t.Fatalf("mkdir %s: %v", id, err) + } + ws := strings.Join([]string{ + "id: " + id, + "repository: owner/repo", + "branch: main", 
+ "created_at: " + created, + "updated_at: " + updated, + }, "\n") + if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { + t.Fatalf("write workspace %s: %v", id, err) + } + + baseEvents := []string{ + `{"type":"session.model_change","timestamp":"` + created + `","data":{"newModel":"` + model + `"}}`, + `{"type":"user.message","timestamp":"` + created + `","data":{"content":"hello"}}`, + `{"type":"assistant.turn_start","timestamp":"` + created + `","data":{"turnId":"0"}}`, + `{"type":"assistant.message","timestamp":"` + updated + `","data":{"content":"reply","reasoningText":"","toolRequests":[]}}`, + } + allEvents := append(baseEvents, usageEvents...) + if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(strings.Join(allEvents, "\n")), 0o644); err != nil { + t.Fatalf("write events %s: %v", id, err) + } + } + + s1Usage := []string{ + `{"type":"assistant.usage","timestamp":"2026-02-25T10:05:00Z","data":{"model":"claude-sonnet-4.5","inputTokens":5200,"outputTokens":1800,"cacheReadTokens":3000,"cacheWriteTokens":500,"cost":0.042,"duration":2500}}`, + `{"type":"assistant.usage","timestamp":"2026-02-25T10:10:00Z","data":{"model":"claude-sonnet-4.5","inputTokens":3000,"outputTokens":1200,"cacheReadTokens":2000,"cacheWriteTokens":300,"cost":0.028,"duration":1800}}`, + } + + s2Usage := []string{ + `{"type":"assistant.usage","timestamp":"2026-02-25T14:05:00Z","data":{"model":"gpt-5-mini","inputTokens":1000,"outputTokens":500,"cacheReadTokens":0,"cacheWriteTokens":0,"cost":0.01,"duration":800}}`, + } + + mkSession("s1", "claude-sonnet-4.5", "2026-02-25T10:00:00Z", "2026-02-25T10:20:00Z", s1Usage) + mkSession("s2", "gpt-5-mini", "2026-02-25T14:00:00Z", "2026-02-25T14:10:00Z", s2Usage) + + snap := &core.UsageSnapshot{ + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + DailySeries: make(map[string][]core.TimePoint), + } + + logs := p.readLogs(copilotDir, snap) + 
p.readSessions(copilotDir, snap, logs) + + // Verify existing behavior is preserved. + if m := snap.Metrics["cli_messages"]; m.Used == nil || *m.Used != 2 { + t.Fatalf("cli_messages = %+v, want 2", m) + } + if got := snap.Raw["total_sessions"]; got != "2" { + t.Fatalf("total_sessions = %q, want 2", got) + } + + // The latest session (s2 at 14:10) should be shown as last. + if got := snap.Raw["last_session_model"]; got != "gpt-5-mini" { + t.Fatalf("last_session_model = %q, want gpt-5-mini", got) + } +} + +func TestExtractCopilotToolPathsAndLanguage(t *testing.T) { + raw := json.RawMessage(`{"name":"read_file","args":{"path":"internal/providers/copilot/copilot.go"}}`) + paths := extractCopilotToolPaths(raw) + if len(paths) != 1 || paths[0] != "internal/providers/copilot/copilot.go" { + t.Fatalf("extractCopilotToolPaths = %v", paths) + } + if lang := inferCopilotLanguageFromPath(paths[0]); lang != "go" { + t.Fatalf("inferCopilotLanguageFromPath = %q, want go", lang) + } +} + +func TestReadSessions_ExtractsLanguageAndCodeStatsMetrics(t *testing.T) { + p := New() + tmp := t.TempDir() + copilotDir := filepath.Join(tmp, ".copilot") + logDir := filepath.Join(copilotDir, "logs") + sessionDir := filepath.Join(copilotDir, "session-state") + if err := os.MkdirAll(logDir, 0o755); err != nil { + t.Fatalf("mkdir logs: %v", err) + } + if err := os.MkdirAll(sessionDir, 0o755); err != nil { + t.Fatalf("mkdir sessions: %v", err) + } + + logContent := strings.Join([]string{ + "2026-02-25T14:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", + "2026-02-25T14:00:01.000Z [INFO] CompactionProcessor: Utilization 1.1% (1400/128000 tokens) below threshold 80%", + }, "\n") + if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { + t.Fatalf("write log: %v", err) + } + + s1Dir := filepath.Join(sessionDir, "s1") + if err := os.MkdirAll(s1Dir, 0o755); err != nil { + t.Fatalf("mkdir s1: %v", err) + } + ws := strings.Join([]string{ + "id: 
s1", + "repository: owner/repo", + "branch: main", + "created_at: 2026-02-25T14:00:00Z", + "updated_at: 2026-02-25T14:10:00Z", + }, "\n") + if err := os.WriteFile(filepath.Join(s1Dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { + t.Fatalf("write workspace: %v", err) + } + + events := strings.Join([]string{ + `{"type":"session.model_change","timestamp":"2026-02-25T14:00:00Z","data":{"newModel":"claude-sonnet-4.6"}}`, + `{"type":"user.message","timestamp":"2026-02-25T14:00:01Z","data":{"content":"patch code"}}`, + `{"type":"assistant.turn_start","timestamp":"2026-02-25T14:00:02Z","data":{"turnId":"0"}}`, + `{"type":"assistant.message","timestamp":"2026-02-25T14:00:03Z","data":{"content":"done","reasoningText":"","toolRequests":[{"name":"read_file","args":{"path":"internal/providers/copilot/copilot.go"}},{"name":"edit_file","args":{"filePath":"internal/providers/copilot/widget.go","old_string":"a\nb","new_string":"a\nb\nc"}},{"name":"run_terminal","args":{"command":"git commit -m \"copilot metrics\""}}]}}`, + }, "\n") + if err := os.WriteFile(filepath.Join(s1Dir, "events.jsonl"), []byte(events), 0o644); err != nil { + t.Fatalf("write events: %v", err) + } + + snap := &core.UsageSnapshot{ + Metrics: make(map[string]core.Metric), + Resets: make(map[string]time.Time), + Raw: make(map[string]string), + DailySeries: make(map[string][]core.TimePoint), + } + + logs := p.readLogs(copilotDir, snap) + p.readSessions(copilotDir, snap, logs) + + if m := snap.Metrics["lang_go"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("lang_go missing/zero: %+v", m) + } + if m := snap.Metrics["composer_lines_added"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("composer_lines_added missing/zero: %+v", m) + } + if m := snap.Metrics["composer_lines_removed"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("composer_lines_removed missing/zero: %+v", m) + } + if m := snap.Metrics["composer_files_changed"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("composer_files_changed missing/zero: %+v", m) 
+ } + if m := snap.Metrics["scored_commits"]; m.Used == nil || *m.Used <= 0 { + t.Fatalf("scored_commits missing/zero: %+v", m) + } + if m := snap.Metrics["total_prompts"]; m.Used == nil || *m.Used != 1 { + t.Fatalf("total_prompts = %+v, want 1", m) + } + if m := snap.Metrics["tool_calls_total"]; m.Used == nil || *m.Used != 3 { + t.Fatalf("tool_calls_total = %+v, want 3", m) + } +} + +func TestDetectCopilotVersion_FallbackToStandalone(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("test uses shell scripts") + } + + tmp := t.TempDir() + ghBin := writeTestExe(t, tmp, "gh", ` +if [ "$1" = "copilot" ] && [ "$2" = "--version" ]; then + echo "gh: unknown command copilot" >&2 + exit 1 +fi +exit 1 +`) + copilotBin := writeTestExe(t, tmp, "copilot", ` +if [ "$1" = "--version" ]; then + echo "copilot 1.2.3" + exit 0 +fi +exit 1 +`) + + version, source, err := detectCopilotVersion(context.Background(), ghBin, copilotBin) + if err != nil { + t.Fatalf("detectCopilotVersion() error: %v", err) + } + if version != "copilot 1.2.3" { + t.Fatalf("version = %q, want %q", version, "copilot 1.2.3") + } + if source != "copilot" { + t.Fatalf("source = %q, want %q", source, "copilot") + } +} + +func TestFetch_FallsBackToStandaloneCopilotWhenGHCopilotUnavailable(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("test uses shell scripts") + } + + tmp := t.TempDir() + configDir := filepath.Join(t.TempDir(), ".copilot") + if err := os.MkdirAll(configDir, 0o755); err != nil { + t.Fatalf("mkdir config dir: %v", err) + } + + ghBin := writeTestExe(t, tmp, "gh", ` +if [ "$1" = "copilot" ] && [ "$2" = "--version" ]; then + echo "gh: unknown command copilot" >&2 + exit 1 +fi +if [ "$1" = "auth" ] && [ "$2" = "status" ]; then + echo "Logged in to github.com as octocat" + exit 0 +fi +if [ "$1" = "api" ]; then + endpoint="" + for arg in "$@"; do endpoint="$arg"; done + case "$endpoint" in + "/user") + echo '{"login":"octocat","name":"Octo Cat","plan":{"name":"free"}}' + exit 0 + ;; + 
"/copilot_internal/user") + echo '{"login":"octocat","access_type_sku":"copilot_pro","copilot_plan":"individual","chat_enabled":true,"is_mcp_enabled":false,"organization_login_list":[],"organization_list":[]}' + exit 0 + ;; + "/rate_limit") + echo '{"resources":{"core":{"limit":5000,"remaining":4999,"reset":2000000000,"used":1}}}' + exit 0 + ;; + esac +fi +echo "unsupported gh args: $*" >&2 +exit 1 +`) + + copilotBin := writeTestExe(t, tmp, "copilot", ` +if [ "$1" = "--version" ]; then + echo "copilot 1.2.3" + exit 0 +fi +exit 1 +`) + + p := New() + snap, err := p.Fetch(context.Background(), testCopilotAccount(ghBin, configDir, copilotBin)) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + if snap.Status == core.StatusError || snap.Status == core.StatusAuth { + t.Fatalf("Status = %q, want non-error/auth fallback", snap.Status) + } + if snap.Raw["copilot_version"] != "copilot 1.2.3" { + t.Fatalf("copilot_version = %q, want %q", snap.Raw["copilot_version"], "copilot 1.2.3") + } + if snap.Raw["copilot_version_source"] != "copilot" { + t.Fatalf("copilot_version_source = %q, want %q", snap.Raw["copilot_version_source"], "copilot") + } + if !strings.Contains(snap.Raw["auth_status"], "Logged in") { + t.Fatalf("auth_status = %q, want GitHub auth output", snap.Raw["auth_status"]) + } + if snap.Raw["github_login"] != "octocat" { + t.Fatalf("github_login = %q, want %q", snap.Raw["github_login"], "octocat") + } +} + +func TestFetch_StandaloneCopilotWithoutGH(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("test uses shell scripts") + } + + tmp := t.TempDir() + configDir := filepath.Join(t.TempDir(), ".copilot") + if err := os.MkdirAll(configDir, 0o755); err != nil { + t.Fatalf("mkdir config dir: %v", err) + } + + copilotBin := writeTestExe(t, tmp, "copilot", ` +if [ "$1" = "--version" ]; then + echo "copilot 2.0.0" + exit 0 +fi +exit 1 +`) + t.Setenv("PATH", tmp) + + p := New() + snap, err := p.Fetch(context.Background(), testCopilotAccount(copilotBin, 
configDir, "")) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + if snap.Status != core.StatusOK { + t.Fatalf("Status = %q, want %q", snap.Status, core.StatusOK) + } + if snap.Raw["copilot_version"] != "copilot 2.0.0" { + t.Fatalf("copilot_version = %q, want %q", snap.Raw["copilot_version"], "copilot 2.0.0") + } + if snap.Raw["copilot_version_source"] != "copilot" { + t.Fatalf("copilot_version_source = %q, want %q", snap.Raw["copilot_version_source"], "copilot") + } + if !strings.Contains(snap.Raw["auth_status"], "skipped GitHub API checks") { + t.Fatalf("auth_status = %q, want skipped GH API message", snap.Raw["auth_status"]) + } +} + +func writeTestExe(t *testing.T, dir, name, body string) string { + t.Helper() + path := filepath.Join(dir, name) + script := "#!/bin/sh\n" + strings.TrimSpace(body) + "\n" + if err := os.WriteFile(path, []byte(script), 0o755); err != nil { + t.Fatalf("write executable %s: %v", name, err) + } + return path +} + +func unmarshalJSON(s string, v interface{}) error { + return json.Unmarshal([]byte(s), v) +} + +func boolPtr(v bool) *bool { return &v } diff --git a/internal/providers/copilot/copilot_test.go b/internal/providers/copilot/copilot_test.go index ebc251a..b6308ac 100644 --- a/internal/providers/copilot/copilot_test.go +++ b/internal/providers/copilot/copilot_test.go @@ -1,11 +1,7 @@ package copilot import ( - "context" "encoding/json" - "os" - "path/filepath" - "runtime" "strings" "testing" "time" @@ -816,993 +812,3 @@ func TestAssistantMsgDataParsing_EmptyTools(t *testing.T) { t.Errorf("expected 0 tool requests, got %d", len(tools)) } } - -func TestReadSessions_EmitsModelTokenMetrics(t *testing.T) { - p := New() - tmp := t.TempDir() - copilotDir := filepath.Join(tmp, ".copilot") - logDir := filepath.Join(copilotDir, "logs") - sessionDir := filepath.Join(copilotDir, "session-state") - if err := os.MkdirAll(logDir, 0o755); err != nil { - t.Fatalf("mkdir logs: %v", err) - } - if err := os.MkdirAll(sessionDir, 0o755); 
err != nil { - t.Fatalf("mkdir sessions: %v", err) - } - - logContent := strings.Join([]string{ - "2026-02-20T01:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", - "2026-02-20T01:00:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", - "2026-02-20T01:00:02.000Z [INFO] CompactionProcessor: Utilization 1.4% (1800/128000 tokens) below threshold 80%", - "2026-02-20T02:00:00.000Z [INFO] Workspace initialized: s2 (checkpoints: 0)", - "2026-02-20T02:00:01.000Z [INFO] CompactionProcessor: Utilization 0.7% (900/128000 tokens) below threshold 80%", - }, "\n") - if err := os.WriteFile(filepath.Join(logDir, "process-test.log"), []byte(logContent), 0o644); err != nil { - t.Fatalf("write log: %v", err) - } - - mkSession := func(id, model, created, updated string) { - dir := filepath.Join(sessionDir, id) - if err := os.MkdirAll(dir, 0o755); err != nil { - t.Fatalf("mkdir %s: %v", id, err) - } - ws := strings.Join([]string{ - "id: " + id, - "repository: owner/repo", - "branch: main", - "created_at: " + created, - "updated_at: " + updated, - }, "\n") - if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { - t.Fatalf("write workspace %s: %v", id, err) - } - events := strings.Join([]string{ - `{"type":"session.model_change","timestamp":"` + created + `","data":{"newModel":"` + model + `"}}`, - `{"type":"user.message","timestamp":"` + created + `","data":{"content":"hello"}}`, - `{"type":"assistant.turn_start","timestamp":"` + created + `","data":{"turnId":"0"}}`, - `{"type":"assistant.message","timestamp":"` + updated + `","data":{"content":"world","reasoningText":"r","toolRequests":[{"name":"read_file"}]}}`, - }, "\n") - if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(events), 0o644); err != nil { - t.Fatalf("write events %s: %v", id, err) - } - } - - mkSession("s1", "gpt-5-mini", "2026-02-20T01:00:00Z", "2026-02-20T01:10:00Z") - mkSession("s2", "claude-sonnet-4.6", 
"2026-02-20T02:00:00Z", "2026-02-20T02:10:00Z") - - snap := &core.UsageSnapshot{ - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - DailySeries: make(map[string][]core.TimePoint), - } - - logs := p.readLogs(copilotDir, snap) - p.readSessions(copilotDir, snap, logs) - - if m := snap.Metrics["model_gpt_5_mini_input_tokens"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("model_gpt_5_mini_input_tokens missing/zero: %+v", m) - } - if m := snap.Metrics["model_claude_sonnet_4_6_input_tokens"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("model_claude_sonnet_4_6_input_tokens missing/zero: %+v", m) - } - if _, ok := snap.DailySeries["tokens_gpt_5_mini"]; !ok { - t.Fatal("missing tokens_gpt_5_mini series") - } - if m := snap.Metrics["cli_input_tokens"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("cli_input_tokens missing/zero: %+v", m) - } - if m := snap.Metrics["client_owner_repo_total_tokens"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("client_owner_repo_total_tokens missing/zero: %+v", m) - } - if m := snap.Metrics["client_owner_repo_sessions"]; m.Used == nil || *m.Used != 2 { - t.Fatalf("client_owner_repo_sessions = %+v, want 2", m) - } - if _, ok := snap.DailySeries["tokens_client_owner_repo"]; !ok { - t.Fatal("missing tokens_client_owner_repo series") - } - if got := snap.Raw["client_usage"]; !strings.Contains(got, "owner/repo") { - t.Fatalf("client_usage = %q, want owner/repo", got) - } - if m := snap.Metrics["messages_today"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("messages_today missing/zero: %+v", m) - } - if m := snap.Metrics["tool_read_file"]; m.Used == nil || *m.Used != 2 { - t.Fatalf("tool_read_file = %+v, want 2 calls", m) - } - if _, ok := snap.Metrics["context_window"]; !ok { - t.Fatal("missing context_window metric") - } -} - -func TestReadLogs_UsesNewestTokenEntryByTimestamp(t *testing.T) { - p := New() - tmp := t.TempDir() - copilotDir := filepath.Join(tmp, ".copilot") - logDir := 
filepath.Join(copilotDir, "logs") - if err := os.MkdirAll(logDir, 0o755); err != nil { - t.Fatalf("mkdir logs: %v", err) - } - - newer := strings.Join([]string{ - "2026-02-21T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", - "2026-02-21T10:00:01.000Z [INFO] CompactionProcessor: Utilization 3.9% (5000/128000 tokens) below threshold 80%", - }, "\n") - older := strings.Join([]string{ - "2026-02-20T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", - "2026-02-20T10:00:01.000Z [INFO] CompactionProcessor: Utilization 0.8% (1000/128000 tokens) below threshold 80%", - }, "\n") - // Lexicographic order is intentionally opposite to timestamp order. - if err := os.WriteFile(filepath.Join(logDir, "a-new.log"), []byte(newer), 0o644); err != nil { - t.Fatalf("write newer log: %v", err) - } - if err := os.WriteFile(filepath.Join(logDir, "z-old.log"), []byte(older), 0o644); err != nil { - t.Fatalf("write older log: %v", err) - } - - snap := &core.UsageSnapshot{ - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - } - logs := p.readLogs(copilotDir, snap) - - if got := snap.Raw["context_window_tokens"]; got != "5000/128000" { - t.Fatalf("context_window_tokens = %q, want %q", got, "5000/128000") - } - if got := logs.SessionTokens["s1"].Used; got != 5000 { - t.Fatalf("session s1 used = %d, want 5000", got) - } -} - -func TestReadSessions_UsesLatestEventTimestampForRecency(t *testing.T) { - p := New() - tmp := t.TempDir() - copilotDir := filepath.Join(tmp, ".copilot") - logDir := filepath.Join(copilotDir, "logs") - sessionDir := filepath.Join(copilotDir, "session-state") - if err := os.MkdirAll(logDir, 0o755); err != nil { - t.Fatalf("mkdir logs: %v", err) - } - if err := os.MkdirAll(sessionDir, 0o755); err != nil { - t.Fatalf("mkdir sessions: %v", err) - } - - logContent := strings.Join([]string{ - "2026-02-21T13:05:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", - 
"2026-02-21T13:05:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", - "2026-02-21T15:00:00.000Z [INFO] Workspace initialized: s2 (checkpoints: 0)", - "2026-02-21T15:00:01.000Z [INFO] CompactionProcessor: Utilization 1.7% (2200/128000 tokens) below threshold 80%", - }, "\n") - if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { - t.Fatalf("write log: %v", err) - } - - mkSession := func(id, model, wsCreated, wsUpdated, evtTs string) { - dir := filepath.Join(sessionDir, id) - if err := os.MkdirAll(dir, 0o755); err != nil { - t.Fatalf("mkdir %s: %v", id, err) - } - ws := strings.Join([]string{ - "id: " + id, - "repository: owner/repo", - "branch: main", - "created_at: " + wsCreated, - "updated_at: " + wsUpdated, - }, "\n") - if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { - t.Fatalf("write workspace %s: %v", id, err) - } - events := strings.Join([]string{ - `{"type":"session.model_change","timestamp":"` + evtTs + `","data":{"newModel":"` + model + `"}}`, - `{"type":"user.message","timestamp":"` + evtTs + `","data":{"content":"hello"}}`, - `{"type":"assistant.turn_start","timestamp":"` + evtTs + `","data":{"turnId":"0"}}`, - }, "\n") - if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(events), 0o644); err != nil { - t.Fatalf("write events %s: %v", id, err) - } - } - - // Workspace metadata claims s1 is newer, but session events show s2 is latest. 
- mkSession("s1", "model-s1", "2026-02-21T10:00:00Z", "2026-02-21T13:00:00Z", "2026-02-21T13:05:00Z") - mkSession("s2", "model-s2", "2026-02-21T10:00:00Z", "2026-02-21T12:00:00Z", "2026-02-21T15:00:00Z") - - snap := &core.UsageSnapshot{ - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - DailySeries: make(map[string][]core.TimePoint), - } - logs := p.readLogs(copilotDir, snap) - p.readSessions(copilotDir, snap, logs) - - if got := snap.Raw["last_session_model"]; got != "model-s2" { - t.Fatalf("last_session_model = %q, want model-s2", got) - } - if got := snap.Raw["last_session_tokens"]; got != "2200/128000" { - t.Fatalf("last_session_tokens = %q, want 2200/128000", got) - } - if got := snap.Raw["last_session_time"]; got != "2026-02-21T15:00:01Z" { - t.Fatalf("last_session_time = %q, want 2026-02-21T15:00:01Z", got) - } -} - -func TestSessionShutdownDataParsing(t *testing.T) { - body := `{ - "shutdownType": "user_exit", - "totalPremiumRequests": 12, - "totalApiDurationMs": 45000, - "sessionStartTime": "2026-02-24T10:00:00Z", - "codeChanges": {"linesAdded": 150, "linesRemoved": 30, "filesModified": 5}, - "modelMetrics": { - "claude-sonnet-4.5": { - "requests": {"count": 10, "cost": 0.35}, - "usage": {"inputTokens": 52000, "outputTokens": 18000, "cacheReadTokens": 30000, "cacheWriteTokens": 5000} - }, - "gpt-5-mini": { - "requests": {"count": 2, "cost": 0.05}, - "usage": {"inputTokens": 3000, "outputTokens": 1000, "cacheReadTokens": 0, "cacheWriteTokens": 0} - } - } - }` - - var shutdown sessionShutdownData - if err := unmarshalJSON(body, &shutdown); err != nil { - t.Fatalf("unmarshal failed: %v", err) - } - - if shutdown.ShutdownType != "user_exit" { - t.Errorf("ShutdownType = %q, want %q", shutdown.ShutdownType, "user_exit") - } - if shutdown.TotalPremiumRequests != 12 { - t.Errorf("TotalPremiumRequests = %d, want 12", shutdown.TotalPremiumRequests) - } - if shutdown.TotalAPIDurationMs != 45000 { - 
t.Errorf("TotalAPIDurationMs = %d, want 45000", shutdown.TotalAPIDurationMs) - } - if shutdown.SessionStartTime != "2026-02-24T10:00:00Z" { - t.Errorf("SessionStartTime = %q", shutdown.SessionStartTime) - } - if shutdown.CodeChanges.LinesAdded != 150 { - t.Errorf("CodeChanges.LinesAdded = %d, want 150", shutdown.CodeChanges.LinesAdded) - } - if shutdown.CodeChanges.LinesRemoved != 30 { - t.Errorf("CodeChanges.LinesRemoved = %d, want 30", shutdown.CodeChanges.LinesRemoved) - } - if shutdown.CodeChanges.FilesModified != 5 { - t.Errorf("CodeChanges.FilesModified = %d, want 5", shutdown.CodeChanges.FilesModified) - } - if len(shutdown.ModelMetrics) != 2 { - t.Fatalf("expected 2 model metrics, got %d", len(shutdown.ModelMetrics)) - } - - claude := shutdown.ModelMetrics["claude-sonnet-4.5"] - if claude.Requests.Count != 10 { - t.Errorf("claude requests count = %d, want 10", claude.Requests.Count) - } - if claude.Requests.Cost != 0.35 { - t.Errorf("claude requests cost = %f, want 0.35", claude.Requests.Cost) - } - if claude.Usage.InputTokens != 52000 { - t.Errorf("claude input tokens = %f, want 52000", claude.Usage.InputTokens) - } - if claude.Usage.OutputTokens != 18000 { - t.Errorf("claude output tokens = %f, want 18000", claude.Usage.OutputTokens) - } - if claude.Usage.CacheReadTokens != 30000 { - t.Errorf("claude cache read tokens = %f, want 30000", claude.Usage.CacheReadTokens) - } - if claude.Usage.CacheWriteTokens != 5000 { - t.Errorf("claude cache write tokens = %f, want 5000", claude.Usage.CacheWriteTokens) - } - - gpt := shutdown.ModelMetrics["gpt-5-mini"] - if gpt.Requests.Count != 2 { - t.Errorf("gpt requests count = %d, want 2", gpt.Requests.Count) - } - if gpt.Requests.Cost != 0.05 { - t.Errorf("gpt requests cost = %f, want 0.05", gpt.Requests.Cost) - } -} - -func TestSessionShutdownDataParsing_Empty(t *testing.T) { - body := `{ - "shutdownType": "timeout", - "totalPremiumRequests": 0, - "totalApiDurationMs": 0, - "codeChanges": {}, - "modelMetrics": {} - }` 
- - var shutdown sessionShutdownData - if err := unmarshalJSON(body, &shutdown); err != nil { - t.Fatalf("unmarshal failed: %v", err) - } - - if shutdown.ShutdownType != "timeout" { - t.Errorf("ShutdownType = %q, want %q", shutdown.ShutdownType, "timeout") - } - if shutdown.TotalPremiumRequests != 0 { - t.Errorf("TotalPremiumRequests = %d, want 0", shutdown.TotalPremiumRequests) - } - if shutdown.CodeChanges.LinesAdded != 0 { - t.Errorf("CodeChanges.LinesAdded = %d, want 0", shutdown.CodeChanges.LinesAdded) - } - if len(shutdown.ModelMetrics) != 0 { - t.Errorf("expected 0 model metrics, got %d", len(shutdown.ModelMetrics)) - } -} - -func TestReadSessions_AccumulatesShutdownEvents(t *testing.T) { - p := New() - tmp := t.TempDir() - copilotDir := filepath.Join(tmp, ".copilot") - logDir := filepath.Join(copilotDir, "logs") - sessionDir := filepath.Join(copilotDir, "session-state") - if err := os.MkdirAll(logDir, 0o755); err != nil { - t.Fatalf("mkdir logs: %v", err) - } - if err := os.MkdirAll(sessionDir, 0o755); err != nil { - t.Fatalf("mkdir sessions: %v", err) - } - - logContent := strings.Join([]string{ - "2026-02-24T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", - "2026-02-24T10:00:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", - "2026-02-24T12:00:00.000Z [INFO] Workspace initialized: s2 (checkpoints: 0)", - "2026-02-24T12:00:01.000Z [INFO] CompactionProcessor: Utilization 0.7% (900/128000 tokens) below threshold 80%", - }, "\n") - if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { - t.Fatalf("write log: %v", err) - } - - mkSessionWithShutdown := func(id, created, updated string, shutdownJSON string) { - dir := filepath.Join(sessionDir, id) - if err := os.MkdirAll(dir, 0o755); err != nil { - t.Fatalf("mkdir %s: %v", id, err) - } - ws := strings.Join([]string{ - "id: " + id, - "repository: owner/repo", - "branch: main", - "created_at: " + created, 
- "updated_at: " + updated, - }, "\n") - if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { - t.Fatalf("write workspace %s: %v", id, err) - } - events := strings.Join([]string{ - `{"type":"session.model_change","timestamp":"` + created + `","data":{"newModel":"claude-sonnet-4.5"}}`, - `{"type":"user.message","timestamp":"` + created + `","data":{"content":"hello"}}`, - `{"type":"assistant.turn_start","timestamp":"` + created + `","data":{"turnId":"0"}}`, - `{"type":"assistant.message","timestamp":"` + updated + `","data":{"content":"world","reasoningText":"r","toolRequests":[]}}`, - `{"type":"session.shutdown","timestamp":"` + updated + `","data":` + shutdownJSON + `}`, - }, "\n") - if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(events), 0o644); err != nil { - t.Fatalf("write events %s: %v", id, err) - } - } - - shutdown1 := `{ - "shutdownType": "user_exit", - "totalPremiumRequests": 8, - "totalApiDurationMs": 30000, - "sessionStartTime": "2026-02-24T10:00:00Z", - "codeChanges": {"linesAdded": 100, "linesRemoved": 20, "filesModified": 3}, - "modelMetrics": { - "claude-sonnet-4.5": { - "requests": {"count": 6, "cost": 0.25}, - "usage": {"inputTokens": 40000, "outputTokens": 12000, "cacheReadTokens": 20000, "cacheWriteTokens": 3000} - }, - "gpt-5-mini": { - "requests": {"count": 2, "cost": 0.04}, - "usage": {"inputTokens": 2000, "outputTokens": 800, "cacheReadTokens": 0, "cacheWriteTokens": 0} - } - } - }` - - shutdown2 := `{ - "shutdownType": "user_exit", - "totalPremiumRequests": 4, - "totalApiDurationMs": 15000, - "sessionStartTime": "2026-02-24T12:00:00Z", - "codeChanges": {"linesAdded": 50, "linesRemoved": 10, "filesModified": 2}, - "modelMetrics": { - "claude-sonnet-4.5": { - "requests": {"count": 4, "cost": 0.10}, - "usage": {"inputTokens": 12000, "outputTokens": 6000, "cacheReadTokens": 10000, "cacheWriteTokens": 2000} - } - } - }` - - mkSessionWithShutdown("s1", "2026-02-24T10:00:00Z", 
"2026-02-24T11:00:00Z", shutdown1) - mkSessionWithShutdown("s2", "2026-02-24T12:00:00Z", "2026-02-24T13:00:00Z", shutdown2) - - snap := &core.UsageSnapshot{ - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - DailySeries: make(map[string][]core.TimePoint), - } - - logs := p.readLogs(copilotDir, snap) - p.readSessions(copilotDir, snap, logs) - - // Verify that the session data is still correctly parsed (existing behavior). - if m := snap.Metrics["cli_messages"]; m.Used == nil || *m.Used != 2 { - t.Fatalf("cli_messages = %+v, want 2", m) - } - - // Verify total_sessions raw value accounts for both sessions. - if got := snap.Raw["total_sessions"]; got != "2" { - t.Fatalf("total_sessions = %q, want 2", got) - } -} - -func TestSessionShutdownDataParsing_NoModelMetrics(t *testing.T) { - body := `{ - "shutdownType": "crash", - "totalPremiumRequests": 3, - "totalApiDurationMs": 5000, - "codeChanges": {"linesAdded": 10, "linesRemoved": 2, "filesModified": 1} - }` - - var shutdown sessionShutdownData - if err := unmarshalJSON(body, &shutdown); err != nil { - t.Fatalf("unmarshal failed: %v", err) - } - - if shutdown.ShutdownType != "crash" { - t.Errorf("ShutdownType = %q, want %q", shutdown.ShutdownType, "crash") - } - if shutdown.TotalPremiumRequests != 3 { - t.Errorf("TotalPremiumRequests = %d, want 3", shutdown.TotalPremiumRequests) - } - if shutdown.CodeChanges.LinesAdded != 10 { - t.Errorf("CodeChanges.LinesAdded = %d, want 10", shutdown.CodeChanges.LinesAdded) - } - if shutdown.ModelMetrics != nil { - t.Errorf("expected nil ModelMetrics, got %v", shutdown.ModelMetrics) - } -} - -func TestAssistantUsageDataParsing(t *testing.T) { - body := `{ - "model": "claude-sonnet-4.5", - "inputTokens": 5200, - "outputTokens": 1800, - "cacheReadTokens": 3000, - "cacheWriteTokens": 500, - "cost": 0.042, - "duration": 2500, - "quotaSnapshots": { - "premium_interactions": { - "entitlementRequests": 300, - "usedRequests": 158, 
- "remainingPercentage": 47.3, - "resetDate": "2026-03-01T00:00:00Z" - } - } - }` - - var usage assistantUsageData - if err := unmarshalJSON(body, &usage); err != nil { - t.Fatalf("unmarshal failed: %v", err) - } - - if usage.Model != "claude-sonnet-4.5" { - t.Errorf("Model = %q, want %q", usage.Model, "claude-sonnet-4.5") - } - if usage.InputTokens != 5200 { - t.Errorf("InputTokens = %f, want 5200", usage.InputTokens) - } - if usage.OutputTokens != 1800 { - t.Errorf("OutputTokens = %f, want 1800", usage.OutputTokens) - } - if usage.CacheReadTokens != 3000 { - t.Errorf("CacheReadTokens = %f, want 3000", usage.CacheReadTokens) - } - if usage.CacheWriteTokens != 500 { - t.Errorf("CacheWriteTokens = %f, want 500", usage.CacheWriteTokens) - } - if usage.Cost != 0.042 { - t.Errorf("Cost = %f, want 0.042", usage.Cost) - } - if usage.Duration != 2500 { - t.Errorf("Duration = %d, want 2500", usage.Duration) - } - if len(usage.QuotaSnapshots) != 1 { - t.Fatalf("expected 1 quota snapshot, got %d", len(usage.QuotaSnapshots)) - } - - premium := usage.QuotaSnapshots["premium_interactions"] - if premium.EntitlementRequests != 300 { - t.Errorf("EntitlementRequests = %d, want 300", premium.EntitlementRequests) - } - if premium.UsedRequests != 158 { - t.Errorf("UsedRequests = %d, want 158", premium.UsedRequests) - } - if premium.RemainingPercentage != 47.3 { - t.Errorf("RemainingPercentage = %f, want 47.3", premium.RemainingPercentage) - } - if premium.ResetDate != "2026-03-01T00:00:00Z" { - t.Errorf("ResetDate = %q, want %q", premium.ResetDate, "2026-03-01T00:00:00Z") - } -} - -func TestAssistantUsageDataParsing_NoQuota(t *testing.T) { - body := `{ - "model": "gpt-5-mini", - "inputTokens": 1000, - "outputTokens": 500, - "cacheReadTokens": 0, - "cacheWriteTokens": 0, - "cost": 0.01, - "duration": 800 - }` - - var usage assistantUsageData - if err := unmarshalJSON(body, &usage); err != nil { - t.Fatalf("unmarshal failed: %v", err) - } - - if usage.Model != "gpt-5-mini" { - 
t.Errorf("Model = %q", usage.Model) - } - if usage.InputTokens != 1000 { - t.Errorf("InputTokens = %f, want 1000", usage.InputTokens) - } - if usage.OutputTokens != 500 { - t.Errorf("OutputTokens = %f, want 500", usage.OutputTokens) - } - if len(usage.QuotaSnapshots) != 0 { - t.Errorf("expected 0 quota snapshots, got %d", len(usage.QuotaSnapshots)) - } -} - -func TestReadSessions_AccumulatesUsageEvents(t *testing.T) { - p := New() - tmp := t.TempDir() - copilotDir := filepath.Join(tmp, ".copilot") - logDir := filepath.Join(copilotDir, "logs") - sessionDir := filepath.Join(copilotDir, "session-state") - if err := os.MkdirAll(logDir, 0o755); err != nil { - t.Fatalf("mkdir logs: %v", err) - } - if err := os.MkdirAll(sessionDir, 0o755); err != nil { - t.Fatalf("mkdir sessions: %v", err) - } - - logContent := strings.Join([]string{ - "2026-02-25T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", - "2026-02-25T10:00:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", - }, "\n") - if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { - t.Fatalf("write log: %v", err) - } - - mkSessionWithUsage := func(id, created, updated string, usageEvents []string) { - dir := filepath.Join(sessionDir, id) - if err := os.MkdirAll(dir, 0o755); err != nil { - t.Fatalf("mkdir %s: %v", id, err) - } - ws := strings.Join([]string{ - "id: " + id, - "repository: owner/repo", - "branch: main", - "created_at: " + created, - "updated_at: " + updated, - }, "\n") - if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { - t.Fatalf("write workspace %s: %v", id, err) - } - - baseEvents := []string{ - `{"type":"session.model_change","timestamp":"` + created + `","data":{"newModel":"claude-sonnet-4.5"}}`, - `{"type":"user.message","timestamp":"` + created + `","data":{"content":"hello"}}`, - `{"type":"assistant.turn_start","timestamp":"` + created + 
`","data":{"turnId":"0"}}`, - `{"type":"assistant.message","timestamp":"` + updated + `","data":{"content":"world","reasoningText":"r","toolRequests":[]}}`, - } - allEvents := append(baseEvents, usageEvents...) - if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(strings.Join(allEvents, "\n")), 0o644); err != nil { - t.Fatalf("write events %s: %v", id, err) - } - } - - usageEvent1 := `{"type":"assistant.usage","timestamp":"2026-02-25T10:05:00Z","data":{` + - `"model":"claude-sonnet-4.5","inputTokens":5200,"outputTokens":1800,` + - `"cacheReadTokens":3000,"cacheWriteTokens":500,"cost":0.042,"duration":2500,` + - `"quotaSnapshots":{"premium_interactions":{"entitlementRequests":300,"usedRequests":150,"remainingPercentage":50.0,"resetDate":"2026-03-01T00:00:00Z"}}}}` - - usageEvent2 := `{"type":"assistant.usage","timestamp":"2026-02-25T10:10:00Z","data":{` + - `"model":"claude-sonnet-4.5","inputTokens":3000,"outputTokens":1200,` + - `"cacheReadTokens":2000,"cacheWriteTokens":300,"cost":0.028,"duration":1800,` + - `"quotaSnapshots":{"premium_interactions":{"entitlementRequests":300,"usedRequests":152,"remainingPercentage":49.3,"resetDate":"2026-03-01T00:00:00Z"}}}}` - - usageEvent3 := `{"type":"assistant.usage","timestamp":"2026-02-25T10:15:00Z","data":{` + - `"model":"gpt-5-mini","inputTokens":1000,"outputTokens":500,` + - `"cacheReadTokens":0,"cacheWriteTokens":0,"cost":0.01,"duration":800}}` - - mkSessionWithUsage("s1", "2026-02-25T10:00:00Z", "2026-02-25T10:20:00Z", - []string{usageEvent1, usageEvent2, usageEvent3}) - - snap := &core.UsageSnapshot{ - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - DailySeries: make(map[string][]core.TimePoint), - } - - logs := p.readLogs(copilotDir, snap) - p.readSessions(copilotDir, snap, logs) - - // Verify that existing session behavior still works. 
- if m := snap.Metrics["cli_messages"]; m.Used == nil || *m.Used != 1 { - t.Fatalf("cli_messages = %+v, want 1", m) - } - if got := snap.Raw["total_sessions"]; got != "1" { - t.Fatalf("total_sessions = %q, want 1", got) - } - - // The usage data is accumulated internally but not yet emitted as metrics - // (that is Task 5). This test verifies the parsing does not break existing - // behavior and that the events are parsed without errors. - // We verify by checking the session still has correct model and timestamps. - if got := snap.Raw["last_session_model"]; got != "claude-sonnet-4.5" { - t.Fatalf("last_session_model = %q, want claude-sonnet-4.5", got) - } -} - -func TestReadSessions_UsageEventsMultipleSessions(t *testing.T) { - p := New() - tmp := t.TempDir() - copilotDir := filepath.Join(tmp, ".copilot") - logDir := filepath.Join(copilotDir, "logs") - sessionDir := filepath.Join(copilotDir, "session-state") - if err := os.MkdirAll(logDir, 0o755); err != nil { - t.Fatalf("mkdir logs: %v", err) - } - if err := os.MkdirAll(sessionDir, 0o755); err != nil { - t.Fatalf("mkdir sessions: %v", err) - } - - logContent := strings.Join([]string{ - "2026-02-25T10:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", - "2026-02-25T10:00:01.000Z [INFO] CompactionProcessor: Utilization 1.0% (1200/128000 tokens) below threshold 80%", - "2026-02-25T14:00:00.000Z [INFO] Workspace initialized: s2 (checkpoints: 0)", - "2026-02-25T14:00:01.000Z [INFO] CompactionProcessor: Utilization 0.7% (900/128000 tokens) below threshold 80%", - }, "\n") - if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { - t.Fatalf("write log: %v", err) - } - - mkSession := func(id, model, created, updated string, usageEvents []string) { - dir := filepath.Join(sessionDir, id) - if err := os.MkdirAll(dir, 0o755); err != nil { - t.Fatalf("mkdir %s: %v", id, err) - } - ws := strings.Join([]string{ - "id: " + id, - "repository: owner/repo", - "branch: main", 
- "created_at: " + created, - "updated_at: " + updated, - }, "\n") - if err := os.WriteFile(filepath.Join(dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { - t.Fatalf("write workspace %s: %v", id, err) - } - - baseEvents := []string{ - `{"type":"session.model_change","timestamp":"` + created + `","data":{"newModel":"` + model + `"}}`, - `{"type":"user.message","timestamp":"` + created + `","data":{"content":"hello"}}`, - `{"type":"assistant.turn_start","timestamp":"` + created + `","data":{"turnId":"0"}}`, - `{"type":"assistant.message","timestamp":"` + updated + `","data":{"content":"reply","reasoningText":"","toolRequests":[]}}`, - } - allEvents := append(baseEvents, usageEvents...) - if err := os.WriteFile(filepath.Join(dir, "events.jsonl"), []byte(strings.Join(allEvents, "\n")), 0o644); err != nil { - t.Fatalf("write events %s: %v", id, err) - } - } - - s1Usage := []string{ - `{"type":"assistant.usage","timestamp":"2026-02-25T10:05:00Z","data":{"model":"claude-sonnet-4.5","inputTokens":5200,"outputTokens":1800,"cacheReadTokens":3000,"cacheWriteTokens":500,"cost":0.042,"duration":2500}}`, - `{"type":"assistant.usage","timestamp":"2026-02-25T10:10:00Z","data":{"model":"claude-sonnet-4.5","inputTokens":3000,"outputTokens":1200,"cacheReadTokens":2000,"cacheWriteTokens":300,"cost":0.028,"duration":1800}}`, - } - - s2Usage := []string{ - `{"type":"assistant.usage","timestamp":"2026-02-25T14:05:00Z","data":{"model":"gpt-5-mini","inputTokens":1000,"outputTokens":500,"cacheReadTokens":0,"cacheWriteTokens":0,"cost":0.01,"duration":800}}`, - } - - mkSession("s1", "claude-sonnet-4.5", "2026-02-25T10:00:00Z", "2026-02-25T10:20:00Z", s1Usage) - mkSession("s2", "gpt-5-mini", "2026-02-25T14:00:00Z", "2026-02-25T14:10:00Z", s2Usage) - - snap := &core.UsageSnapshot{ - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - DailySeries: make(map[string][]core.TimePoint), - } - - logs := p.readLogs(copilotDir, snap) - 
p.readSessions(copilotDir, snap, logs) - - // Verify existing behavior is preserved. - if m := snap.Metrics["cli_messages"]; m.Used == nil || *m.Used != 2 { - t.Fatalf("cli_messages = %+v, want 2", m) - } - if got := snap.Raw["total_sessions"]; got != "2" { - t.Fatalf("total_sessions = %q, want 2", got) - } - - // The latest session (s2 at 14:10) should be shown as last. - if got := snap.Raw["last_session_model"]; got != "gpt-5-mini" { - t.Fatalf("last_session_model = %q, want gpt-5-mini", got) - } -} - -func TestExtractCopilotToolPathsAndLanguage(t *testing.T) { - raw := json.RawMessage(`{"name":"read_file","args":{"path":"internal/providers/copilot/copilot.go"}}`) - paths := extractCopilotToolPaths(raw) - if len(paths) != 1 || paths[0] != "internal/providers/copilot/copilot.go" { - t.Fatalf("extractCopilotToolPaths = %v", paths) - } - if lang := inferCopilotLanguageFromPath(paths[0]); lang != "go" { - t.Fatalf("inferCopilotLanguageFromPath = %q, want go", lang) - } -} - -func TestReadSessions_ExtractsLanguageAndCodeStatsMetrics(t *testing.T) { - p := New() - tmp := t.TempDir() - copilotDir := filepath.Join(tmp, ".copilot") - logDir := filepath.Join(copilotDir, "logs") - sessionDir := filepath.Join(copilotDir, "session-state") - if err := os.MkdirAll(logDir, 0o755); err != nil { - t.Fatalf("mkdir logs: %v", err) - } - if err := os.MkdirAll(sessionDir, 0o755); err != nil { - t.Fatalf("mkdir sessions: %v", err) - } - - logContent := strings.Join([]string{ - "2026-02-25T14:00:00.000Z [INFO] Workspace initialized: s1 (checkpoints: 0)", - "2026-02-25T14:00:01.000Z [INFO] CompactionProcessor: Utilization 1.1% (1400/128000 tokens) below threshold 80%", - }, "\n") - if err := os.WriteFile(filepath.Join(logDir, "process.log"), []byte(logContent), 0o644); err != nil { - t.Fatalf("write log: %v", err) - } - - s1Dir := filepath.Join(sessionDir, "s1") - if err := os.MkdirAll(s1Dir, 0o755); err != nil { - t.Fatalf("mkdir s1: %v", err) - } - ws := strings.Join([]string{ - "id: 
s1", - "repository: owner/repo", - "branch: main", - "created_at: 2026-02-25T14:00:00Z", - "updated_at: 2026-02-25T14:10:00Z", - }, "\n") - if err := os.WriteFile(filepath.Join(s1Dir, "workspace.yaml"), []byte(ws), 0o644); err != nil { - t.Fatalf("write workspace: %v", err) - } - - events := strings.Join([]string{ - `{"type":"session.model_change","timestamp":"2026-02-25T14:00:00Z","data":{"newModel":"claude-sonnet-4.6"}}`, - `{"type":"user.message","timestamp":"2026-02-25T14:00:01Z","data":{"content":"patch code"}}`, - `{"type":"assistant.turn_start","timestamp":"2026-02-25T14:00:02Z","data":{"turnId":"0"}}`, - `{"type":"assistant.message","timestamp":"2026-02-25T14:00:03Z","data":{"content":"done","reasoningText":"","toolRequests":[{"name":"read_file","args":{"path":"internal/providers/copilot/copilot.go"}},{"name":"edit_file","args":{"filePath":"internal/providers/copilot/widget.go","old_string":"a\nb","new_string":"a\nb\nc"}},{"name":"run_terminal","args":{"command":"git commit -m \"copilot metrics\""}}]}}`, - }, "\n") - if err := os.WriteFile(filepath.Join(s1Dir, "events.jsonl"), []byte(events), 0o644); err != nil { - t.Fatalf("write events: %v", err) - } - - snap := &core.UsageSnapshot{ - Metrics: make(map[string]core.Metric), - Resets: make(map[string]time.Time), - Raw: make(map[string]string), - DailySeries: make(map[string][]core.TimePoint), - } - - logs := p.readLogs(copilotDir, snap) - p.readSessions(copilotDir, snap, logs) - - if m := snap.Metrics["lang_go"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("lang_go missing/zero: %+v", m) - } - if m := snap.Metrics["composer_lines_added"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("composer_lines_added missing/zero: %+v", m) - } - if m := snap.Metrics["composer_lines_removed"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("composer_lines_removed missing/zero: %+v", m) - } - if m := snap.Metrics["composer_files_changed"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("composer_files_changed missing/zero: %+v", m) 
- } - if m := snap.Metrics["scored_commits"]; m.Used == nil || *m.Used <= 0 { - t.Fatalf("scored_commits missing/zero: %+v", m) - } - if m := snap.Metrics["total_prompts"]; m.Used == nil || *m.Used != 1 { - t.Fatalf("total_prompts = %+v, want 1", m) - } - if m := snap.Metrics["tool_calls_total"]; m.Used == nil || *m.Used != 3 { - t.Fatalf("tool_calls_total = %+v, want 3", m) - } -} - -func TestDetectCopilotVersion_FallbackToStandalone(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("test uses shell scripts") - } - - tmp := t.TempDir() - ghBin := writeTestExe(t, tmp, "gh", ` -if [ "$1" = "copilot" ] && [ "$2" = "--version" ]; then - echo "gh: unknown command copilot" >&2 - exit 1 -fi -exit 1 -`) - copilotBin := writeTestExe(t, tmp, "copilot", ` -if [ "$1" = "--version" ]; then - echo "copilot 1.2.3" - exit 0 -fi -exit 1 -`) - - version, source, err := detectCopilotVersion(context.Background(), ghBin, copilotBin) - if err != nil { - t.Fatalf("detectCopilotVersion() error: %v", err) - } - if version != "copilot 1.2.3" { - t.Fatalf("version = %q, want %q", version, "copilot 1.2.3") - } - if source != "copilot" { - t.Fatalf("source = %q, want %q", source, "copilot") - } -} - -func TestFetch_FallsBackToStandaloneCopilotWhenGHCopilotUnavailable(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("test uses shell scripts") - } - - tmp := t.TempDir() - configDir := filepath.Join(t.TempDir(), ".copilot") - if err := os.MkdirAll(configDir, 0o755); err != nil { - t.Fatalf("mkdir config dir: %v", err) - } - - ghBin := writeTestExe(t, tmp, "gh", ` -if [ "$1" = "copilot" ] && [ "$2" = "--version" ]; then - echo "gh: unknown command copilot" >&2 - exit 1 -fi -if [ "$1" = "auth" ] && [ "$2" = "status" ]; then - echo "Logged in to github.com as octocat" - exit 0 -fi -if [ "$1" = "api" ]; then - endpoint="" - for arg in "$@"; do endpoint="$arg"; done - case "$endpoint" in - "/user") - echo '{"login":"octocat","name":"Octo Cat","plan":{"name":"free"}}' - exit 0 - ;; - 
"/copilot_internal/user") - echo '{"login":"octocat","access_type_sku":"copilot_pro","copilot_plan":"individual","chat_enabled":true,"is_mcp_enabled":false,"organization_login_list":[],"organization_list":[]}' - exit 0 - ;; - "/rate_limit") - echo '{"resources":{"core":{"limit":5000,"remaining":4999,"reset":2000000000,"used":1}}}' - exit 0 - ;; - esac -fi -echo "unsupported gh args: $*" >&2 -exit 1 -`) - - copilotBin := writeTestExe(t, tmp, "copilot", ` -if [ "$1" = "--version" ]; then - echo "copilot 1.2.3" - exit 0 -fi -exit 1 -`) - - p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "copilot", - Provider: "copilot", - Auth: "cli", - Binary: ghBin, - ExtraData: map[string]string{ - "copilot_binary": copilotBin, - "config_dir": configDir, - }, - }) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - if snap.Status == core.StatusError || snap.Status == core.StatusAuth { - t.Fatalf("Status = %q, want non-error/auth fallback", snap.Status) - } - if snap.Raw["copilot_version"] != "copilot 1.2.3" { - t.Fatalf("copilot_version = %q, want %q", snap.Raw["copilot_version"], "copilot 1.2.3") - } - if snap.Raw["copilot_version_source"] != "copilot" { - t.Fatalf("copilot_version_source = %q, want %q", snap.Raw["copilot_version_source"], "copilot") - } - if !strings.Contains(snap.Raw["auth_status"], "Logged in") { - t.Fatalf("auth_status = %q, want GitHub auth output", snap.Raw["auth_status"]) - } - if snap.Raw["github_login"] != "octocat" { - t.Fatalf("github_login = %q, want %q", snap.Raw["github_login"], "octocat") - } -} - -func TestFetch_StandaloneCopilotWithoutGH(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("test uses shell scripts") - } - - tmp := t.TempDir() - configDir := filepath.Join(t.TempDir(), ".copilot") - if err := os.MkdirAll(configDir, 0o755); err != nil { - t.Fatalf("mkdir config dir: %v", err) - } - - copilotBin := writeTestExe(t, tmp, "copilot", ` -if [ "$1" = "--version" ]; then - echo "copilot 2.0.0" 
- exit 0 -fi -exit 1 -`) - t.Setenv("PATH", tmp) - - p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "copilot", - Provider: "copilot", - Auth: "cli", - Binary: copilotBin, - ExtraData: map[string]string{ - "config_dir": configDir, - }, - }) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - if snap.Status != core.StatusOK { - t.Fatalf("Status = %q, want %q", snap.Status, core.StatusOK) - } - if snap.Raw["copilot_version"] != "copilot 2.0.0" { - t.Fatalf("copilot_version = %q, want %q", snap.Raw["copilot_version"], "copilot 2.0.0") - } - if snap.Raw["copilot_version_source"] != "copilot" { - t.Fatalf("copilot_version_source = %q, want %q", snap.Raw["copilot_version_source"], "copilot") - } - if !strings.Contains(snap.Raw["auth_status"], "skipped GitHub API checks") { - t.Fatalf("auth_status = %q, want skipped GH API message", snap.Raw["auth_status"]) - } -} - -func writeTestExe(t *testing.T, dir, name, body string) string { - t.Helper() - path := filepath.Join(dir, name) - script := "#!/bin/sh\n" + strings.TrimSpace(body) + "\n" - if err := os.WriteFile(path, []byte(script), 0o755); err != nil { - t.Fatalf("write executable %s: %v", name, err) - } - return path -} - -func unmarshalJSON(s string, v interface{}) error { - return json.Unmarshal([]byte(s), v) -} - -func boolPtr(v bool) *bool { return &v } diff --git a/internal/providers/copilot/local_config.go b/internal/providers/copilot/local_config.go new file mode 100644 index 0000000..bdd03f0 --- /dev/null +++ b/internal/providers/copilot/local_config.go @@ -0,0 +1,29 @@ +package copilot + +import ( + "encoding/json" + "os" + "path/filepath" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func (p *Provider) readConfig(copilotDir string, snap *core.UsageSnapshot) { + data, err := os.ReadFile(filepath.Join(copilotDir, "config.json")) + if err != nil { + return + } + var cfg copilotConfig + if json.Unmarshal(data, &cfg) != nil { + return + } + if cfg.Model != 
"" { + snap.Raw["preferred_model"] = cfg.Model + } + if cfg.ReasoningEffort != "" { + snap.Raw["reasoning_effort"] = cfg.ReasoningEffort + } + if cfg.Experimental { + snap.Raw["experimental"] = "enabled" + } +} diff --git a/internal/providers/copilot/local_data.go b/internal/providers/copilot/local_data.go index 10498cb..a27ef9e 100644 --- a/internal/providers/copilot/local_data.go +++ b/internal/providers/copilot/local_data.go @@ -12,188 +12,6 @@ import ( "github.com/janekbaraniewski/openusage/internal/core" ) -func (p *Provider) readConfig(copilotDir string, snap *core.UsageSnapshot) { - data, err := os.ReadFile(filepath.Join(copilotDir, "config.json")) - if err != nil { - return - } - var cfg copilotConfig - if json.Unmarshal(data, &cfg) != nil { - return - } - if cfg.Model != "" { - snap.Raw["preferred_model"] = cfg.Model - } - if cfg.ReasoningEffort != "" { - snap.Raw["reasoning_effort"] = cfg.ReasoningEffort - } - if cfg.Experimental { - snap.Raw["experimental"] = "enabled" - } -} - -type logSummary struct { - DefaultModel string - SessionTokens map[string]logTokenEntry - SessionBurn map[string]float64 -} - -func (p *Provider) readLogs(copilotDir string, snap *core.UsageSnapshot) logSummary { - ls := logSummary{ - SessionTokens: make(map[string]logTokenEntry), - SessionBurn: make(map[string]float64), - } - sessionEntries := make(map[string][]logTokenEntry) - logDir := filepath.Join(copilotDir, "logs") - entries, err := os.ReadDir(logDir) - if err != nil { - return ls - } - - var allTokenEntries []logTokenEntry - - for _, entry := range entries { - if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".log") { - continue - } - data, err := os.ReadFile(filepath.Join(logDir, entry.Name())) - if err != nil { - continue - } - - var currentSessionID string - for _, line := range strings.Split(string(data), "\n") { - line = strings.TrimSpace(line) - - if strings.Contains(line, "Workspace initialized:") { - if idx := strings.Index(line, "Workspace initialized:"); idx 
>= 0 { - rest := strings.TrimSpace(line[idx+len("Workspace initialized:"):]) - if spIdx := strings.Index(rest, " "); spIdx > 0 { - currentSessionID = rest[:spIdx] - } else if rest != "" { - currentSessionID = rest - } - } - } - - if strings.Contains(line, "Using default model:") { - if idx := strings.Index(line, "Using default model:"); idx >= 0 { - m := strings.TrimSpace(line[idx+len("Using default model:"):]) - if m != "" { - ls.DefaultModel = m - } - } - } - - if strings.Contains(line, "CompactionProcessor: Utilization") { - te := parseCompactionLine(line) - if te.Total > 0 { - allTokenEntries = append(allTokenEntries, te) - if currentSessionID != "" { - sessionEntries[currentSessionID] = append(sessionEntries[currentSessionID], te) - } - } - } - } - } - - if ls.DefaultModel != "" { - snap.Raw["default_model"] = ls.DefaultModel - } - - for sessionID, entries := range sessionEntries { - sortCompactionEntries(entries) - last := entries[len(entries)-1] - ls.SessionTokens[sessionID] = last - - burn := 0.0 - for idx, te := range entries { - if idx == 0 { - if te.Used > 0 { - burn += float64(te.Used) - } - continue - } - delta := te.Used - entries[idx-1].Used - if delta > 0 { - burn += float64(delta) - } - } - if burn > 0 { - ls.SessionBurn[sessionID] = burn - } - } - - if last, ok := newestCompactionEntry(allTokenEntries); ok { - snap.Raw["context_window_tokens"] = fmt.Sprintf("%d/%d", last.Used, last.Total) - pct := float64(last.Used) / float64(last.Total) * 100 - snap.Raw["context_window_pct"] = fmt.Sprintf("%.1f%%", pct) - used := float64(last.Used) - limit := float64(last.Total) - snap.Metrics["context_window"] = core.Metric{ - Limit: &limit, - Used: &used, - Remaining: core.Float64Ptr(limit - used), - Unit: "tokens", - Window: "session", - } - } - - return ls -} - -type assistantMsgData struct { - Content string `json:"content"` - ReasoningTxt string `json:"reasoningText"` - ToolRequests json.RawMessage `json:"toolRequests"` -} - -type quotaSnapshotEntry struct 
{ - EntitlementRequests int `json:"entitlementRequests"` - UsedRequests int `json:"usedRequests"` - RemainingPercentage float64 `json:"remainingPercentage"` - ResetDate string `json:"resetDate"` -} - -type assistantUsageData struct { - Model string `json:"model"` - InputTokens float64 `json:"inputTokens"` - OutputTokens float64 `json:"outputTokens"` - CacheReadTokens float64 `json:"cacheReadTokens"` - CacheWriteTokens float64 `json:"cacheWriteTokens"` - Cost float64 `json:"cost"` - Duration int64 `json:"duration"` - QuotaSnapshots map[string]quotaSnapshotEntry `json:"quotaSnapshots"` -} - -type sessionShutdownData struct { - ShutdownType string `json:"shutdownType"` - TotalPremiumRequests int `json:"totalPremiumRequests"` - TotalAPIDurationMs int64 `json:"totalApiDurationMs"` - SessionStartTime string `json:"sessionStartTime"` - CodeChanges shutdownCodeChanges `json:"codeChanges"` - ModelMetrics map[string]shutdownModelMetric `json:"modelMetrics"` -} - -type shutdownCodeChanges struct { - LinesAdded int `json:"linesAdded"` - LinesRemoved int `json:"linesRemoved"` - FilesModified int `json:"filesModified"` -} - -type shutdownModelMetric struct { - Requests struct { - Count int `json:"count"` - Cost float64 `json:"cost"` - } `json:"requests"` - Usage struct { - InputTokens float64 `json:"inputTokens"` - OutputTokens float64 `json:"outputTokens"` - CacheReadTokens float64 `json:"cacheReadTokens"` - CacheWriteTokens float64 `json:"cacheWriteTokens"` - } `json:"usage"` -} - func (p *Provider) readSessions(copilotDir string, snap *core.UsageSnapshot, logs logSummary) { sessionDir := filepath.Join(copilotDir, "session-state") entries, err := os.ReadDir(sessionDir) diff --git a/internal/providers/copilot/local_logs.go b/internal/providers/copilot/local_logs.go new file mode 100644 index 0000000..a2aaf39 --- /dev/null +++ b/internal/providers/copilot/local_logs.go @@ -0,0 +1,120 @@ +package copilot + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + 
"github.com/janekbaraniewski/openusage/internal/core" +) + +type logSummary struct { + DefaultModel string + SessionTokens map[string]logTokenEntry + SessionBurn map[string]float64 +} + +func (p *Provider) readLogs(copilotDir string, snap *core.UsageSnapshot) logSummary { + ls := logSummary{ + SessionTokens: make(map[string]logTokenEntry), + SessionBurn: make(map[string]float64), + } + sessionEntries := make(map[string][]logTokenEntry) + logDir := filepath.Join(copilotDir, "logs") + entries, err := os.ReadDir(logDir) + if err != nil { + return ls + } + + var allTokenEntries []logTokenEntry + + for _, entry := range entries { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".log") { + continue + } + data, err := os.ReadFile(filepath.Join(logDir, entry.Name())) + if err != nil { + continue + } + + var currentSessionID string + for _, line := range strings.Split(string(data), "\n") { + line = strings.TrimSpace(line) + + if strings.Contains(line, "Workspace initialized:") { + if idx := strings.Index(line, "Workspace initialized:"); idx >= 0 { + rest := strings.TrimSpace(line[idx+len("Workspace initialized:"):]) + if spIdx := strings.Index(rest, " "); spIdx > 0 { + currentSessionID = rest[:spIdx] + } else if rest != "" { + currentSessionID = rest + } + } + } + + if strings.Contains(line, "Using default model:") { + if idx := strings.Index(line, "Using default model:"); idx >= 0 { + m := strings.TrimSpace(line[idx+len("Using default model:"):]) + if m != "" { + ls.DefaultModel = m + } + } + } + + if strings.Contains(line, "CompactionProcessor: Utilization") { + te := parseCompactionLine(line) + if te.Total > 0 { + allTokenEntries = append(allTokenEntries, te) + if currentSessionID != "" { + sessionEntries[currentSessionID] = append(sessionEntries[currentSessionID], te) + } + } + } + } + } + + if ls.DefaultModel != "" { + snap.Raw["default_model"] = ls.DefaultModel + } + + for sessionID, entries := range sessionEntries { + sortCompactionEntries(entries) + last := 
entries[len(entries)-1] + ls.SessionTokens[sessionID] = last + + burn := 0.0 + for idx, te := range entries { + if idx == 0 { + if te.Used > 0 { + burn += float64(te.Used) + } + continue + } + delta := te.Used - entries[idx-1].Used + if delta > 0 { + burn += float64(delta) + } + } + if burn > 0 { + ls.SessionBurn[sessionID] = burn + } + } + + if last, ok := newestCompactionEntry(allTokenEntries); ok { + snap.Raw["context_window_tokens"] = fmt.Sprintf("%d/%d", last.Used, last.Total) + pct := float64(last.Used) / float64(last.Total) * 100 + snap.Raw["context_window_pct"] = fmt.Sprintf("%.1f%%", pct) + used := float64(last.Used) + limit := float64(last.Total) + snap.Metrics["context_window"] = core.Metric{ + Limit: &limit, + Used: &used, + Remaining: core.Float64Ptr(limit - used), + Unit: "tokens", + Window: "session", + } + } + + return ls +} diff --git a/internal/providers/copilot/local_types.go b/internal/providers/copilot/local_types.go new file mode 100644 index 0000000..a4df328 --- /dev/null +++ b/internal/providers/copilot/local_types.go @@ -0,0 +1,55 @@ +package copilot + +import "encoding/json" + +type assistantMsgData struct { + Content string `json:"content"` + ReasoningTxt string `json:"reasoningText"` + ToolRequests json.RawMessage `json:"toolRequests"` +} + +type quotaSnapshotEntry struct { + EntitlementRequests int `json:"entitlementRequests"` + UsedRequests int `json:"usedRequests"` + RemainingPercentage float64 `json:"remainingPercentage"` + ResetDate string `json:"resetDate"` +} + +type assistantUsageData struct { + Model string `json:"model"` + InputTokens float64 `json:"inputTokens"` + OutputTokens float64 `json:"outputTokens"` + CacheReadTokens float64 `json:"cacheReadTokens"` + CacheWriteTokens float64 `json:"cacheWriteTokens"` + Cost float64 `json:"cost"` + Duration int64 `json:"duration"` + QuotaSnapshots map[string]quotaSnapshotEntry `json:"quotaSnapshots"` +} + +type sessionShutdownData struct { + ShutdownType string `json:"shutdownType"` + 
TotalPremiumRequests int `json:"totalPremiumRequests"` + TotalAPIDurationMs int64 `json:"totalApiDurationMs"` + SessionStartTime string `json:"sessionStartTime"` + CodeChanges shutdownCodeChanges `json:"codeChanges"` + ModelMetrics map[string]shutdownModelMetric `json:"modelMetrics"` +} + +type shutdownCodeChanges struct { + LinesAdded int `json:"linesAdded"` + LinesRemoved int `json:"linesRemoved"` + FilesModified int `json:"filesModified"` +} + +type shutdownModelMetric struct { + Requests struct { + Count int `json:"count"` + Cost float64 `json:"cost"` + } `json:"requests"` + Usage struct { + InputTokens float64 `json:"inputTokens"` + OutputTokens float64 `json:"outputTokens"` + CacheReadTokens float64 `json:"cacheReadTokens"` + CacheWriteTokens float64 `json:"cacheWriteTokens"` + } `json:"usage"` +} diff --git a/internal/providers/copilot/telemetry_session_file.go b/internal/providers/copilot/telemetry_session_file.go index 0d62e4c..2d628f0 100644 --- a/internal/providers/copilot/telemetry_session_file.go +++ b/internal/providers/copilot/telemetry_session_file.go @@ -712,432 +712,3 @@ func copilotTelemetryMessageID(sessionID string, lineNum int, messageID, fallbac } return fmt.Sprintf("%s:%d", sessionID, lineNum) } - -func parseCopilotTelemetryToolRequest(raw json.RawMessage) (copilotTelemetryToolRequest, bool) { - var reqMap map[string]any - if json.Unmarshal(raw, &reqMap) != nil { - return copilotTelemetryToolRequest{}, false - } - - out := copilotTelemetryToolRequest{ - ToolCallID: strings.TrimSpace(anyToString(reqMap["toolCallId"])), - RawName: core.FirstNonEmpty(anyToString(reqMap["name"]), anyToString(reqMap["toolName"]), anyToString(reqMap["tool"])), - } - if out.RawName == "" { - out.RawName = extractCopilotToolName(raw) - } - for _, key := range []string{"arguments", "args", "input"} { - if value, ok := reqMap[key]; ok && out.Input == nil { - out.Input = decodeCopilotTelemetryJSONAny(value) - } - } - return out, true -} - -func 
normalizeCopilotTelemetryToolName(raw string) (string, map[string]any) { - meta := map[string]any{} - name := strings.TrimSpace(raw) - if name == "" { - return "unknown", meta - } - meta["tool_name_raw"] = name - if server, function, ok := parseCopilotTelemetryMCPTool(name); ok { - meta["tool_type"] = "mcp" - meta["mcp_server"] = server - meta["mcp_function"] = function - return "mcp__" + server + "__" + function, meta - } - return sanitizeMetricName(name), meta -} - -func parseCopilotTelemetryMCPTool(raw string) (string, string, bool) { - normalized := strings.ToLower(strings.TrimSpace(raw)) - if normalized == "" { - return "", "", false - } - for _, marker := range []string{"_mcp_server_", "-mcp-server-"} { - if parts := strings.SplitN(normalized, marker, 2); len(parts) == 2 { - server := sanitizeCopilotMCPSegment(parts[0]) - function := sanitizeCopilotMCPSegment(parts[1]) - if server != "" && function != "" { - return server, function, true - } - } - } - if strings.HasPrefix(normalized, "mcp__") { - parts := strings.SplitN(strings.TrimPrefix(normalized, "mcp__"), "__", 2) - if len(parts) == 2 { - server := sanitizeCopilotMCPSegment(parts[0]) - function := sanitizeCopilotMCPSegment(parts[1]) - if server != "" && function != "" { - return server, function, true - } - } - } - if strings.HasPrefix(normalized, "mcp-") || strings.HasPrefix(normalized, "mcp_") { - canonical := normalizeCopilotCursorStyleMCPName(normalized) - if strings.HasPrefix(canonical, "mcp__") { - parts := strings.SplitN(strings.TrimPrefix(canonical, "mcp__"), "__", 2) - if len(parts) == 2 { - server := sanitizeCopilotMCPSegment(parts[0]) - function := sanitizeCopilotMCPSegment(parts[1]) - if server != "" && function != "" { - return server, function, true - } - } - } - } - if strings.HasSuffix(normalized, " (mcp)") { - body := strings.TrimSpace(strings.TrimSuffix(normalized, " (mcp)")) - body = strings.TrimPrefix(body, "user-") - if body == "" { - return "", "", false - } - if idx := 
findCopilotTelemetryServerFunctionSplit(body); idx > 0 { - server := sanitizeCopilotMCPSegment(body[:idx]) - function := sanitizeCopilotMCPSegment(body[idx+1:]) - if server != "" && function != "" { - return server, function, true - } - } - return "other", sanitizeCopilotMCPSegment(body), true - } - return "", "", false -} - -func normalizeCopilotCursorStyleMCPName(name string) string { - if strings.HasPrefix(name, "mcp-") { - rest := name[4:] - parts := strings.SplitN(rest, "-user-", 2) - if len(parts) == 2 { - server := parts[0] - afterUser := parts[1] - serverDash := server + "-" - if strings.HasPrefix(afterUser, serverDash) { - return "mcp__" + server + "__" + afterUser[len(serverDash):] - } - if idx := strings.LastIndex(afterUser, "-"); idx > 0 { - return "mcp__" + server + "__" + afterUser[idx+1:] - } - return "mcp__" + server + "__" + afterUser - } - if idx := strings.Index(rest, "-"); idx > 0 { - return "mcp__" + rest[:idx] + "__" + rest[idx+1:] - } - return "mcp__" + rest + "__" - } - if strings.HasPrefix(name, "mcp_") { - rest := name[4:] - if idx := strings.Index(rest, "_"); idx > 0 { - return "mcp__" + rest[:idx] + "__" + rest[idx+1:] - } - return "mcp__" + rest + "__" - } - return name -} - -func findCopilotTelemetryServerFunctionSplit(s string) int { - best := -1 - for i := 0; i < len(s); i++ { - if s[i] == '-' && strings.Contains(s[i+1:], "_") { - best = i - } - } - return best -} - -func sanitizeCopilotMCPSegment(raw string) string { - raw = strings.ToLower(strings.TrimSpace(raw)) - if raw == "" { - return "" - } - var b strings.Builder - lastUnderscore := false - for _, r := range raw { - switch { - case r >= 'a' && r <= 'z', r >= '0' && r <= '9', r == '_', r == '-': - b.WriteRune(r) - lastUnderscore = false - default: - if !lastUnderscore { - b.WriteByte('_') - lastUnderscore = true - } - } - } - return strings.Trim(b.String(), "_") -} - -func copilotTelemetryToolStatus(success *bool, statusRaw, errorCode, errorMessage string) 
shared.TelemetryStatus { - if success != nil { - if *success { - return shared.TelemetryStatusOK - } - if copilotTelemetryLooksAborted(errorCode, errorMessage, statusRaw) { - return shared.TelemetryStatusAborted - } - return shared.TelemetryStatusError - } - switch strings.ToLower(strings.TrimSpace(statusRaw)) { - case "ok", "success", "succeeded", "completed", "complete": - return shared.TelemetryStatusOK - case "aborted", "cancelled", "canceled", "denied": - return shared.TelemetryStatusAborted - case "error", "failed", "failure": - return shared.TelemetryStatusError - } - if errorCode != "" || errorMessage != "" { - if copilotTelemetryLooksAborted(errorCode, errorMessage, statusRaw) { - return shared.TelemetryStatusAborted - } - return shared.TelemetryStatusError - } - return shared.TelemetryStatusUnknown -} - -func copilotTelemetryLooksAborted(parts ...string) bool { - for _, part := range parts { - lower := strings.ToLower(strings.TrimSpace(part)) - if lower == "" { - continue - } - if strings.Contains(lower, "denied") || strings.Contains(lower, "cancel") || strings.Contains(lower, "abort") || strings.Contains(lower, "rejected") || strings.Contains(lower, "user initiated") { - return true - } - } - return false -} - -func summarizeCopilotTelemetryResult(raw json.RawMessage) map[string]any { - if len(strings.TrimSpace(string(raw))) == 0 { - return nil - } - decoded := decodeCopilotTelemetryJSONAny(raw) - if decoded == nil { - return nil - } - payload := map[string]any{} - if paths := shared.ExtractFilePathsFromPayload(decoded); len(paths) > 0 { - payload["result_file"] = paths[0] - } - switch value := decoded.(type) { - case map[string]any: - if content := anyToString(value["content"]); content != "" { - payload["result_chars"] = len(content) - if added, removed := countCopilotTelemetryUnifiedDiff(content); added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - if detailed := 
anyToString(value["detailedContent"]); detailed != "" { - payload["result_detailed_chars"] = len(detailed) - if _, ok := payload["lines_added"]; !ok { - if added, removed := countCopilotTelemetryUnifiedDiff(detailed); added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - } - if msg := anyToString(value["message"]); msg != "" { - payload["result_message"] = truncate(msg, 240) - } - case string: - if value != "" { - payload["result_chars"] = len(value) - if added, removed := countCopilotTelemetryUnifiedDiff(value); added > 0 || removed > 0 { - payload["lines_added"] = added - payload["lines_removed"] = removed - } - } - } - if len(payload) == 0 { - return nil - } - return payload -} - -func countCopilotTelemetryUnifiedDiff(raw string) (int, int) { - raw = strings.TrimSpace(raw) - if raw == "" || (!strings.Contains(raw, "diff --git") && !strings.Contains(raw, "\n@@")) { - return 0, 0 - } - added, removed := 0, 0 - for _, line := range strings.Split(raw, "\n") { - switch { - case strings.HasPrefix(line, "+++"), strings.HasPrefix(line, "---"), strings.HasPrefix(line, "@@"): - case strings.HasPrefix(line, "+"): - added++ - case strings.HasPrefix(line, "-"): - removed++ - } - } - return added, removed -} - -func summarizeCopilotTelemetryError(raw json.RawMessage) (string, string) { - if len(strings.TrimSpace(string(raw))) == 0 { - return "", "" - } - decoded := decodeCopilotTelemetryJSONAny(raw) - if decoded == nil { - return "", "" - } - switch value := decoded.(type) { - case map[string]any: - return strings.TrimSpace(anyToString(value["code"])), strings.TrimSpace(anyToString(value["message"])) - case string: - return "", strings.TrimSpace(value) - default: - return "", strings.TrimSpace(anyToString(decoded)) - } -} - -func copilotTelemetryBasePayload(path string, line int, client, repo, cwd, event string) map[string]any { - payload := map[string]any{ - "source_file": path, - "line": line, - "event": event, - 
"client": client, - "upstream_provider": "github", - } - if strings.TrimSpace(repo) != "" { - payload["repository"] = strings.TrimSpace(repo) - } - if strings.TrimSpace(cwd) != "" { - payload["cwd"] = strings.TrimSpace(cwd) - } - return payload -} - -func copyCopilotTelemetryPayload(in map[string]any) map[string]any { - if len(in) == 0 { - return nil - } - out := make(map[string]any, len(in)) - for key, value := range in { - out[key] = value - } - return out -} - -func decodeCopilotTelemetryJSONAny(raw any) any { - switch value := raw.(type) { - case nil: - return nil - case map[string]any, []any: - return value - case json.RawMessage: - var out any - if json.Unmarshal(value, &out) == nil { - return out - } - return strings.TrimSpace(string(value)) - case []byte: - var out any - if json.Unmarshal(value, &out) == nil { - return out - } - return strings.TrimSpace(string(value)) - case string: - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return nil - } - var out any - if json.Unmarshal([]byte(trimmed), &out) == nil { - return out - } - return trimmed - default: - return value - } -} - -func extractCopilotTelemetryCommand(input any) string { - var command string - var walk func(any) - walk = func(value any) { - if command != "" || value == nil { - return - } - switch v := value.(type) { - case map[string]any: - for key, child := range v { - k := strings.ToLower(strings.TrimSpace(key)) - if (k == "command" || k == "cmd" || k == "script" || k == "shell_command") && child != nil { - if s, ok := child.(string); ok { - command = strings.TrimSpace(s) - return - } - } - } - for _, child := range v { - walk(child) - } - case []any: - for _, child := range v { - walk(child) - } - } - } - walk(input) - return command -} - -func estimateCopilotTelemetryLineDelta(input any) (int, int) { - if input == nil { - return 0, 0 - } - encoded, err := json.Marshal(map[string]any{"arguments": input}) - if err != nil { - return 0, 0 - } - return 
estimateCopilotToolLineDelta(encoded) -} - -func copilotUpstreamProviderForModel(model string) string { - model = strings.ToLower(strings.TrimSpace(model)) - if model == "" || model == "unknown" { - return "github" - } - switch { - case strings.Contains(model, "claude"): - return "anthropic" - case strings.Contains(model, "gpt"), strings.HasPrefix(model, "o1"), strings.HasPrefix(model, "o3"), strings.HasPrefix(model, "o4"): - return "openai" - case strings.Contains(model, "gemini"): - return "google" - case strings.Contains(model, "qwen"): - return "alibaba_cloud" - case strings.Contains(model, "deepseek"): - return "deepseek" - case strings.Contains(model, "llama"): - return "meta" - case strings.Contains(model, "mistral"): - return "mistral" - default: - return "github" - } -} - -func anyToString(v any) string { - switch value := v.(type) { - case string: - return value - case fmt.Stringer: - return value.String() - default: - if value == nil { - return "" - } - return fmt.Sprintf("%v", value) - } -} - -func truncate(input string, max int) string { - input = strings.TrimSpace(input) - if max <= 0 || len(input) <= max { - return input - } - return input[:max] -} diff --git a/internal/providers/copilot/telemetry_session_helpers.go b/internal/providers/copilot/telemetry_session_helpers.go new file mode 100644 index 0000000..1cabac1 --- /dev/null +++ b/internal/providers/copilot/telemetry_session_helpers.go @@ -0,0 +1,439 @@ +package copilot + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +func parseCopilotTelemetryToolRequest(raw json.RawMessage) (copilotTelemetryToolRequest, bool) { + var reqMap map[string]any + if json.Unmarshal(raw, &reqMap) != nil { + return copilotTelemetryToolRequest{}, false + } + + out := copilotTelemetryToolRequest{ + ToolCallID: strings.TrimSpace(anyToString(reqMap["toolCallId"])), + RawName: 
core.FirstNonEmpty(anyToString(reqMap["name"]), anyToString(reqMap["toolName"]), anyToString(reqMap["tool"])), + } + if out.RawName == "" { + out.RawName = extractCopilotToolName(raw) + } + for _, key := range []string{"arguments", "args", "input"} { + if value, ok := reqMap[key]; ok && out.Input == nil { + out.Input = decodeCopilotTelemetryJSONAny(value) + } + } + return out, true +} + +func normalizeCopilotTelemetryToolName(raw string) (string, map[string]any) { + meta := map[string]any{} + name := strings.TrimSpace(raw) + if name == "" { + return "unknown", meta + } + meta["tool_name_raw"] = name + if server, function, ok := parseCopilotTelemetryMCPTool(name); ok { + meta["tool_type"] = "mcp" + meta["mcp_server"] = server + meta["mcp_function"] = function + return "mcp__" + server + "__" + function, meta + } + return sanitizeMetricName(name), meta +} + +func parseCopilotTelemetryMCPTool(raw string) (string, string, bool) { + normalized := strings.ToLower(strings.TrimSpace(raw)) + if normalized == "" { + return "", "", false + } + for _, marker := range []string{"_mcp_server_", "-mcp-server-"} { + if parts := strings.SplitN(normalized, marker, 2); len(parts) == 2 { + server := sanitizeCopilotMCPSegment(parts[0]) + function := sanitizeCopilotMCPSegment(parts[1]) + if server != "" && function != "" { + return server, function, true + } + } + } + if strings.HasPrefix(normalized, "mcp__") { + parts := strings.SplitN(strings.TrimPrefix(normalized, "mcp__"), "__", 2) + if len(parts) == 2 { + server := sanitizeCopilotMCPSegment(parts[0]) + function := sanitizeCopilotMCPSegment(parts[1]) + if server != "" && function != "" { + return server, function, true + } + } + } + if strings.HasPrefix(normalized, "mcp-") || strings.HasPrefix(normalized, "mcp_") { + canonical := normalizeCopilotCursorStyleMCPName(normalized) + if strings.HasPrefix(canonical, "mcp__") { + parts := strings.SplitN(strings.TrimPrefix(canonical, "mcp__"), "__", 2) + if len(parts) == 2 { + server := 
sanitizeCopilotMCPSegment(parts[0]) + function := sanitizeCopilotMCPSegment(parts[1]) + if server != "" && function != "" { + return server, function, true + } + } + } + } + if strings.HasSuffix(normalized, " (mcp)") { + body := strings.TrimSpace(strings.TrimSuffix(normalized, " (mcp)")) + body = strings.TrimPrefix(body, "user-") + if body == "" { + return "", "", false + } + if idx := findCopilotTelemetryServerFunctionSplit(body); idx > 0 { + server := sanitizeCopilotMCPSegment(body[:idx]) + function := sanitizeCopilotMCPSegment(body[idx+1:]) + if server != "" && function != "" { + return server, function, true + } + } + return "other", sanitizeCopilotMCPSegment(body), true + } + return "", "", false +} + +func normalizeCopilotCursorStyleMCPName(name string) string { + if strings.HasPrefix(name, "mcp-") { + rest := name[4:] + parts := strings.SplitN(rest, "-user-", 2) + if len(parts) == 2 { + server := parts[0] + afterUser := parts[1] + serverDash := server + "-" + if strings.HasPrefix(afterUser, serverDash) { + return "mcp__" + server + "__" + afterUser[len(serverDash):] + } + if idx := strings.LastIndex(afterUser, "-"); idx > 0 { + return "mcp__" + server + "__" + afterUser[idx+1:] + } + return "mcp__" + server + "__" + afterUser + } + if idx := strings.Index(rest, "-"); idx > 0 { + return "mcp__" + rest[:idx] + "__" + rest[idx+1:] + } + return "mcp__" + rest + "__" + } + if strings.HasPrefix(name, "mcp_") { + rest := name[4:] + if idx := strings.Index(rest, "_"); idx > 0 { + return "mcp__" + rest[:idx] + "__" + rest[idx+1:] + } + return "mcp__" + rest + "__" + } + return name +} + +func findCopilotTelemetryServerFunctionSplit(s string) int { + best := -1 + for i := 0; i < len(s); i++ { + if s[i] == '-' && strings.Contains(s[i+1:], "_") { + best = i + } + } + return best +} + +func sanitizeCopilotMCPSegment(raw string) string { + raw = strings.ToLower(strings.TrimSpace(raw)) + if raw == "" { + return "" + } + var b strings.Builder + lastUnderscore := false + for 
_, r := range raw { + switch { + case r >= 'a' && r <= 'z', r >= '0' && r <= '9', r == '_', r == '-': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore { + b.WriteByte('_') + lastUnderscore = true + } + } + } + return strings.Trim(b.String(), "_") +} + +func copilotTelemetryToolStatus(success *bool, statusRaw, errorCode, errorMessage string) shared.TelemetryStatus { + if success != nil { + if *success { + return shared.TelemetryStatusOK + } + if copilotTelemetryLooksAborted(errorCode, errorMessage, statusRaw) { + return shared.TelemetryStatusAborted + } + return shared.TelemetryStatusError + } + switch strings.ToLower(strings.TrimSpace(statusRaw)) { + case "ok", "success", "succeeded", "completed", "complete": + return shared.TelemetryStatusOK + case "aborted", "cancelled", "canceled", "denied": + return shared.TelemetryStatusAborted + case "error", "failed", "failure": + return shared.TelemetryStatusError + } + if errorCode != "" || errorMessage != "" { + if copilotTelemetryLooksAborted(errorCode, errorMessage, statusRaw) { + return shared.TelemetryStatusAborted + } + return shared.TelemetryStatusError + } + return shared.TelemetryStatusUnknown +} + +func copilotTelemetryLooksAborted(parts ...string) bool { + for _, part := range parts { + lower := strings.ToLower(strings.TrimSpace(part)) + if lower == "" { + continue + } + if strings.Contains(lower, "denied") || strings.Contains(lower, "cancel") || strings.Contains(lower, "abort") || strings.Contains(lower, "rejected") || strings.Contains(lower, "user initiated") { + return true + } + } + return false +} + +func summarizeCopilotTelemetryResult(raw json.RawMessage) map[string]any { + if len(strings.TrimSpace(string(raw))) == 0 { + return nil + } + decoded := decodeCopilotTelemetryJSONAny(raw) + if decoded == nil { + return nil + } + payload := map[string]any{} + if paths := shared.ExtractFilePathsFromPayload(decoded); len(paths) > 0 { + payload["result_file"] = paths[0] + } + switch value 
:= decoded.(type) { + case map[string]any: + if content := anyToString(value["content"]); content != "" { + payload["result_chars"] = len(content) + if added, removed := countCopilotTelemetryUnifiedDiff(content); added > 0 || removed > 0 { + payload["lines_added"] = added + payload["lines_removed"] = removed + } + } + if detailed := anyToString(value["detailedContent"]); detailed != "" { + payload["result_detailed_chars"] = len(detailed) + if _, ok := payload["lines_added"]; !ok { + if added, removed := countCopilotTelemetryUnifiedDiff(detailed); added > 0 || removed > 0 { + payload["lines_added"] = added + payload["lines_removed"] = removed + } + } + } + if msg := anyToString(value["message"]); msg != "" { + payload["result_message"] = truncate(msg, 240) + } + case string: + if value != "" { + payload["result_chars"] = len(value) + if added, removed := countCopilotTelemetryUnifiedDiff(value); added > 0 || removed > 0 { + payload["lines_added"] = added + payload["lines_removed"] = removed + } + } + } + if len(payload) == 0 { + return nil + } + return payload +} + +func countCopilotTelemetryUnifiedDiff(raw string) (int, int) { + raw = strings.TrimSpace(raw) + if raw == "" || (!strings.Contains(raw, "diff --git") && !strings.Contains(raw, "\n@@")) { + return 0, 0 + } + added, removed := 0, 0 + for _, line := range strings.Split(raw, "\n") { + switch { + case strings.HasPrefix(line, "+++"), strings.HasPrefix(line, "---"), strings.HasPrefix(line, "@@"): + case strings.HasPrefix(line, "+"): + added++ + case strings.HasPrefix(line, "-"): + removed++ + } + } + return added, removed +} + +func summarizeCopilotTelemetryError(raw json.RawMessage) (string, string) { + if len(strings.TrimSpace(string(raw))) == 0 { + return "", "" + } + decoded := decodeCopilotTelemetryJSONAny(raw) + if decoded == nil { + return "", "" + } + switch value := decoded.(type) { + case map[string]any: + return strings.TrimSpace(anyToString(value["code"])), 
strings.TrimSpace(anyToString(value["message"])) + case string: + return "", strings.TrimSpace(value) + default: + return "", strings.TrimSpace(anyToString(decoded)) + } +} + +func copilotTelemetryBasePayload(path string, line int, client, repo, cwd, event string) map[string]any { + payload := map[string]any{ + "source_file": path, + "line": line, + "event": event, + "client": client, + "upstream_provider": "github", + } + if strings.TrimSpace(repo) != "" { + payload["repository"] = strings.TrimSpace(repo) + } + if strings.TrimSpace(cwd) != "" { + payload["cwd"] = strings.TrimSpace(cwd) + } + return payload +} + +func copyCopilotTelemetryPayload(in map[string]any) map[string]any { + if len(in) == 0 { + return nil + } + out := make(map[string]any, len(in)) + for key, value := range in { + out[key] = value + } + return out +} + +func decodeCopilotTelemetryJSONAny(raw any) any { + switch value := raw.(type) { + case nil: + return nil + case map[string]any, []any: + return value + case json.RawMessage: + var out any + if json.Unmarshal(value, &out) == nil { + return out + } + return strings.TrimSpace(string(value)) + case []byte: + var out any + if json.Unmarshal(value, &out) == nil { + return out + } + return strings.TrimSpace(string(value)) + case string: + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return nil + } + var out any + if json.Unmarshal([]byte(trimmed), &out) == nil { + return out + } + return trimmed + default: + return value + } +} + +func extractCopilotTelemetryCommand(input any) string { + var command string + var walk func(any) + walk = func(value any) { + if command != "" || value == nil { + return + } + switch v := value.(type) { + case map[string]any: + for key, child := range v { + k := strings.ToLower(strings.TrimSpace(key)) + if (k == "command" || k == "cmd" || k == "script" || k == "shell_command") && child != nil { + if s, ok := child.(string); ok { + command = strings.TrimSpace(s) + return + } + } + } + for _, child := range v 
{ + walk(child) + } + case []any: + for _, child := range v { + walk(child) + } + } + } + walk(input) + return command +} + +func estimateCopilotTelemetryLineDelta(input any) (int, int) { + if input == nil { + return 0, 0 + } + encoded, err := json.Marshal(map[string]any{"arguments": input}) + if err != nil { + return 0, 0 + } + return estimateCopilotToolLineDelta(encoded) +} + +func copilotUpstreamProviderForModel(model string) string { + model = strings.ToLower(strings.TrimSpace(model)) + if model == "" || model == "unknown" { + return "github" + } + switch { + case strings.Contains(model, "claude"): + return "anthropic" + case strings.Contains(model, "gpt"), strings.HasPrefix(model, "o1"), strings.HasPrefix(model, "o3"), strings.HasPrefix(model, "o4"): + return "openai" + case strings.Contains(model, "gemini"): + return "google" + case strings.Contains(model, "qwen"): + return "alibaba_cloud" + case strings.Contains(model, "deepseek"): + return "deepseek" + case strings.Contains(model, "llama"): + return "meta" + case strings.Contains(model, "mistral"): + return "mistral" + default: + return "github" + } +} + +func anyToString(v any) string { + switch value := v.(type) { + case string: + return value + case fmt.Stringer: + return value.String() + default: + if value == nil { + return "" + } + return fmt.Sprintf("%v", value) + } +} + +func truncate(input string, max int) string { + input = strings.TrimSpace(input) + if max <= 0 || len(input) <= max { + return input + } + return input[:max] +} diff --git a/internal/providers/copilot/test_helpers_test.go b/internal/providers/copilot/test_helpers_test.go new file mode 100644 index 0000000..5b94b70 --- /dev/null +++ b/internal/providers/copilot/test_helpers_test.go @@ -0,0 +1,25 @@ +package copilot + +import "github.com/janekbaraniewski/openusage/internal/core" + +func testCopilotAccount(binary, configDir, copilotBinary string) core.AccountConfig { + acct := core.AccountConfig{ + ID: "copilot", + Provider: "copilot", 
+ Auth: "cli", + Binary: binary, + } + if configDir == "" && copilotBinary == "" { + return acct + } + acct.ExtraData = map[string]string{} + if configDir != "" { + acct.ExtraData["config_dir"] = configDir + acct.SetHint("config_dir", configDir) + } + if copilotBinary != "" { + acct.ExtraData["copilot_binary"] = copilotBinary + acct.SetHint("copilot_binary", copilotBinary) + } + return acct +} diff --git a/internal/providers/cursor/cursor_local_test.go b/internal/providers/cursor/cursor_local_test.go new file mode 100644 index 0000000..1c7c4da --- /dev/null +++ b/internal/providers/cursor/cursor_local_test.go @@ -0,0 +1,689 @@ +package cursor + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestProvider_Fetch_ReadsComposerSessionsFromStateDB(t *testing.T) { + stateDBPath := filepath.Join(t.TempDir(), "state.vscdb") + db, err := sql.Open("sqlite3", stateDBPath) + if err != nil { + t.Fatalf("open sqlite: %v", err) + } + db.Exec(`CREATE TABLE IF NOT EXISTS ItemTable (key TEXT PRIMARY KEY, value TEXT)`) + db.Exec(`INSERT INTO ItemTable (key, value) VALUES ('cursorAuth/cachedEmail', 'test@example.com')`) + db.Exec(`INSERT INTO ItemTable (key, value) VALUES ('freeBestOfN.promptCount', '42')`) + + db.Exec(`CREATE TABLE IF NOT EXISTS cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`) + now := time.Now() + session1 := fmt.Sprintf(`{"usageData":{"claude-4.5-opus":{"costInCents":500,"amount":10},"gpt-4o":{"costInCents":100,"amount":5}},"unifiedMode":"agent","createdAt":%d,"totalLinesAdded":200,"totalLinesRemoved":50}`, now.Add(-1*time.Hour).UnixMilli()) + session2 := fmt.Sprintf(`{"usageData":{"claude-4.5-opus":{"costInCents":300,"amount":8}},"unifiedMode":"chat","createdAt":%d,"totalLinesAdded":100,"totalLinesRemoved":20}`, now.Add(-2*time.Hour).UnixMilli()) + sessionEmpty := 
`{"usageData":{},"unifiedMode":"agent","createdAt":1000}` + db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES ('composerData:aaa', ?)`, session1) + db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES ('composerData:bbb', ?)`, session2) + db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES ('composerData:ccc', ?)`, sessionEmpty) + db.Close() + + p := New() + snap, err := p.Fetch(context.Background(), core.AccountConfig{ + ID: "cursor-composer-test", + Provider: "cursor", + ExtraData: map[string]string{ + "state_db": stateDBPath, + }, + }) + if err != nil { + t.Fatalf("Fetch returned error: %v", err) + } + + if m, ok := snap.Metrics["composer_cost"]; !ok || m.Used == nil || *m.Used != 9.0 { + t.Errorf("composer_cost: got %+v, want Used=9.0 (900 cents)", m) + } + if m, ok := snap.Metrics["composer_sessions"]; !ok || m.Used == nil || *m.Used != 2 { + t.Errorf("composer_sessions: got %+v, want Used=2", m) + } + if m, ok := snap.Metrics["composer_requests"]; !ok || m.Used == nil || *m.Used != 23 { + t.Errorf("composer_requests: got %+v, want Used=23", m) + } + if m, ok := snap.Metrics["composer_lines_added"]; !ok || m.Used == nil || *m.Used != 300 { + t.Errorf("composer_lines_added: got %+v, want Used=300", m) + } + if m, ok := snap.Metrics["mode_agent_sessions"]; !ok || m.Used == nil || *m.Used != 1 { + t.Errorf("mode_agent_sessions: got %+v, want Used=1", m) + } + if m, ok := snap.Metrics["mode_chat_sessions"]; !ok || m.Used == nil || *m.Used != 1 { + t.Errorf("mode_chat_sessions: got %+v, want Used=1", m) + } + if m, ok := snap.Metrics["total_prompts"]; !ok || m.Used == nil || *m.Used != 42 { + t.Errorf("total_prompts: got %+v, want Used=42", m) + } + if snap.Raw["account_email"] != "test@example.com" { + t.Errorf("account_email: got %q, want test@example.com", snap.Raw["account_email"]) + } + if snap.Raw["total_prompts"] != "42" { + t.Errorf("total_prompts raw: got %q, want 42", snap.Raw["total_prompts"]) + } +} + +func 
TestProvider_Fetch_ReadsScoredCommitsFromTrackingDB(t *testing.T) { + dbPath := filepath.Join(t.TempDir(), "ai-code-tracking.db") + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatalf("open sqlite: %v", err) + } + db.Exec(`CREATE TABLE ai_code_hashes (hash TEXT PRIMARY KEY, source TEXT, createdAt INTEGER, model TEXT)`) + db.Exec(`INSERT INTO ai_code_hashes VALUES ('h1', 'composer', ?, 'claude')`, time.Now().UnixMilli()) + + db.Exec(`CREATE TABLE scored_commits ( + commitHash TEXT, branchName TEXT, scoredAt INTEGER, + linesAdded INTEGER, linesDeleted INTEGER, + tabLinesAdded INTEGER, tabLinesDeleted INTEGER, + composerLinesAdded INTEGER, composerLinesDeleted INTEGER, + humanLinesAdded INTEGER, humanLinesDeleted INTEGER, + blankLinesAdded INTEGER, blankLinesDeleted INTEGER, + commitMessage TEXT, commitDate TEXT, + v1AiPercentage TEXT, v2AiPercentage TEXT, + PRIMARY KEY (commitHash, branchName))`) + db.Exec(`INSERT INTO scored_commits VALUES ('abc', 'main', ?, 100, 10, 20, 5, 60, 3, 20, 2, 0, 0, 'test', '2026-02-23', '50.0', '80.0')`, time.Now().UnixMilli()) + db.Exec(`INSERT INTO scored_commits VALUES ('def', 'main', ?, 200, 20, 40, 10, 120, 6, 40, 4, 0, 0, 'test2', '2026-02-22', '30.0', '60.0')`, time.Now().UnixMilli()) + db.Close() + + p := New() + snap, err := p.Fetch(context.Background(), core.AccountConfig{ + ID: "cursor-commits-test", + Provider: "cursor", + ExtraData: map[string]string{ + "tracking_db": dbPath, + }, + }) + if err != nil { + t.Fatalf("Fetch returned error: %v", err) + } + + if m, ok := snap.Metrics["scored_commits"]; !ok || m.Used == nil || *m.Used != 2 { + t.Errorf("scored_commits: got %+v, want Used=2", m) + } + if m, ok := snap.Metrics["ai_code_percentage"]; !ok || m.Used == nil { + t.Errorf("ai_code_percentage missing") + } else if *m.Used != 70.0 { + t.Errorf("ai_code_percentage: got %.1f, want 70.0 (avg of 80+60)", *m.Used) + } +} + +func TestCursorClientBucket(t *testing.T) { + tests := []struct { + source string + want 
string + }{ + {source: "composer", want: "ide"}, + {source: "tab", want: "ide"}, + {source: "human", want: "ide"}, + {source: "cli", want: "cli_agents"}, + {source: "terminal", want: "cli_agents"}, + {source: "background-agent", want: "cloud_agents"}, + {source: "cloud", want: "cloud_agents"}, + {source: "web_agent", want: "cloud_agents"}, + {source: "unknown-source", want: "other"}, + {source: "", want: "other"}, + } + + for _, tt := range tests { + if got := cursorClientBucket(tt.source); got != tt.want { + t.Errorf("cursorClientBucket(%q) = %q, want %q", tt.source, got, tt.want) + } + } +} + +type cursorTrackingRow struct { + Hash string + Source string + Model string + CreatedAt int64 +} + +func createCursorTrackingDBForTest(t *testing.T, rows []cursorTrackingRow) string { + t.Helper() + + dbPath := filepath.Join(t.TempDir(), "ai-code-tracking.db") + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatalf("open sqlite db: %v", err) + } + defer db.Close() + + _, err = db.Exec(` + CREATE TABLE ai_code_hashes ( + hash TEXT PRIMARY KEY, + source TEXT NOT NULL, + fileExtension TEXT, + fileName TEXT, + requestId TEXT, + conversationId TEXT, + timestamp INTEGER, + createdAt INTEGER NOT NULL, + model TEXT + )`) + if err != nil { + t.Fatalf("create ai_code_hashes table: %v", err) + } + + stmt, err := db.Prepare(` + INSERT INTO ai_code_hashes ( + hash, source, fileExtension, fileName, requestId, conversationId, timestamp, createdAt, model + ) VALUES (?, ?, '', '', '', '', ?, ?, ?)`) + if err != nil { + t.Fatalf("prepare insert: %v", err) + } + defer stmt.Close() + + for _, row := range rows { + ts := row.CreatedAt + if ts == 0 { + ts = time.Now().UnixMilli() + } + if _, err := stmt.Exec(row.Hash, row.Source, ts, ts, row.Model); err != nil { + t.Fatalf("insert row %q: %v", row.Hash, err) + } + } + + return dbPath +} + +func TestProvider_Fetch_PlanSpendGaugeUsesIncludedAmountWhenNoLimit(t *testing.T) { + // When the plan has no hard limit (pu.Limit=0) and no 
pooled team limit, + // plan_spend should use the plan's included amount as the gauge reference. + mux := http.NewServeMux() + mux.HandleFunc("/aiserver.v1.DashboardService/GetCurrentPeriodUsage", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(currentPeriodUsageResp{ + BillingCycleStart: "1768055295000", + BillingCycleEnd: "1770733695000", + PlanUsage: planUsage{ + TotalSpend: 36470, // $364.70 + IncludedSpend: 2000, + Limit: 0, // No hard limit + TotalPercentUsed: 0, + }, + DisplayMessage: "Usage-based billing", + }) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetPlanInfo", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(planInfoResp{ + PlanInfo: struct { + PlanName string `json:"planName"` + IncludedAmountCents float64 `json:"includedAmountCents"` + Price string `json:"price"` + BillingCycleEnd string `json:"billingCycleEnd"` + }{ + PlanName: "Pro", + IncludedAmountCents: 2000, // $20 included + Price: "$20/mo", + }, + }) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(aggregatedUsageResp{}) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetHardLimit", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(hardLimitResp{}) + }) + mux.HandleFunc("/auth/full_stripe_profile", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(stripeProfileResp{}) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetUsageLimitPolicyStatus", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(usageLimitPolicyResp{}) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + prevBase := cursorAPIBase + cursorAPIBase = server.URL + defer func() { cursorAPIBase = prevBase }() + + p := New() + snap, err := p.Fetch(context.Background(), core.AccountConfig{ + ID: "cursor-gauge-test", + Provider: "cursor", + Token: "test-token", + }) + if 
err != nil { + t.Fatalf("Fetch returned error: %v", err) + } + + m, ok := snap.Metrics["plan_spend"] + if !ok { + t.Fatal("plan_spend metric missing") + } + if m.Used == nil || *m.Used != 364.70 { + t.Fatalf("plan_spend.Used = %v, want 364.70", m.Used) + } + if m.Limit == nil || *m.Limit != 20.0 { + t.Fatalf("plan_spend.Limit = %v, want 20.0 (from IncludedAmountCents)", m.Limit) + } +} + +func TestProvider_Fetch_CachedBillingMetricsRestoreOnAPIFailure(t *testing.T) { + // First call: API available → caches billing metrics. + // Second call: API fails → billing metrics restored from cache. + var periodCalls int + mux := http.NewServeMux() + mux.HandleFunc("/aiserver.v1.DashboardService/GetCurrentPeriodUsage", func(w http.ResponseWriter, r *http.Request) { + periodCalls++ + if periodCalls > 1 { + http.Error(w, "service unavailable", http.StatusServiceUnavailable) + return + } + json.NewEncoder(w).Encode(currentPeriodUsageResp{ + BillingCycleStart: "1768055295000", + BillingCycleEnd: "1770733695000", + PlanUsage: planUsage{ + TotalSpend: 40700, + Limit: 0, + TotalPercentUsed: 85.0, + AutoPercentUsed: 60.0, + APIPercentUsed: 25.0, + }, + SpendLimitUsage: spendLimitUsage{ + PooledLimit: 360000, + PooledUsed: 40700, + PooledRemaining: 319300, + }, + }) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetPlanInfo", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(planInfoResp{ + PlanInfo: struct { + PlanName string `json:"planName"` + IncludedAmountCents float64 `json:"includedAmountCents"` + Price string `json:"price"` + BillingCycleEnd string `json:"billingCycleEnd"` + }{ + PlanName: "Business", + IncludedAmountCents: 50000, + }, + }) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(aggregatedUsageResp{ + Aggregations: []modelAggregation{ + {ModelIntent: "test-model", TotalCents: 100}, + }, + }) + }) + + server := 
httptest.NewServer(mux) + defer server.Close() + + prevBase := cursorAPIBase + cursorAPIBase = server.URL + defer func() { cursorAPIBase = prevBase }() + + // Create state DB with composer cost data. + stateDBPath := filepath.Join(t.TempDir(), "state.vscdb") + db, err := sql.Open("sqlite3", stateDBPath) + if err != nil { + t.Fatalf("open sqlite: %v", err) + } + db.Exec(`CREATE TABLE IF NOT EXISTS ItemTable (key TEXT PRIMARY KEY, value TEXT)`) + db.Exec(`CREATE TABLE IF NOT EXISTS cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`) + session := fmt.Sprintf(`{"usageData":{"test-model":{"costInCents":7500,"amount":15}},"unifiedMode":"agent","createdAt":%d}`, time.Now().Add(-1*time.Hour).UnixMilli()) + db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES ('composerData:aaa', ?)`, session) + db.Close() + + p := New() + acct := core.AccountConfig{ + ID: "cursor-cache-billing", + Provider: "cursor", + Token: "test-token", + ExtraData: map[string]string{ + "state_db": stateDBPath, + }, + } + + // First fetch: API works, caches billing metrics. + snap1, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("first Fetch returned error: %v", err) + } + // Verify API-derived billing metrics exist. + if m, ok := snap1.Metrics["spend_limit"]; !ok || m.Limit == nil || *m.Limit != 3600.0 { + t.Fatalf("spend_limit after API call: got %+v, want Limit=3600", snap1.Metrics["spend_limit"]) + } + if m, ok := snap1.Metrics["plan_percent_used"]; !ok || m.Used == nil || *m.Used != 85.0 { + t.Fatalf("plan_percent_used after API call: got %+v, want Used=85", snap1.Metrics["plan_percent_used"]) + } + + // Second fetch: API fails → billing metrics should be restored from cache. + snap2, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("second Fetch returned error: %v", err) + } + + // spend_limit should be restored from cache. 
+ if m, ok := snap2.Metrics["spend_limit"]; !ok { + t.Fatal("spend_limit missing after API failure (should be restored from cache)") + } else { + if m.Limit == nil || *m.Limit != 3600.0 { + t.Fatalf("spend_limit.Limit = %v, want 3600 (from cache)", m.Limit) + } + if m.Used == nil || *m.Used != 407.0 { + t.Fatalf("spend_limit.Used = %v, want 407 (from cache)", m.Used) + } + } + + // plan_percent_used should be restored from cache. + if m, ok := snap2.Metrics["plan_percent_used"]; !ok { + t.Fatal("plan_percent_used missing after API failure (should be restored from cache)") + } else { + if m.Used == nil || *m.Used != 85.0 { + t.Fatalf("plan_percent_used.Used = %v, want 85 (from cache)", m.Used) + } + } + + // plan_spend should be restored from cache. + if m, ok := snap2.Metrics["plan_spend"]; !ok { + t.Fatal("plan_spend missing after API failure (should be restored from cache)") + } else { + if m.Used == nil { + t.Fatal("plan_spend.Used is nil (should be restored from cache)") + } + } +} + +func TestProvider_Fetch_PartialAPIFailure_PeriodUsageDown(t *testing.T) { + // GetCurrentPeriodUsage fails, but GetAggregatedUsageEvents succeeds. + // After a first successful call caches billing metrics, the second call + // with GetCurrentPeriodUsage failing should still show billing gauges + // AND model aggregation data from the live API. 
+ var periodCalls int + mux := http.NewServeMux() + mux.HandleFunc("/aiserver.v1.DashboardService/GetCurrentPeriodUsage", func(w http.ResponseWriter, r *http.Request) { + periodCalls++ + if periodCalls > 1 { + http.Error(w, "rate limited", http.StatusTooManyRequests) + return + } + json.NewEncoder(w).Encode(currentPeriodUsageResp{ + BillingCycleStart: "1768055295000", + BillingCycleEnd: "1770733695000", + PlanUsage: planUsage{ + TotalSpend: 40700, + TotalPercentUsed: 85.0, + }, + SpendLimitUsage: spendLimitUsage{ + PooledLimit: 360000, + PooledUsed: 40700, + PooledRemaining: 319300, + }, + }) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(aggregatedUsageResp{ + Aggregations: []modelAggregation{ + {ModelIntent: "claude-opus", TotalCents: 30000, InputTokens: "1000000"}, + }, + TotalCostCents: 30000, + }) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetPlanInfo", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(planInfoResp{}) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + prevBase := cursorAPIBase + cursorAPIBase = server.URL + defer func() { cursorAPIBase = prevBase }() + + p := New() + acct := core.AccountConfig{ + ID: "cursor-partial", + Provider: "cursor", + Token: "test-token", + } + + // First fetch: everything works. + snap1, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("first Fetch: %v", err) + } + if _, ok := snap1.Metrics["spend_limit"]; !ok { + t.Fatal("spend_limit missing after successful API call") + } + + // Second fetch: GetCurrentPeriodUsage fails, but aggregation succeeds. + snap2, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("second Fetch: %v", err) + } + + // Model aggregation from live API should still work. 
+ if _, ok := snap2.Metrics["billing_total_cost"]; !ok { + t.Fatal("billing_total_cost missing — aggregation endpoint should still work") + } + + // Billing gauge should be restored from cache. + if m, ok := snap2.Metrics["spend_limit"]; !ok { + t.Fatal("spend_limit missing — should be restored from billing cache") + } else if m.Limit == nil || *m.Limit != 3600.0 { + t.Fatalf("spend_limit.Limit = %v, want 3600 (from cached billing)", m.Limit) + } + + // plan_percent_used should also be restored. + if m, ok := snap2.Metrics["plan_percent_used"]; !ok { + t.Fatal("plan_percent_used missing — should be restored from billing cache") + } else if m.Used == nil || *m.Used != 85.0 { + t.Fatalf("plan_percent_used.Used = %v, want 85 (from cached billing)", m.Used) + } +} + +func TestProvider_Fetch_NoPeriodUsage_AggregationCreatesGauge(t *testing.T) { + // GetCurrentPeriodUsage always fails, no billing cache exists. + // GetAggregatedUsageEvents succeeds with cost data. + // GetPlanInfo returns IncludedAmountCents. + // Should create a plan_spend gauge from billing_total_cost + plan limit. 
+ mux := http.NewServeMux() + mux.HandleFunc("/aiserver.v1.DashboardService/GetCurrentPeriodUsage", func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "unauthorized", http.StatusUnauthorized) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(aggregatedUsageResp{ + Aggregations: []modelAggregation{ + {ModelIntent: "claude-opus", TotalCents: 36470}, + }, + TotalCostCents: 36470, + }) + }) + mux.HandleFunc("/aiserver.v1.DashboardService/GetPlanInfo", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(planInfoResp{ + PlanInfo: struct { + PlanName string `json:"planName"` + IncludedAmountCents float64 `json:"includedAmountCents"` + Price string `json:"price"` + BillingCycleEnd string `json:"billingCycleEnd"` + }{ + PlanName: "Pro", + IncludedAmountCents: 2000, + }, + }) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + prevBase := cursorAPIBase + cursorAPIBase = server.URL + defer func() { cursorAPIBase = prevBase }() + + p := New() + snap, err := p.Fetch(context.Background(), core.AccountConfig{ + ID: "cursor-no-period", + Provider: "cursor", + Token: "test-token", + }) + if err != nil { + t.Fatalf("Fetch: %v", err) + } + + // billing_total_cost should exist from aggregation. + if m, ok := snap.Metrics["billing_total_cost"]; !ok || m.Used == nil { + t.Fatal("billing_total_cost missing from aggregation") + } + + // plan_spend should be created from billing_total_cost + plan included amount. 
+ m, ok := snap.Metrics["plan_spend"] + if !ok { + t.Fatal("plan_spend missing — should be built from billing_total_cost + plan limit") + } + if m.Used == nil || *m.Used != 364.70 { + t.Fatalf("plan_spend.Used = %v, want 364.70", m.Used) + } + if m.Limit == nil || *m.Limit != 20.0 { + t.Fatalf("plan_spend.Limit = %v, want 20.0 (from IncludedAmountCents)", m.Limit) + } +} + +// TestProvider_Fetch_LocalOnlyComposerCostCreatesCreditsTag verifies that +// when the API is completely unavailable (no token) but local composer +// sessions have cost data, ensureCreditGauges creates plan_total_spend_usd +// so the Credits tag renders in the TUI. +func TestProvider_Fetch_LocalOnlyComposerCostCreatesCreditsTag(t *testing.T) { + p := New() + + // Set up a state DB with composer sessions that have cost data. + stateDir := t.TempDir() + stateDBPath := filepath.Join(stateDir, "state.vscdb") + sdb, err := sql.Open("sqlite3", stateDBPath) + if err != nil { + t.Fatalf("open state db: %v", err) + } + _, err = sdb.Exec(`CREATE TABLE IF NOT EXISTS ItemTable (key TEXT PRIMARY KEY, value TEXT)`) + if err != nil { + t.Fatalf("create ItemTable: %v", err) + } + _, err = sdb.Exec(`CREATE TABLE IF NOT EXISTS cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`) + if err != nil { + t.Fatalf("create cursorDiskKV: %v", err) + } + + // Insert composer session with cost data. + usage := map[string]composerModelUsage{ + "claude-4-5-opus-high-thinking": {CostInCents: 15000, Amount: 20}, + } + usageJSON, _ := json.Marshal(usage) + createdAt := time.Now().Add(-1 * time.Hour).UnixMilli() + sessionVal := fmt.Sprintf(`{"usageData":%s,"unifiedMode":"agent","createdAt":%d,"totalLinesAdded":100,"totalLinesRemoved":10}`, string(usageJSON), createdAt) + _, err = sdb.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES (?, ?)`, "composerData:session-1", sessionVal) + if err != nil { + t.Fatalf("insert composer session: %v", err) + } + sdb.Close() + + // Fetch with no token — API is completely unavailable. 
+ snap, err := p.Fetch(context.Background(), core.AccountConfig{ + ID: "test-local-only", + ExtraData: map[string]string{ + "state_db": stateDBPath, + }, + }) + if err != nil { + t.Fatalf("Fetch: %v", err) + } + + // composer_cost should exist from local state DB. + cm, ok := snap.Metrics["composer_cost"] + if !ok || cm.Used == nil || *cm.Used <= 0 { + t.Fatalf("composer_cost missing or zero, got: %+v", cm) + } + + // plan_total_spend_usd should be synthesized by ensureCreditGauges. + ptsu, ok := snap.Metrics["plan_total_spend_usd"] + if !ok { + t.Fatal("plan_total_spend_usd missing — ensureCreditGauges should create it from composer_cost") + } + if ptsu.Used == nil || *ptsu.Used != *cm.Used { + t.Fatalf("plan_total_spend_usd.Used = %v, want %v (from composer_cost)", ptsu.Used, *cm.Used) + } + + // Message should indicate API unavailable. + if snap.Message == "" { + t.Error("expected a local-only message, got empty") + } +} + +// TestProvider_Fetch_LocalOnlyCachedLimitCreatesPlanSpendGauge verifies that +// when the API previously provided a plan limit (cached), and later becomes +// unavailable, ensureCreditGauges creates plan_spend with the cached limit +// so the gauge bar renders. +func TestProvider_Fetch_LocalOnlyCachedLimitCreatesPlanSpendGauge(t *testing.T) { + p := New() + + // Pre-populate the cache with an effective limit from a previous API call. + p.mu.Lock() + p.modelAggregationCache["test-cached"] = cachedModelAggregation{ + EffectiveLimitUSD: 500.0, + } + p.mu.Unlock() + + // Set up a state DB with composer sessions that have cost data. 
+ stateDir := t.TempDir() + stateDBPath := filepath.Join(stateDir, "state.vscdb") + sdb, err := sql.Open("sqlite3", stateDBPath) + if err != nil { + t.Fatalf("open state db: %v", err) + } + _, err = sdb.Exec(`CREATE TABLE IF NOT EXISTS ItemTable (key TEXT PRIMARY KEY, value TEXT)`) + if err != nil { + t.Fatalf("create ItemTable: %v", err) + } + _, err = sdb.Exec(`CREATE TABLE IF NOT EXISTS cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`) + if err != nil { + t.Fatalf("create cursorDiskKV: %v", err) + } + + usage := map[string]composerModelUsage{ + "claude-4-5-opus": {CostInCents: 36470, Amount: 50}, + } + usageJSON, _ := json.Marshal(usage) + createdAt := time.Now().Add(-2 * time.Hour).UnixMilli() + sessionVal := fmt.Sprintf(`{"usageData":%s,"unifiedMode":"agent","createdAt":%d,"totalLinesAdded":200,"totalLinesRemoved":20}`, string(usageJSON), createdAt) + _, err = sdb.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES (?, ?)`, "composerData:session-cached", sessionVal) + if err != nil { + t.Fatalf("insert composer session: %v", err) + } + sdb.Close() + + // Fetch with no token. + snap, err := p.Fetch(context.Background(), core.AccountConfig{ + ID: "test-cached", + ExtraData: map[string]string{ + "state_db": stateDBPath, + }, + }) + if err != nil { + t.Fatalf("Fetch: %v", err) + } + + // plan_spend should be created with cached limit. 
+ ps, ok := snap.Metrics["plan_spend"] + if !ok { + t.Fatal("plan_spend missing — ensureCreditGauges should create it from composer_cost + cached limit") + } + if ps.Used == nil || *ps.Used != 364.70 { + t.Fatalf("plan_spend.Used = %v, want 364.70", ps.Used) + } + if ps.Limit == nil || *ps.Limit != 500.0 { + t.Fatalf("plan_spend.Limit = %v, want 500.0 (from cached effective limit)", ps.Limit) + } +} diff --git a/internal/providers/cursor/cursor_test.go b/internal/providers/cursor/cursor_test.go index 30b127e..3829423 100644 --- a/internal/providers/cursor/cursor_test.go +++ b/internal/providers/cursor/cursor_test.go @@ -2,12 +2,10 @@ package cursor import ( "context" - "database/sql" "encoding/json" "fmt" "net/http" "net/http/httptest" - "path/filepath" "testing" "time" @@ -37,13 +35,10 @@ func TestProvider_Describe(t *testing.T) { func TestProvider_Fetch_NoData(t *testing.T) { p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "test-cursor", - ExtraData: map[string]string{ - "tracking_db": "/nonexistent/ai-code-tracking.db", - "state_db": "/nonexistent/state.vscdb", - }, - }) + snap, err := p.Fetch(context.Background(), testCursorAccount("test-cursor", "", map[string]string{ + "tracking_db": "/nonexistent/ai-code-tracking.db", + "state_db": "/nonexistent/state.vscdb", + })) if err != nil { t.Fatalf("Fetch should not error, got: %v", err) } @@ -328,11 +323,7 @@ func TestProvider_Fetch_ExposesPlanSplitAndCacheTokenMetrics(t *testing.T) { defer func() { cursorAPIBase = prevBase }() p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "cursor-split-test", - Provider: "cursor", - Token: "test-token", - }) + snap, err := p.Fetch(context.Background(), testCursorAccount("cursor-split-test", "test-token", nil)) if err != nil { t.Fatalf("Fetch returned error: %v", err) } @@ -410,7 +401,7 @@ func TestProvider_Fetch_UsesCachedModelAggregationWhenAggregationEndpointErrors( defer func() { cursorAPIBase = prevBase }() p 
:= New() - acct := core.AccountConfig{ID: "cursor-cache-error", Provider: "cursor", Token: "test-token"} + acct := testCursorAccount("cursor-cache-error", "test-token", nil) first, err := p.Fetch(context.Background(), acct) if err != nil { @@ -462,7 +453,7 @@ func TestProvider_Fetch_UsesCachedModelAggregationWhenAggregationEndpointReturns defer func() { cursorAPIBase = prevBase }() p := New() - acct := core.AccountConfig{ID: "cursor-cache-empty", Provider: "cursor", Token: "test-token"} + acct := testCursorAccount("cursor-cache-empty", "test-token", nil) first, err := p.Fetch(context.Background(), acct) if err != nil { @@ -687,677 +678,3 @@ func newCursorAPITestMux(aggregateHandler http.HandlerFunc) *http.ServeMux { mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", aggregateHandler) return mux } - -func TestProvider_Fetch_ReadsComposerSessionsFromStateDB(t *testing.T) { - stateDBPath := filepath.Join(t.TempDir(), "state.vscdb") - db, err := sql.Open("sqlite3", stateDBPath) - if err != nil { - t.Fatalf("open sqlite: %v", err) - } - db.Exec(`CREATE TABLE IF NOT EXISTS ItemTable (key TEXT PRIMARY KEY, value TEXT)`) - db.Exec(`INSERT INTO ItemTable (key, value) VALUES ('cursorAuth/cachedEmail', 'test@example.com')`) - db.Exec(`INSERT INTO ItemTable (key, value) VALUES ('freeBestOfN.promptCount', '42')`) - - db.Exec(`CREATE TABLE IF NOT EXISTS cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`) - now := time.Now() - session1 := fmt.Sprintf(`{"usageData":{"claude-4.5-opus":{"costInCents":500,"amount":10},"gpt-4o":{"costInCents":100,"amount":5}},"unifiedMode":"agent","createdAt":%d,"totalLinesAdded":200,"totalLinesRemoved":50}`, now.Add(-1*time.Hour).UnixMilli()) - session2 := fmt.Sprintf(`{"usageData":{"claude-4.5-opus":{"costInCents":300,"amount":8}},"unifiedMode":"chat","createdAt":%d,"totalLinesAdded":100,"totalLinesRemoved":20}`, now.Add(-2*time.Hour).UnixMilli()) - sessionEmpty := `{"usageData":{},"unifiedMode":"agent","createdAt":1000}` - 
db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES ('composerData:aaa', ?)`, session1) - db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES ('composerData:bbb', ?)`, session2) - db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES ('composerData:ccc', ?)`, sessionEmpty) - db.Close() - - p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "cursor-composer-test", - Provider: "cursor", - ExtraData: map[string]string{ - "state_db": stateDBPath, - }, - }) - if err != nil { - t.Fatalf("Fetch returned error: %v", err) - } - - if m, ok := snap.Metrics["composer_cost"]; !ok || m.Used == nil || *m.Used != 9.0 { - t.Errorf("composer_cost: got %+v, want Used=9.0 (900 cents)", m) - } - if m, ok := snap.Metrics["composer_sessions"]; !ok || m.Used == nil || *m.Used != 2 { - t.Errorf("composer_sessions: got %+v, want Used=2", m) - } - if m, ok := snap.Metrics["composer_requests"]; !ok || m.Used == nil || *m.Used != 23 { - t.Errorf("composer_requests: got %+v, want Used=23", m) - } - if m, ok := snap.Metrics["composer_lines_added"]; !ok || m.Used == nil || *m.Used != 300 { - t.Errorf("composer_lines_added: got %+v, want Used=300", m) - } - if m, ok := snap.Metrics["mode_agent_sessions"]; !ok || m.Used == nil || *m.Used != 1 { - t.Errorf("mode_agent_sessions: got %+v, want Used=1", m) - } - if m, ok := snap.Metrics["mode_chat_sessions"]; !ok || m.Used == nil || *m.Used != 1 { - t.Errorf("mode_chat_sessions: got %+v, want Used=1", m) - } - if m, ok := snap.Metrics["total_prompts"]; !ok || m.Used == nil || *m.Used != 42 { - t.Errorf("total_prompts: got %+v, want Used=42", m) - } - if snap.Raw["account_email"] != "test@example.com" { - t.Errorf("account_email: got %q, want test@example.com", snap.Raw["account_email"]) - } - if snap.Raw["total_prompts"] != "42" { - t.Errorf("total_prompts raw: got %q, want 42", snap.Raw["total_prompts"]) - } -} - -func TestProvider_Fetch_ReadsScoredCommitsFromTrackingDB(t *testing.T) { - dbPath := 
filepath.Join(t.TempDir(), "ai-code-tracking.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - t.Fatalf("open sqlite: %v", err) - } - db.Exec(`CREATE TABLE ai_code_hashes (hash TEXT PRIMARY KEY, source TEXT, createdAt INTEGER, model TEXT)`) - db.Exec(`INSERT INTO ai_code_hashes VALUES ('h1', 'composer', ?, 'claude')`, time.Now().UnixMilli()) - - db.Exec(`CREATE TABLE scored_commits ( - commitHash TEXT, branchName TEXT, scoredAt INTEGER, - linesAdded INTEGER, linesDeleted INTEGER, - tabLinesAdded INTEGER, tabLinesDeleted INTEGER, - composerLinesAdded INTEGER, composerLinesDeleted INTEGER, - humanLinesAdded INTEGER, humanLinesDeleted INTEGER, - blankLinesAdded INTEGER, blankLinesDeleted INTEGER, - commitMessage TEXT, commitDate TEXT, - v1AiPercentage TEXT, v2AiPercentage TEXT, - PRIMARY KEY (commitHash, branchName))`) - db.Exec(`INSERT INTO scored_commits VALUES ('abc', 'main', ?, 100, 10, 20, 5, 60, 3, 20, 2, 0, 0, 'test', '2026-02-23', '50.0', '80.0')`, time.Now().UnixMilli()) - db.Exec(`INSERT INTO scored_commits VALUES ('def', 'main', ?, 200, 20, 40, 10, 120, 6, 40, 4, 0, 0, 'test2', '2026-02-22', '30.0', '60.0')`, time.Now().UnixMilli()) - db.Close() - - p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "cursor-commits-test", - Provider: "cursor", - ExtraData: map[string]string{ - "tracking_db": dbPath, - }, - }) - if err != nil { - t.Fatalf("Fetch returned error: %v", err) - } - - if m, ok := snap.Metrics["scored_commits"]; !ok || m.Used == nil || *m.Used != 2 { - t.Errorf("scored_commits: got %+v, want Used=2", m) - } - if m, ok := snap.Metrics["ai_code_percentage"]; !ok || m.Used == nil { - t.Errorf("ai_code_percentage missing") - } else if *m.Used != 70.0 { - t.Errorf("ai_code_percentage: got %.1f, want 70.0 (avg of 80+60)", *m.Used) - } -} - -func TestCursorClientBucket(t *testing.T) { - tests := []struct { - source string - want string - }{ - {source: "composer", want: "ide"}, - {source: "tab", want: "ide"}, 
- {source: "human", want: "ide"}, - {source: "cli", want: "cli_agents"}, - {source: "terminal", want: "cli_agents"}, - {source: "background-agent", want: "cloud_agents"}, - {source: "cloud", want: "cloud_agents"}, - {source: "web_agent", want: "cloud_agents"}, - {source: "unknown-source", want: "other"}, - {source: "", want: "other"}, - } - - for _, tt := range tests { - if got := cursorClientBucket(tt.source); got != tt.want { - t.Errorf("cursorClientBucket(%q) = %q, want %q", tt.source, got, tt.want) - } - } -} - -type cursorTrackingRow struct { - Hash string - Source string - Model string - CreatedAt int64 -} - -func createCursorTrackingDBForTest(t *testing.T, rows []cursorTrackingRow) string { - t.Helper() - - dbPath := filepath.Join(t.TempDir(), "ai-code-tracking.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - t.Fatalf("open sqlite db: %v", err) - } - defer db.Close() - - _, err = db.Exec(` - CREATE TABLE ai_code_hashes ( - hash TEXT PRIMARY KEY, - source TEXT NOT NULL, - fileExtension TEXT, - fileName TEXT, - requestId TEXT, - conversationId TEXT, - timestamp INTEGER, - createdAt INTEGER NOT NULL, - model TEXT - )`) - if err != nil { - t.Fatalf("create ai_code_hashes table: %v", err) - } - - stmt, err := db.Prepare(` - INSERT INTO ai_code_hashes ( - hash, source, fileExtension, fileName, requestId, conversationId, timestamp, createdAt, model - ) VALUES (?, ?, '', '', '', '', ?, ?, ?)`) - if err != nil { - t.Fatalf("prepare insert: %v", err) - } - defer stmt.Close() - - for _, row := range rows { - ts := row.CreatedAt - if ts == 0 { - ts = time.Now().UnixMilli() - } - if _, err := stmt.Exec(row.Hash, row.Source, ts, ts, row.Model); err != nil { - t.Fatalf("insert row %q: %v", row.Hash, err) - } - } - - return dbPath -} - -func TestProvider_Fetch_PlanSpendGaugeUsesIncludedAmountWhenNoLimit(t *testing.T) { - // When the plan has no hard limit (pu.Limit=0) and no pooled team limit, - // plan_spend should use the plan's included amount as the 
gauge reference. - mux := http.NewServeMux() - mux.HandleFunc("/aiserver.v1.DashboardService/GetCurrentPeriodUsage", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(currentPeriodUsageResp{ - BillingCycleStart: "1768055295000", - BillingCycleEnd: "1770733695000", - PlanUsage: planUsage{ - TotalSpend: 36470, // $364.70 - IncludedSpend: 2000, - Limit: 0, // No hard limit - TotalPercentUsed: 0, - }, - DisplayMessage: "Usage-based billing", - }) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetPlanInfo", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(planInfoResp{ - PlanInfo: struct { - PlanName string `json:"planName"` - IncludedAmountCents float64 `json:"includedAmountCents"` - Price string `json:"price"` - BillingCycleEnd string `json:"billingCycleEnd"` - }{ - PlanName: "Pro", - IncludedAmountCents: 2000, // $20 included - Price: "$20/mo", - }, - }) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(aggregatedUsageResp{}) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetHardLimit", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(hardLimitResp{}) - }) - mux.HandleFunc("/auth/full_stripe_profile", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(stripeProfileResp{}) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetUsageLimitPolicyStatus", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(usageLimitPolicyResp{}) - }) - - server := httptest.NewServer(mux) - defer server.Close() - - prevBase := cursorAPIBase - cursorAPIBase = server.URL - defer func() { cursorAPIBase = prevBase }() - - p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "cursor-gauge-test", - Provider: "cursor", - Token: "test-token", - }) - if err != nil { - t.Fatalf("Fetch returned error: %v", err) - } - - m, ok := 
snap.Metrics["plan_spend"] - if !ok { - t.Fatal("plan_spend metric missing") - } - if m.Used == nil || *m.Used != 364.70 { - t.Fatalf("plan_spend.Used = %v, want 364.70", m.Used) - } - if m.Limit == nil || *m.Limit != 20.0 { - t.Fatalf("plan_spend.Limit = %v, want 20.0 (from IncludedAmountCents)", m.Limit) - } -} - -func TestProvider_Fetch_CachedBillingMetricsRestoreOnAPIFailure(t *testing.T) { - // First call: API available → caches billing metrics. - // Second call: API fails → billing metrics restored from cache. - var periodCalls int - mux := http.NewServeMux() - mux.HandleFunc("/aiserver.v1.DashboardService/GetCurrentPeriodUsage", func(w http.ResponseWriter, r *http.Request) { - periodCalls++ - if periodCalls > 1 { - http.Error(w, "service unavailable", http.StatusServiceUnavailable) - return - } - json.NewEncoder(w).Encode(currentPeriodUsageResp{ - BillingCycleStart: "1768055295000", - BillingCycleEnd: "1770733695000", - PlanUsage: planUsage{ - TotalSpend: 40700, - Limit: 0, - TotalPercentUsed: 85.0, - AutoPercentUsed: 60.0, - APIPercentUsed: 25.0, - }, - SpendLimitUsage: spendLimitUsage{ - PooledLimit: 360000, - PooledUsed: 40700, - PooledRemaining: 319300, - }, - }) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetPlanInfo", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(planInfoResp{ - PlanInfo: struct { - PlanName string `json:"planName"` - IncludedAmountCents float64 `json:"includedAmountCents"` - Price string `json:"price"` - BillingCycleEnd string `json:"billingCycleEnd"` - }{ - PlanName: "Business", - IncludedAmountCents: 50000, - }, - }) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(aggregatedUsageResp{ - Aggregations: []modelAggregation{ - {ModelIntent: "test-model", TotalCents: 100}, - }, - }) - }) - - server := httptest.NewServer(mux) - defer server.Close() - - prevBase := cursorAPIBase - cursorAPIBase = 
server.URL - defer func() { cursorAPIBase = prevBase }() - - // Create state DB with composer cost data. - stateDBPath := filepath.Join(t.TempDir(), "state.vscdb") - db, err := sql.Open("sqlite3", stateDBPath) - if err != nil { - t.Fatalf("open sqlite: %v", err) - } - db.Exec(`CREATE TABLE IF NOT EXISTS ItemTable (key TEXT PRIMARY KEY, value TEXT)`) - db.Exec(`CREATE TABLE IF NOT EXISTS cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`) - session := fmt.Sprintf(`{"usageData":{"test-model":{"costInCents":7500,"amount":15}},"unifiedMode":"agent","createdAt":%d}`, time.Now().Add(-1*time.Hour).UnixMilli()) - db.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES ('composerData:aaa', ?)`, session) - db.Close() - - p := New() - acct := core.AccountConfig{ - ID: "cursor-cache-billing", - Provider: "cursor", - Token: "test-token", - ExtraData: map[string]string{ - "state_db": stateDBPath, - }, - } - - // First fetch: API works, caches billing metrics. - snap1, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("first Fetch returned error: %v", err) - } - // Verify API-derived billing metrics exist. - if m, ok := snap1.Metrics["spend_limit"]; !ok || m.Limit == nil || *m.Limit != 3600.0 { - t.Fatalf("spend_limit after API call: got %+v, want Limit=3600", snap1.Metrics["spend_limit"]) - } - if m, ok := snap1.Metrics["plan_percent_used"]; !ok || m.Used == nil || *m.Used != 85.0 { - t.Fatalf("plan_percent_used after API call: got %+v, want Used=85", snap1.Metrics["plan_percent_used"]) - } - - // Second fetch: API fails → billing metrics should be restored from cache. - snap2, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("second Fetch returned error: %v", err) - } - - // spend_limit should be restored from cache. 
- if m, ok := snap2.Metrics["spend_limit"]; !ok { - t.Fatal("spend_limit missing after API failure (should be restored from cache)") - } else { - if m.Limit == nil || *m.Limit != 3600.0 { - t.Fatalf("spend_limit.Limit = %v, want 3600 (from cache)", m.Limit) - } - if m.Used == nil || *m.Used != 407.0 { - t.Fatalf("spend_limit.Used = %v, want 407 (from cache)", m.Used) - } - } - - // plan_percent_used should be restored from cache. - if m, ok := snap2.Metrics["plan_percent_used"]; !ok { - t.Fatal("plan_percent_used missing after API failure (should be restored from cache)") - } else { - if m.Used == nil || *m.Used != 85.0 { - t.Fatalf("plan_percent_used.Used = %v, want 85 (from cache)", m.Used) - } - } - - // plan_spend should be restored from cache. - if m, ok := snap2.Metrics["plan_spend"]; !ok { - t.Fatal("plan_spend missing after API failure (should be restored from cache)") - } else { - if m.Used == nil { - t.Fatal("plan_spend.Used is nil (should be restored from cache)") - } - } -} - -func TestProvider_Fetch_PartialAPIFailure_PeriodUsageDown(t *testing.T) { - // GetCurrentPeriodUsage fails, but GetAggregatedUsageEvents succeeds. - // After a first successful call caches billing metrics, the second call - // with GetCurrentPeriodUsage failing should still show billing gauges - // AND model aggregation data from the live API. 
- var periodCalls int - mux := http.NewServeMux() - mux.HandleFunc("/aiserver.v1.DashboardService/GetCurrentPeriodUsage", func(w http.ResponseWriter, r *http.Request) { - periodCalls++ - if periodCalls > 1 { - http.Error(w, "rate limited", http.StatusTooManyRequests) - return - } - json.NewEncoder(w).Encode(currentPeriodUsageResp{ - BillingCycleStart: "1768055295000", - BillingCycleEnd: "1770733695000", - PlanUsage: planUsage{ - TotalSpend: 40700, - TotalPercentUsed: 85.0, - }, - SpendLimitUsage: spendLimitUsage{ - PooledLimit: 360000, - PooledUsed: 40700, - PooledRemaining: 319300, - }, - }) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(aggregatedUsageResp{ - Aggregations: []modelAggregation{ - {ModelIntent: "claude-opus", TotalCents: 30000, InputTokens: "1000000"}, - }, - TotalCostCents: 30000, - }) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetPlanInfo", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(planInfoResp{}) - }) - - server := httptest.NewServer(mux) - defer server.Close() - - prevBase := cursorAPIBase - cursorAPIBase = server.URL - defer func() { cursorAPIBase = prevBase }() - - p := New() - acct := core.AccountConfig{ - ID: "cursor-partial", - Provider: "cursor", - Token: "test-token", - } - - // First fetch: everything works. - snap1, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("first Fetch: %v", err) - } - if _, ok := snap1.Metrics["spend_limit"]; !ok { - t.Fatal("spend_limit missing after successful API call") - } - - // Second fetch: GetCurrentPeriodUsage fails, but aggregation succeeds. - snap2, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("second Fetch: %v", err) - } - - // Model aggregation from live API should still work. 
- if _, ok := snap2.Metrics["billing_total_cost"]; !ok { - t.Fatal("billing_total_cost missing — aggregation endpoint should still work") - } - - // Billing gauge should be restored from cache. - if m, ok := snap2.Metrics["spend_limit"]; !ok { - t.Fatal("spend_limit missing — should be restored from billing cache") - } else if m.Limit == nil || *m.Limit != 3600.0 { - t.Fatalf("spend_limit.Limit = %v, want 3600 (from cached billing)", m.Limit) - } - - // plan_percent_used should also be restored. - if m, ok := snap2.Metrics["plan_percent_used"]; !ok { - t.Fatal("plan_percent_used missing — should be restored from billing cache") - } else if m.Used == nil || *m.Used != 85.0 { - t.Fatalf("plan_percent_used.Used = %v, want 85 (from cached billing)", m.Used) - } -} - -func TestProvider_Fetch_NoPeriodUsage_AggregationCreatesGauge(t *testing.T) { - // GetCurrentPeriodUsage always fails, no billing cache exists. - // GetAggregatedUsageEvents succeeds with cost data. - // GetPlanInfo returns IncludedAmountCents. - // Should create a plan_spend gauge from billing_total_cost + plan limit. 
- mux := http.NewServeMux() - mux.HandleFunc("/aiserver.v1.DashboardService/GetCurrentPeriodUsage", func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "unauthorized", http.StatusUnauthorized) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetAggregatedUsageEvents", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(aggregatedUsageResp{ - Aggregations: []modelAggregation{ - {ModelIntent: "claude-opus", TotalCents: 36470}, - }, - TotalCostCents: 36470, - }) - }) - mux.HandleFunc("/aiserver.v1.DashboardService/GetPlanInfo", func(w http.ResponseWriter, r *http.Request) { - json.NewEncoder(w).Encode(planInfoResp{ - PlanInfo: struct { - PlanName string `json:"planName"` - IncludedAmountCents float64 `json:"includedAmountCents"` - Price string `json:"price"` - BillingCycleEnd string `json:"billingCycleEnd"` - }{ - PlanName: "Pro", - IncludedAmountCents: 2000, - }, - }) - }) - - server := httptest.NewServer(mux) - defer server.Close() - - prevBase := cursorAPIBase - cursorAPIBase = server.URL - defer func() { cursorAPIBase = prevBase }() - - p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "cursor-no-period", - Provider: "cursor", - Token: "test-token", - }) - if err != nil { - t.Fatalf("Fetch: %v", err) - } - - // billing_total_cost should exist from aggregation. - if m, ok := snap.Metrics["billing_total_cost"]; !ok || m.Used == nil { - t.Fatal("billing_total_cost missing from aggregation") - } - - // plan_spend should be created from billing_total_cost + plan included amount. 
- m, ok := snap.Metrics["plan_spend"] - if !ok { - t.Fatal("plan_spend missing — should be built from billing_total_cost + plan limit") - } - if m.Used == nil || *m.Used != 364.70 { - t.Fatalf("plan_spend.Used = %v, want 364.70", m.Used) - } - if m.Limit == nil || *m.Limit != 20.0 { - t.Fatalf("plan_spend.Limit = %v, want 20.0 (from IncludedAmountCents)", m.Limit) - } -} - -// TestProvider_Fetch_LocalOnlyComposerCostCreatesCreditsTag verifies that -// when the API is completely unavailable (no token) but local composer -// sessions have cost data, ensureCreditGauges creates plan_total_spend_usd -// so the Credits tag renders in the TUI. -func TestProvider_Fetch_LocalOnlyComposerCostCreatesCreditsTag(t *testing.T) { - p := New() - - // Set up a state DB with composer sessions that have cost data. - stateDir := t.TempDir() - stateDBPath := filepath.Join(stateDir, "state.vscdb") - sdb, err := sql.Open("sqlite3", stateDBPath) - if err != nil { - t.Fatalf("open state db: %v", err) - } - _, err = sdb.Exec(`CREATE TABLE IF NOT EXISTS ItemTable (key TEXT PRIMARY KEY, value TEXT)`) - if err != nil { - t.Fatalf("create ItemTable: %v", err) - } - _, err = sdb.Exec(`CREATE TABLE IF NOT EXISTS cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`) - if err != nil { - t.Fatalf("create cursorDiskKV: %v", err) - } - - // Insert composer session with cost data. - usage := map[string]composerModelUsage{ - "claude-4-5-opus-high-thinking": {CostInCents: 15000, Amount: 20}, - } - usageJSON, _ := json.Marshal(usage) - createdAt := time.Now().Add(-1 * time.Hour).UnixMilli() - sessionVal := fmt.Sprintf(`{"usageData":%s,"unifiedMode":"agent","createdAt":%d,"totalLinesAdded":100,"totalLinesRemoved":10}`, string(usageJSON), createdAt) - _, err = sdb.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES (?, ?)`, "composerData:session-1", sessionVal) - if err != nil { - t.Fatalf("insert composer session: %v", err) - } - sdb.Close() - - // Fetch with no token — API is completely unavailable. 
- snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "test-local-only", - ExtraData: map[string]string{ - "state_db": stateDBPath, - }, - }) - if err != nil { - t.Fatalf("Fetch: %v", err) - } - - // composer_cost should exist from local state DB. - cm, ok := snap.Metrics["composer_cost"] - if !ok || cm.Used == nil || *cm.Used <= 0 { - t.Fatalf("composer_cost missing or zero, got: %+v", cm) - } - - // plan_total_spend_usd should be synthesized by ensureCreditGauges. - ptsu, ok := snap.Metrics["plan_total_spend_usd"] - if !ok { - t.Fatal("plan_total_spend_usd missing — ensureCreditGauges should create it from composer_cost") - } - if ptsu.Used == nil || *ptsu.Used != *cm.Used { - t.Fatalf("plan_total_spend_usd.Used = %v, want %v (from composer_cost)", ptsu.Used, *cm.Used) - } - - // Message should indicate API unavailable. - if snap.Message == "" { - t.Error("expected a local-only message, got empty") - } -} - -// TestProvider_Fetch_LocalOnlyCachedLimitCreatesPlanSpendGauge verifies that -// when the API previously provided a plan limit (cached), and later becomes -// unavailable, ensureCreditGauges creates plan_spend with the cached limit -// so the gauge bar renders. -func TestProvider_Fetch_LocalOnlyCachedLimitCreatesPlanSpendGauge(t *testing.T) { - p := New() - - // Pre-populate the cache with an effective limit from a previous API call. - p.mu.Lock() - p.modelAggregationCache["test-cached"] = cachedModelAggregation{ - EffectiveLimitUSD: 500.0, - } - p.mu.Unlock() - - // Set up a state DB with composer sessions that have cost data. 
- stateDir := t.TempDir() - stateDBPath := filepath.Join(stateDir, "state.vscdb") - sdb, err := sql.Open("sqlite3", stateDBPath) - if err != nil { - t.Fatalf("open state db: %v", err) - } - _, err = sdb.Exec(`CREATE TABLE IF NOT EXISTS ItemTable (key TEXT PRIMARY KEY, value TEXT)`) - if err != nil { - t.Fatalf("create ItemTable: %v", err) - } - _, err = sdb.Exec(`CREATE TABLE IF NOT EXISTS cursorDiskKV (key TEXT PRIMARY KEY, value TEXT)`) - if err != nil { - t.Fatalf("create cursorDiskKV: %v", err) - } - - usage := map[string]composerModelUsage{ - "claude-4-5-opus": {CostInCents: 36470, Amount: 50}, - } - usageJSON, _ := json.Marshal(usage) - createdAt := time.Now().Add(-2 * time.Hour).UnixMilli() - sessionVal := fmt.Sprintf(`{"usageData":%s,"unifiedMode":"agent","createdAt":%d,"totalLinesAdded":200,"totalLinesRemoved":20}`, string(usageJSON), createdAt) - _, err = sdb.Exec(`INSERT INTO cursorDiskKV (key, value) VALUES (?, ?)`, "composerData:session-cached", sessionVal) - if err != nil { - t.Fatalf("insert composer session: %v", err) - } - sdb.Close() - - // Fetch with no token. - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "test-cached", - ExtraData: map[string]string{ - "state_db": stateDBPath, - }, - }) - if err != nil { - t.Fatalf("Fetch: %v", err) - } - - // plan_spend should be created with cached limit. 
- ps, ok := snap.Metrics["plan_spend"] - if !ok { - t.Fatal("plan_spend missing — ensureCreditGauges should create it from composer_cost + cached limit") - } - if ps.Used == nil || *ps.Used != 364.70 { - t.Fatalf("plan_spend.Used = %v, want 364.70", ps.Used) - } - if ps.Limit == nil || *ps.Limit != 500.0 { - t.Fatalf("plan_spend.Limit = %v, want 500.0 (from cached effective limit)", ps.Limit) - } -} diff --git a/internal/providers/cursor/fetch.go b/internal/providers/cursor/fetch.go index 9bc1034..418fc3a 100644 --- a/internal/providers/cursor/fetch.go +++ b/internal/providers/cursor/fetch.go @@ -68,9 +68,11 @@ func (p *Provider) Fetch(ctx context.Context, acct core.AccountConfig) (core.Usa } if acct.ExtraData["tracking_db"] == "" && trackingDBPath != "" { acct.ExtraData["tracking_db"] = trackingDBPath + acct.SetHint("tracking_db", trackingDBPath) } if acct.ExtraData["state_db"] == "" && stateDBPath != "" { acct.ExtraData["state_db"] = stateDBPath + acct.SetHint("state_db", stateDBPath) } var hasLocalData bool diff --git a/internal/providers/cursor/test_helpers_test.go b/internal/providers/cursor/test_helpers_test.go new file mode 100644 index 0000000..57baf34 --- /dev/null +++ b/internal/providers/cursor/test_helpers_test.go @@ -0,0 +1,21 @@ +package cursor + +import "github.com/janekbaraniewski/openusage/internal/core" + +func testCursorAccount(id, token string, extra map[string]string) core.AccountConfig { + acct := core.AccountConfig{ + ID: id, + Provider: "cursor", + Token: token, + } + if len(extra) == 0 { + return acct + } + acct.ExtraData = extra + for _, key := range []string{"tracking_db", "state_db"} { + if value := extra[key]; value != "" { + acct.SetHint(key, value) + } + } + return acct +} diff --git a/internal/providers/gemini_cli/gemini_cli_test.go b/internal/providers/gemini_cli/gemini_cli_test.go index 507b858..e5c16a2 100644 --- a/internal/providers/gemini_cli/gemini_cli_test.go +++ b/internal/providers/gemini_cli/gemini_cli_test.go @@ -62,11 
+62,7 @@ func TestFetch_ReadsLocalData(t *testing.T) { os.WriteFile(filepath.Join(convDir, "session3.pb"), []byte("data"), 0644) p := New() - acct := core.AccountConfig{ - ID: "test-gemini-cli", - Provider: "gemini_cli", - ExtraData: map[string]string{"config_dir": tmpDir}, - } + acct := testGeminiCLIAccount("test-gemini-cli", tmpDir) snap, err := p.Fetch(context.Background(), acct) if err != nil { @@ -128,10 +124,7 @@ func TestFetch_ExpiredOAuth(t *testing.T) { writeJSON(t, filepath.Join(tmpDir, "oauth_creds.json"), creds) p := New() - acct := core.AccountConfig{ - ID: "test-expired", - ExtraData: map[string]string{"config_dir": tmpDir}, - } + acct := testGeminiCLIAccount("test-expired", tmpDir) snap, err := p.Fetch(context.Background(), acct) if err != nil { @@ -151,10 +144,7 @@ func TestFetch_NoData(t *testing.T) { tmpDir := t.TempDir() p := New() - acct := core.AccountConfig{ - ID: "test-empty", - ExtraData: map[string]string{"config_dir": tmpDir}, - } + acct := testGeminiCLIAccount("test-empty", tmpDir) snap, err := p.Fetch(context.Background(), acct) if err != nil { @@ -351,11 +341,7 @@ func TestFetch_SessionUsageBreakdowns(t *testing.T) { writeJSON(t, filepath.Join(chatDir, "session-2026-02-01T10-00-aaaa1111.json"), chat) p := New() - acct := core.AccountConfig{ - ID: "test-gemini-cli", - Provider: "gemini_cli", - ExtraData: map[string]string{"config_dir": tmpDir}, - } + acct := testGeminiCLIAccount("test-gemini-cli", tmpDir) snap, err := p.Fetch(context.Background(), acct) if err != nil { @@ -637,11 +623,7 @@ func TestFetch_QuotaLimitMessageFallback(t *testing.T) { }) p := New() - snap, err := p.Fetch(context.Background(), core.AccountConfig{ - ID: "test-gemini-cli", - Provider: "gemini_cli", - ExtraData: map[string]string{"config_dir": tmpDir}, - }) + snap, err := p.Fetch(context.Background(), testGeminiCLIAccount("test-gemini-cli", tmpDir)) if err != nil { t.Fatalf("Fetch() error: %v", err) } diff --git a/internal/providers/gemini_cli/session_usage.go 
b/internal/providers/gemini_cli/session_usage.go index 22f4e9d..dd92cb8 100644 --- a/internal/providers/gemini_cli/session_usage.go +++ b/internal/providers/gemini_cli/session_usage.go @@ -1,17 +1,10 @@ package gemini_cli import ( - "bytes" - "encoding/json" "fmt" - "os" - "path/filepath" - "sort" "strings" - "time" "github.com/janekbaraniewski/openusage/internal/core" - "github.com/janekbaraniewski/openusage/internal/providers/shared" "github.com/samber/lo" ) @@ -495,843 +488,3 @@ func (p *Provider) readSessionUsageBreakdowns(tmpDir string, snap *core.UsageSna return sessionCount, nil } - -func findGeminiSessionFiles(tmpDir string) ([]string, error) { - if strings.TrimSpace(tmpDir) == "" { - return nil, nil - } - if _, err := os.Stat(tmpDir); err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, fmt.Errorf("stat tmp dir: %w", err) - } - - type item struct { - path string - modTime time.Time - } - var files []item - - walkErr := filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { - if err != nil || info == nil || info.IsDir() { - return nil - } - name := info.Name() - if !strings.HasPrefix(name, "session-") || !strings.HasSuffix(name, ".json") { - return nil - } - files = append(files, item{path: path, modTime: info.ModTime()}) - return nil - }) - if walkErr != nil { - return nil, fmt.Errorf("walk gemini tmp dir: %w", walkErr) - } - if len(files) == 0 { - return nil, nil - } - - sort.Slice(files, func(i, j int) bool { - if files[i].modTime.Equal(files[j].modTime) { - return files[i].path > files[j].path - } - return files[i].modTime.After(files[j].modTime) - }) - - return lo.Map(files, func(f item, _ int) string { return f.path }), nil -} - -func readGeminiChatFile(path string) (*geminiChatFile, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var chat geminiChatFile - if err := json.NewDecoder(f).Decode(&chat); err != nil { - return nil, err - } - return &chat, nil -} - 
-func emitBreakdownMetrics(prefix string, totals map[string]tokenUsage, daily map[string]map[string]float64, snap *core.UsageSnapshot) { - entries := sortUsageEntries(totals) - if len(entries) == 0 { - return - } - - for i, entry := range entries { - if i >= maxBreakdownMetrics { - break - } - keyPrefix := prefix + "_" + sanitizeMetricName(entry.Name) - setUsageMetric(snap, keyPrefix+"_total_tokens", float64(entry.Data.TotalTokens)) - setUsageMetric(snap, keyPrefix+"_input_tokens", float64(entry.Data.InputTokens)) - setUsageMetric(snap, keyPrefix+"_output_tokens", float64(entry.Data.OutputTokens)) - - if entry.Data.CachedInputTokens > 0 { - setUsageMetric(snap, keyPrefix+"_cached_tokens", float64(entry.Data.CachedInputTokens)) - } - if entry.Data.ReasoningTokens > 0 { - setUsageMetric(snap, keyPrefix+"_reasoning_tokens", float64(entry.Data.ReasoningTokens)) - } - - if byDay, ok := daily[entry.Name]; ok { - seriesKey := "tokens_" + prefix + "_" + sanitizeMetricName(entry.Name) - snap.DailySeries[seriesKey] = core.SortedTimePoints(byDay) - } - - if prefix == "model" { - rec := core.ModelUsageRecord{ - RawModelID: entry.Name, - RawSource: "json", - Window: defaultUsageWindowLabel, - InputTokens: core.Float64Ptr(float64(entry.Data.InputTokens)), - OutputTokens: core.Float64Ptr(float64(entry.Data.OutputTokens)), - TotalTokens: core.Float64Ptr(float64(entry.Data.TotalTokens)), - } - if entry.Data.CachedInputTokens > 0 { - rec.CachedTokens = core.Float64Ptr(float64(entry.Data.CachedInputTokens)) - } - if entry.Data.ReasoningTokens > 0 { - rec.ReasoningTokens = core.Float64Ptr(float64(entry.Data.ReasoningTokens)) - } - snap.AppendModelUsage(rec) - } - } - - snap.Raw[prefix+"_usage"] = formatUsageSummary(entries, maxBreakdownRaw) -} - -func emitClientSessionMetrics(clientSessions map[string]int, snap *core.UsageSnapshot) { - type entry struct { - name string - count int - } - var all []entry - for name, count := range clientSessions { - if count > 0 { - all = append(all, 
entry{name: name, count: count}) - } - } - sort.Slice(all, func(i, j int) bool { - if all[i].count == all[j].count { - return all[i].name < all[j].name - } - return all[i].count > all[j].count - }) - - for i, item := range all { - if i >= maxBreakdownMetrics { - break - } - value := float64(item.count) - snap.Metrics["client_"+sanitizeMetricName(item.name)+"_sessions"] = core.Metric{ - Used: &value, - Unit: "sessions", - Window: defaultUsageWindowLabel, - } - } -} - -func emitModelRequestMetrics(modelRequests, modelSessions map[string]int, snap *core.UsageSnapshot) { - type entry struct { - name string - requests int - sessions int - } - - all := make([]entry, 0, len(modelRequests)) - for name, requests := range modelRequests { - if requests <= 0 { - continue - } - all = append(all, entry{name: name, requests: requests, sessions: modelSessions[name]}) - } - - sort.Slice(all, func(i, j int) bool { - if all[i].requests == all[j].requests { - return all[i].name < all[j].name - } - return all[i].requests > all[j].requests - }) - - for i, item := range all { - if i >= maxBreakdownMetrics { - break - } - keyPrefix := "model_" + sanitizeMetricName(item.name) - req := float64(item.requests) - sess := float64(item.sessions) - snap.Metrics[keyPrefix+"_requests"] = core.Metric{ - Used: &req, - Unit: "requests", - Window: defaultUsageWindowLabel, - } - if item.sessions > 0 { - snap.Metrics[keyPrefix+"_sessions"] = core.Metric{ - Used: &sess, - Unit: "sessions", - Window: defaultUsageWindowLabel, - } - } - } -} - -func emitToolMetrics(toolTotals map[string]int, snap *core.UsageSnapshot) { - type entry struct { - name string - count int - } - var all []entry - for name, count := range toolTotals { - if count > 0 { - all = append(all, entry{name: name, count: count}) - } - } - sort.Slice(all, func(i, j int) bool { - if all[i].count == all[j].count { - return all[i].name < all[j].name - } - return all[i].count > all[j].count - }) - - var parts []string - limit := maxBreakdownRaw - 
for i, item := range all { - if i < limit { - parts = append(parts, fmt.Sprintf("%s (%d)", item.name, item.count)) - } - - val := float64(item.count) - snap.Metrics["tool_"+sanitizeMetricName(item.name)] = core.Metric{ - Used: &val, - Unit: "calls", - Window: defaultUsageWindowLabel, - } - } - - if len(all) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(all)-limit)) - } - - if len(parts) > 0 { - snap.Raw["tool_usage"] = strings.Join(parts, ", ") - } -} - -func aggregateTokenTotals(modelTotals map[string]tokenUsage) tokenUsage { - var total tokenUsage - for _, usage := range modelTotals { - total.InputTokens += usage.InputTokens - total.CachedInputTokens += usage.CachedInputTokens - total.OutputTokens += usage.OutputTokens - total.ReasoningTokens += usage.ReasoningTokens - total.ToolTokens += usage.ToolTokens - total.TotalTokens += usage.TotalTokens - } - return total -} - -func setUsageMetric(snap *core.UsageSnapshot, key string, value float64) { - if value <= 0 { - return - } - snap.Metrics[key] = core.Metric{ - Used: &value, - Unit: "tokens", - Window: defaultUsageWindowLabel, - } -} - -func addUsage(target map[string]tokenUsage, name string, delta tokenUsage) { - current := target[name] - current.InputTokens += delta.InputTokens - current.CachedInputTokens += delta.CachedInputTokens - current.OutputTokens += delta.OutputTokens - current.ReasoningTokens += delta.ReasoningTokens - current.ToolTokens += delta.ToolTokens - current.TotalTokens += delta.TotalTokens - target[name] = current -} - -func addDailyUsage(target map[string]map[string]float64, name, day string, value float64) { - if day == "" || value <= 0 { - return - } - if target[name] == nil { - target[name] = make(map[string]float64) - } - target[name][day] += value -} - -func sortUsageEntries(values map[string]tokenUsage) []usageEntry { - out := make([]usageEntry, 0, len(values)) - for name, data := range values { - out = append(out, usageEntry{Name: name, Data: data}) - } - 
sort.Slice(out, func(i, j int) bool { - if out[i].Data.TotalTokens == out[j].Data.TotalTokens { - return out[i].Name < out[j].Name - } - return out[i].Data.TotalTokens > out[j].Data.TotalTokens - }) - return out -} - -func formatUsageSummary(entries []usageEntry, max int) string { - total := 0 - for _, entry := range entries { - total += entry.Data.TotalTokens - } - if total <= 0 { - return "" - } - - limit := max - if limit > len(entries) { - limit = len(entries) - } - - parts := make([]string, 0, limit+1) - for i := 0; i < limit; i++ { - entry := entries[i] - pct := float64(entry.Data.TotalTokens) / float64(total) * 100 - parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, shared.FormatTokenCount(entry.Data.TotalTokens), pct)) - } - if len(entries) > limit { - parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) - } - return strings.Join(parts, ", ") -} - -func formatNamedCountMap(m map[string]int, unit string) string { - if len(m) == 0 { - return "" - } - parts := make([]string, 0, len(m)) - for name, count := range m { - if count <= 0 { - continue - } - parts = append(parts, fmt.Sprintf("%s: %d %s", name, count, unit)) - } - sort.Strings(parts) - return strings.Join(parts, ", ") -} - -func isGeminiToolCallSuccessful(status string) bool { - status = strings.ToLower(strings.TrimSpace(status)) - return status == "" || status == "success" || status == "succeeded" || status == "ok" || status == "completed" -} - -func isGeminiMutatingTool(toolName string) bool { - toolName = strings.ToLower(strings.TrimSpace(toolName)) - if toolName == "" { - return false - } - return strings.Contains(toolName, "edit") || - strings.Contains(toolName, "write") || - strings.Contains(toolName, "create") || - strings.Contains(toolName, "delete") || - strings.Contains(toolName, "rename") || - strings.Contains(toolName, "move") || - strings.Contains(toolName, "replace") -} - -func extractGeminiToolCommand(raw json.RawMessage) string { - var payload any - if 
json.Unmarshal(raw, &payload) != nil { - return "" - } - var command string - var walk func(v any) - walk = func(v any) { - if command != "" || v == nil { - return - } - switch value := v.(type) { - case map[string]any: - for key, child := range value { - k := strings.ToLower(strings.TrimSpace(key)) - if k == "command" || k == "cmd" || k == "script" || k == "shell_command" { - if s, ok := child.(string); ok { - command = strings.TrimSpace(s) - return - } - } - } - for _, child := range value { - walk(child) - if command != "" { - return - } - } - case []any: - for _, child := range value { - walk(child) - if command != "" { - return - } - } - } - } - walk(payload) - return command -} - -func extractGeminiToolPaths(raw json.RawMessage) []string { - var payload any - if json.Unmarshal(raw, &payload) != nil { - return nil - } - - pathHints := map[string]bool{ - "path": true, "paths": true, "file": true, "files": true, "filepath": true, "file_path": true, - "cwd": true, "dir": true, "directory": true, "target": true, "pattern": true, "glob": true, - "from": true, "to": true, "include": true, "exclude": true, - } - - candidates := make(map[string]bool) - var walk func(v any, hinted bool) - walk = func(v any, hinted bool) { - switch value := v.(type) { - case map[string]any: - for key, child := range value { - k := strings.ToLower(strings.TrimSpace(key)) - childHinted := hinted || pathHints[k] || strings.Contains(k, "path") || strings.Contains(k, "file") - walk(child, childHinted) - } - case []any: - for _, child := range value { - walk(child, hinted) - } - case string: - if !hinted { - return - } - for _, token := range extractGeminiPathTokens(value) { - candidates[token] = true - } - } - } - walk(payload, false) - - return core.SortedStringKeys(candidates) -} - -func extractGeminiPathTokens(raw string) []string { - raw = strings.TrimSpace(raw) - if raw == "" { - return nil - } - fields := strings.Fields(raw) - if len(fields) == 0 { - fields = []string{raw} - } - - var 
out []string - for _, field := range fields { - token := strings.Trim(field, "\"'`()[]{}<>,:;") - if token == "" { - continue - } - lower := strings.ToLower(token) - if strings.HasPrefix(lower, "http://") || strings.HasPrefix(lower, "https://") || strings.HasPrefix(lower, "file://") { - continue - } - if strings.HasPrefix(token, "-") { - continue - } - if !strings.Contains(token, "/") && !strings.Contains(token, "\\") && !strings.Contains(token, ".") { - continue - } - token = strings.TrimPrefix(token, "./") - if token == "" { - continue - } - out = append(out, token) - } - return lo.Uniq(out) -} - -func estimateGeminiToolLineDelta(raw json.RawMessage) (added int, removed int) { - var payload any - if json.Unmarshal(raw, &payload) != nil { - return 0, 0 - } - lineCount := func(text string) int { - text = strings.TrimSpace(text) - if text == "" { - return 0 - } - return strings.Count(text, "\n") + 1 - } - var walk func(v any) - walk = func(v any) { - switch value := v.(type) { - case map[string]any: - var oldText, newText string - for _, key := range []string{"old_string", "old_text", "from", "replace"} { - if rawValue, ok := value[key]; ok { - if s, ok := rawValue.(string); ok { - oldText = s - break - } - } - } - for _, key := range []string{"new_string", "new_text", "to", "with"} { - if rawValue, ok := value[key]; ok { - if s, ok := rawValue.(string); ok { - newText = s - break - } - } - } - if oldText != "" || newText != "" { - removed += lineCount(oldText) - added += lineCount(newText) - } - if rawValue, ok := value["content"]; ok { - if s, ok := rawValue.(string); ok { - added += lineCount(s) - } - } - for _, child := range value { - walk(child) - } - case []any: - for _, child := range value { - walk(child) - } - } - } - walk(payload) - return added, removed -} - -func extractGeminiToolDiffStat(raw json.RawMessage) (geminiDiffStat, bool) { - var empty geminiDiffStat - raw = bytes.TrimSpace(raw) - if len(raw) == 0 || bytes.Equal(raw, []byte("null")) { - return 
empty, false - } - - var root map[string]json.RawMessage - if json.Unmarshal(raw, &root) != nil { - return empty, false - } - diffRaw, ok := root["diffStat"] - if !ok { - return empty, false - } - - var stat geminiDiffStat - if json.Unmarshal(diffRaw, &stat) != nil { - return empty, false - } - - stat.ModelAddedLines = max(0, stat.ModelAddedLines) - stat.ModelRemovedLines = max(0, stat.ModelRemovedLines) - stat.ModelAddedChars = max(0, stat.ModelAddedChars) - stat.ModelRemovedChars = max(0, stat.ModelRemovedChars) - stat.UserAddedLines = max(0, stat.UserAddedLines) - stat.UserRemovedLines = max(0, stat.UserRemovedLines) - stat.UserAddedChars = max(0, stat.UserAddedChars) - stat.UserRemovedChars = max(0, stat.UserRemovedChars) - - if stat.ModelAddedLines == 0 && - stat.ModelRemovedLines == 0 && - stat.ModelAddedChars == 0 && - stat.ModelRemovedChars == 0 && - stat.UserAddedLines == 0 && - stat.UserRemovedLines == 0 && - stat.UserAddedChars == 0 && - stat.UserRemovedChars == 0 { - return empty, false - } - - return stat, true -} - -func inferGeminiLanguageFromPath(path string) string { - p := strings.ToLower(strings.TrimSpace(path)) - if p == "" { - return "" - } - base := strings.ToLower(filepath.Base(p)) - switch base { - case "dockerfile": - return "docker" - case "makefile": - return "make" - } - switch strings.ToLower(filepath.Ext(p)) { - case ".go": - return "go" - case ".py": - return "python" - case ".ts", ".tsx": - return "typescript" - case ".js", ".jsx": - return "javascript" - case ".tf", ".tfvars", ".hcl": - return "terraform" - case ".sh", ".bash", ".zsh", ".fish": - return "shell" - case ".md", ".mdx": - return "markdown" - case ".json": - return "json" - case ".yml", ".yaml": - return "yaml" - case ".sql": - return "sql" - case ".rs": - return "rust" - case ".java": - return "java" - case ".c", ".h": - return "c" - case ".cc", ".cpp", ".cxx", ".hpp": - return "cpp" - case ".rb": - return "ruby" - case ".php": - return "php" - case ".swift": - return 
"swift" - case ".vue": - return "vue" - case ".svelte": - return "svelte" - case ".toml": - return "toml" - case ".xml": - return "xml" - } - return "" -} - -func usageDelta(current, previous tokenUsage) tokenUsage { - return tokenUsage{ - InputTokens: current.InputTokens - previous.InputTokens, - CachedInputTokens: current.CachedInputTokens - previous.CachedInputTokens, - OutputTokens: current.OutputTokens - previous.OutputTokens, - ReasoningTokens: current.ReasoningTokens - previous.ReasoningTokens, - ToolTokens: current.ToolTokens - previous.ToolTokens, - TotalTokens: current.TotalTokens - previous.TotalTokens, - } -} - -func validUsageDelta(delta tokenUsage) bool { - return delta.InputTokens >= 0 && - delta.CachedInputTokens >= 0 && - delta.OutputTokens >= 0 && - delta.ReasoningTokens >= 0 && - delta.ToolTokens >= 0 && - delta.TotalTokens >= 0 -} - -func normalizeModelName(name string) string { - name = strings.TrimSpace(name) - if name == "" { - return "unknown" - } - return name -} - -func normalizeClientName(name string) string { - name = strings.TrimSpace(name) - if name == "" { - return "Other" - } - return name -} - -func sanitizeMetricName(name string) string { - name = strings.ToLower(strings.TrimSpace(name)) - if name == "" { - return "unknown" - } - - var b strings.Builder - lastUnderscore := false - for _, r := range name { - switch { - case r >= 'a' && r <= 'z': - b.WriteRune(r) - lastUnderscore = false - case r >= '0' && r <= '9': - b.WriteRune(r) - lastUnderscore = false - default: - if !lastUnderscore { - b.WriteByte('_') - lastUnderscore = true - } - } - } - - out := strings.Trim(b.String(), "_") - if out == "" { - return "unknown" - } - return out -} - -func getModelContextLimit(model string) int { - model = strings.ToLower(model) - switch { - case strings.Contains(model, "1.5-pro"), strings.Contains(model, "1.5-flash-8b"): - return 2_000_000 - case strings.Contains(model, "1.5-flash"): - return 1_000_000 - case strings.Contains(model, 
"2.0-flash"): - return 1_000_000 - case strings.Contains(model, "gemini-3"), strings.Contains(model, "gemini-exp"): - return 2_000_000 - case strings.Contains(model, "pro"): - return 32_000 - case strings.Contains(model, "flash"): - return 32_000 - } - return 0 -} - -func dayFromTimestamp(timestamp string) string { - if timestamp == "" { - return "" - } - for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} { - if parsed, err := time.Parse(layout, timestamp); err == nil { - return parsed.Format("2006-01-02") - } - } - if len(timestamp) >= 10 { - candidate := timestamp[:10] - if _, err := time.Parse("2006-01-02", candidate); err == nil { - return candidate - } - } - return "" -} - -func dayFromSession(startTime, lastUpdated string) string { - if day := dayFromTimestamp(lastUpdated); day != "" { - return day - } - return dayFromTimestamp(startTime) -} - -func storeSeries(snap *core.UsageSnapshot, key string, values map[string]float64) { - if len(values) == 0 { - return - } - snap.DailySeries[key] = core.SortedTimePoints(values) -} - -func latestSeriesValue(values map[string]float64) (string, float64) { - if len(values) == 0 { - return "", 0 - } - dates := core.SortedStringKeys(values) - last := dates[len(dates)-1] - return last, values[last] -} - -func sumLastNDays(values map[string]float64, days int) float64 { - if len(values) == 0 || days <= 0 { - return 0 - } - lastDate, _ := latestSeriesValue(values) - if lastDate == "" { - return 0 - } - end, err := time.Parse("2006-01-02", lastDate) - if err != nil { - return 0 - } - start := end.AddDate(0, 0, -(days - 1)) - - total := 0.0 - for date, value := range values { - t, err := time.Parse("2006-01-02", date) - if err != nil { - continue - } - if !t.Before(start) && !t.After(end) { - total += value - } - } - return total -} - -func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { - if value <= 0 { - return - } - v := value - snap.Metrics[key] = 
core.Metric{ - Used: &v, - Unit: unit, - Window: window, - } -} - -func setPercentMetric(snap *core.UsageSnapshot, key string, value float64, window string) { - if value < 0 { - return - } - if value > 100 { - value = 100 - } - v := value - limit := 100.0 - remaining := 100 - value - snap.Metrics[key] = core.Metric{ - Used: &v, - Limit: &limit, - Remaining: &remaining, - Unit: "%", - Window: window, - } -} - -func isQuotaLimitMessage(content json.RawMessage) bool { - text := strings.ToLower(parseMessageContentText(content)) - if text == "" { - return false - } - return strings.Contains(text, "usage limit reached") || - strings.Contains(text, "all pro models") || - strings.Contains(text, "/stats for usage details") -} - -func parseMessageContentText(content json.RawMessage) string { - content = bytes.TrimSpace(content) - if len(content) == 0 { - return "" - } - - var asString string - if content[0] == '"' && json.Unmarshal(content, &asString) == nil { - return asString - } - - var asArray []map[string]any - if content[0] == '[' && json.Unmarshal(content, &asArray) == nil { - var parts []string - for _, item := range asArray { - if text, ok := item["text"].(string); ok && strings.TrimSpace(text) != "" { - parts = append(parts, text) - } - } - if len(parts) > 0 { - return strings.Join(parts, " ") - } - } - - return string(content) -} diff --git a/internal/providers/gemini_cli/session_usage_helpers.go b/internal/providers/gemini_cli/session_usage_helpers.go new file mode 100644 index 0000000..7d175b4 --- /dev/null +++ b/internal/providers/gemini_cli/session_usage_helpers.go @@ -0,0 +1,470 @@ +package gemini_cli + +import ( + "bytes" + "encoding/json" + "fmt" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/samber/lo" +) + +func formatNamedCountMap(m map[string]int, unit string) string { + if len(m) == 0 { + return "" + } + parts := make([]string, 0, len(m)) + for name, count := range m { + if count 
<= 0 { + continue + } + parts = append(parts, fmt.Sprintf("%s: %d %s", name, count, unit)) + } + sort.Strings(parts) + return strings.Join(parts, ", ") +} + +func isGeminiToolCallSuccessful(status string) bool { + status = strings.ToLower(strings.TrimSpace(status)) + return status == "" || status == "success" || status == "succeeded" || status == "ok" || status == "completed" +} + +func isGeminiMutatingTool(toolName string) bool { + toolName = strings.ToLower(strings.TrimSpace(toolName)) + if toolName == "" { + return false + } + return strings.Contains(toolName, "edit") || + strings.Contains(toolName, "write") || + strings.Contains(toolName, "create") || + strings.Contains(toolName, "delete") || + strings.Contains(toolName, "rename") || + strings.Contains(toolName, "move") || + strings.Contains(toolName, "replace") +} + +func extractGeminiToolCommand(raw json.RawMessage) string { + var payload any + if json.Unmarshal(raw, &payload) != nil { + return "" + } + var command string + var walk func(v any) + walk = func(v any) { + if command != "" || v == nil { + return + } + switch value := v.(type) { + case map[string]any: + for key, child := range value { + k := strings.ToLower(strings.TrimSpace(key)) + if k == "command" || k == "cmd" || k == "script" || k == "shell_command" { + if s, ok := child.(string); ok { + command = strings.TrimSpace(s) + return + } + } + } + for _, child := range value { + walk(child) + if command != "" { + return + } + } + case []any: + for _, child := range value { + walk(child) + if command != "" { + return + } + } + } + } + walk(payload) + return command +} + +func extractGeminiToolPaths(raw json.RawMessage) []string { + var payload any + if json.Unmarshal(raw, &payload) != nil { + return nil + } + + pathHints := map[string]bool{ + "path": true, "paths": true, "file": true, "files": true, "filepath": true, "file_path": true, + "cwd": true, "dir": true, "directory": true, "target": true, "pattern": true, "glob": true, + "from": true, "to": 
true, "include": true, "exclude": true, + } + + candidates := make(map[string]bool) + var walk func(v any, hinted bool) + walk = func(v any, hinted bool) { + switch value := v.(type) { + case map[string]any: + for key, child := range value { + k := strings.ToLower(strings.TrimSpace(key)) + childHinted := hinted || pathHints[k] || strings.Contains(k, "path") || strings.Contains(k, "file") + walk(child, childHinted) + } + case []any: + for _, child := range value { + walk(child, hinted) + } + case string: + if !hinted { + return + } + for _, token := range extractGeminiPathTokens(value) { + candidates[token] = true + } + } + } + walk(payload, false) + + return core.SortedStringKeys(candidates) +} + +func extractGeminiPathTokens(raw string) []string { + raw = strings.TrimSpace(raw) + if raw == "" { + return nil + } + fields := strings.Fields(raw) + if len(fields) == 0 { + fields = []string{raw} + } + + var out []string + for _, field := range fields { + token := strings.Trim(field, "\"'`()[]{}<>,:;") + if token == "" { + continue + } + lower := strings.ToLower(token) + if strings.HasPrefix(lower, "http://") || strings.HasPrefix(lower, "https://") || strings.HasPrefix(lower, "file://") { + continue + } + if strings.HasPrefix(token, "-") { + continue + } + if !strings.Contains(token, "/") && !strings.Contains(token, "\\") && !strings.Contains(token, ".") { + continue + } + token = strings.TrimPrefix(token, "./") + if token == "" { + continue + } + out = append(out, token) + } + return lo.Uniq(out) +} + +func estimateGeminiToolLineDelta(raw json.RawMessage) (added int, removed int) { + var payload any + if json.Unmarshal(raw, &payload) != nil { + return 0, 0 + } + lineCount := func(text string) int { + text = strings.TrimSpace(text) + if text == "" { + return 0 + } + return strings.Count(text, "\n") + 1 + } + var walk func(v any) + walk = func(v any) { + switch value := v.(type) { + case map[string]any: + var oldText, newText string + for _, key := range 
[]string{"old_string", "old_text", "from", "replace"} { + if rawValue, ok := value[key]; ok { + if s, ok := rawValue.(string); ok { + oldText = s + break + } + } + } + for _, key := range []string{"new_string", "new_text", "to", "with"} { + if rawValue, ok := value[key]; ok { + if s, ok := rawValue.(string); ok { + newText = s + break + } + } + } + if oldText != "" || newText != "" { + removed += lineCount(oldText) + added += lineCount(newText) + } + if rawValue, ok := value["content"]; ok { + if s, ok := rawValue.(string); ok { + added += lineCount(s) + } + } + for _, child := range value { + walk(child) + } + case []any: + for _, child := range value { + walk(child) + } + } + } + walk(payload) + return added, removed +} + +func extractGeminiToolDiffStat(raw json.RawMessage) (geminiDiffStat, bool) { + var empty geminiDiffStat + raw = bytes.TrimSpace(raw) + if len(raw) == 0 || bytes.Equal(raw, []byte("null")) { + return empty, false + } + + var root map[string]json.RawMessage + if json.Unmarshal(raw, &root) != nil { + return empty, false + } + diffRaw, ok := root["diffStat"] + if !ok { + return empty, false + } + + var stat geminiDiffStat + if json.Unmarshal(diffRaw, &stat) != nil { + return empty, false + } + + stat.ModelAddedLines = max(0, stat.ModelAddedLines) + stat.ModelRemovedLines = max(0, stat.ModelRemovedLines) + stat.ModelAddedChars = max(0, stat.ModelAddedChars) + stat.ModelRemovedChars = max(0, stat.ModelRemovedChars) + stat.UserAddedLines = max(0, stat.UserAddedLines) + stat.UserRemovedLines = max(0, stat.UserRemovedLines) + stat.UserAddedChars = max(0, stat.UserAddedChars) + stat.UserRemovedChars = max(0, stat.UserRemovedChars) + + if stat.ModelAddedLines == 0 && + stat.ModelRemovedLines == 0 && + stat.ModelAddedChars == 0 && + stat.ModelRemovedChars == 0 && + stat.UserAddedLines == 0 && + stat.UserRemovedLines == 0 && + stat.UserAddedChars == 0 && + stat.UserRemovedChars == 0 { + return empty, false + } + + return stat, true +} + +func 
inferGeminiLanguageFromPath(path string) string { + p := strings.ToLower(strings.TrimSpace(path)) + if p == "" { + return "" + } + base := strings.ToLower(filepath.Base(p)) + switch base { + case "dockerfile": + return "docker" + case "makefile": + return "make" + } + switch strings.ToLower(filepath.Ext(p)) { + case ".go": + return "go" + case ".py": + return "python" + case ".ts", ".tsx": + return "typescript" + case ".js", ".jsx": + return "javascript" + case ".tf", ".tfvars", ".hcl": + return "terraform" + case ".sh", ".bash", ".zsh", ".fish": + return "shell" + case ".md", ".mdx": + return "markdown" + case ".json": + return "json" + case ".yml", ".yaml": + return "yaml" + case ".sql": + return "sql" + case ".rs": + return "rust" + case ".java": + return "java" + case ".c", ".h": + return "c" + case ".cc", ".cpp", ".cxx", ".hpp": + return "cpp" + case ".rb": + return "ruby" + case ".php": + return "php" + case ".swift": + return "swift" + case ".vue": + return "vue" + case ".svelte": + return "svelte" + case ".toml": + return "toml" + case ".xml": + return "xml" + } + return "" +} + +func usageDelta(current, previous tokenUsage) tokenUsage { + return tokenUsage{ + InputTokens: current.InputTokens - previous.InputTokens, + CachedInputTokens: current.CachedInputTokens - previous.CachedInputTokens, + OutputTokens: current.OutputTokens - previous.OutputTokens, + ReasoningTokens: current.ReasoningTokens - previous.ReasoningTokens, + ToolTokens: current.ToolTokens - previous.ToolTokens, + TotalTokens: current.TotalTokens - previous.TotalTokens, + } +} + +func validUsageDelta(delta tokenUsage) bool { + return delta.InputTokens >= 0 && + delta.CachedInputTokens >= 0 && + delta.OutputTokens >= 0 && + delta.ReasoningTokens >= 0 && + delta.ToolTokens >= 0 && + delta.TotalTokens >= 0 +} + +func normalizeModelName(name string) string { + name = strings.TrimSpace(name) + if name == "" { + return "unknown" + } + return name +} + +func normalizeClientName(name string) string { 
+ name = strings.TrimSpace(name) + if name == "" { + return "Other" + } + return name +} + +func sanitizeMetricName(name string) string { + name = strings.ToLower(strings.TrimSpace(name)) + if name == "" { + return "unknown" + } + + var b strings.Builder + lastUnderscore := false + for _, r := range name { + switch { + case r >= 'a' && r <= 'z': + b.WriteRune(r) + lastUnderscore = false + case r >= '0' && r <= '9': + b.WriteRune(r) + lastUnderscore = false + default: + if !lastUnderscore { + b.WriteByte('_') + lastUnderscore = true + } + } + } + + out := strings.Trim(b.String(), "_") + if out == "" { + return "unknown" + } + return out +} + +func getModelContextLimit(model string) int { + model = strings.ToLower(model) + switch { + case strings.Contains(model, "1.5-pro"), strings.Contains(model, "1.5-flash-8b"): + return 2_000_000 + case strings.Contains(model, "1.5-flash"): + return 1_000_000 + case strings.Contains(model, "2.0-flash"): + return 1_000_000 + case strings.Contains(model, "gemini-3"), strings.Contains(model, "gemini-exp"): + return 2_000_000 + case strings.Contains(model, "pro"): + return 32_000 + case strings.Contains(model, "flash"): + return 32_000 + } + return 0 +} + +func dayFromTimestamp(timestamp string) string { + if timestamp == "" { + return "" + } + for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} { + if parsed, err := time.Parse(layout, timestamp); err == nil { + return parsed.Format("2006-01-02") + } + } + if len(timestamp) >= 10 { + candidate := timestamp[:10] + if _, err := time.Parse("2006-01-02", candidate); err == nil { + return candidate + } + } + return "" +} + +func dayFromSession(startTime, lastUpdated string) string { + if day := dayFromTimestamp(lastUpdated); day != "" { + return day + } + return dayFromTimestamp(startTime) +} + +func isQuotaLimitMessage(content json.RawMessage) bool { + text := strings.ToLower(parseMessageContentText(content)) + if text == "" { + return false + } + return 
strings.Contains(text, "usage limit reached") || + strings.Contains(text, "all pro models") || + strings.Contains(text, "/stats for usage details") +} + +func parseMessageContentText(content json.RawMessage) string { + content = bytes.TrimSpace(content) + if len(content) == 0 { + return "" + } + + var asString string + if content[0] == '"' && json.Unmarshal(content, &asString) == nil { + return asString + } + + var asArray []map[string]any + if content[0] == '[' && json.Unmarshal(content, &asArray) == nil { + var parts []string + for _, item := range asArray { + if text, ok := item["text"].(string); ok && strings.TrimSpace(text) != "" { + parts = append(parts, text) + } + } + if len(parts) > 0 { + return strings.Join(parts, " ") + } + } + + return string(content) +} diff --git a/internal/providers/gemini_cli/session_usage_io.go b/internal/providers/gemini_cli/session_usage_io.go new file mode 100644 index 0000000..505b6e6 --- /dev/null +++ b/internal/providers/gemini_cli/session_usage_io.go @@ -0,0 +1,72 @@ +package gemini_cli + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/samber/lo" +) + +func findGeminiSessionFiles(tmpDir string) ([]string, error) { + if strings.TrimSpace(tmpDir) == "" { + return nil, nil + } + if _, err := os.Stat(tmpDir); err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, fmt.Errorf("stat tmp dir: %w", err) + } + + type item struct { + path string + modTime time.Time + } + var files []item + + walkErr := filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { + if err != nil || info == nil || info.IsDir() { + return nil + } + name := info.Name() + if !strings.HasPrefix(name, "session-") || !strings.HasSuffix(name, ".json") { + return nil + } + files = append(files, item{path: path, modTime: info.ModTime()}) + return nil + }) + if walkErr != nil { + return nil, fmt.Errorf("walk gemini tmp dir: %w", walkErr) + } + if len(files) == 0 { + 
return nil, nil + } + + sort.Slice(files, func(i, j int) bool { + if files[i].modTime.Equal(files[j].modTime) { + return files[i].path > files[j].path + } + return files[i].modTime.After(files[j].modTime) + }) + + return lo.Map(files, func(f item, _ int) string { return f.path }), nil +} + +func readGeminiChatFile(path string) (*geminiChatFile, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var chat geminiChatFile + if err := json.NewDecoder(f).Decode(&chat); err != nil { + return nil, err + } + return &chat, nil +} diff --git a/internal/providers/gemini_cli/session_usage_metrics.go b/internal/providers/gemini_cli/session_usage_metrics.go new file mode 100644 index 0000000..d680059 --- /dev/null +++ b/internal/providers/gemini_cli/session_usage_metrics.go @@ -0,0 +1,336 @@ +package gemini_cli + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" + "github.com/janekbaraniewski/openusage/internal/providers/shared" +) + +func emitBreakdownMetrics(prefix string, totals map[string]tokenUsage, daily map[string]map[string]float64, snap *core.UsageSnapshot) { + entries := sortUsageEntries(totals) + if len(entries) == 0 { + return + } + + for i, entry := range entries { + if i >= maxBreakdownMetrics { + break + } + keyPrefix := prefix + "_" + sanitizeMetricName(entry.Name) + setUsageMetric(snap, keyPrefix+"_total_tokens", float64(entry.Data.TotalTokens)) + setUsageMetric(snap, keyPrefix+"_input_tokens", float64(entry.Data.InputTokens)) + setUsageMetric(snap, keyPrefix+"_output_tokens", float64(entry.Data.OutputTokens)) + + if entry.Data.CachedInputTokens > 0 { + setUsageMetric(snap, keyPrefix+"_cached_tokens", float64(entry.Data.CachedInputTokens)) + } + if entry.Data.ReasoningTokens > 0 { + setUsageMetric(snap, keyPrefix+"_reasoning_tokens", float64(entry.Data.ReasoningTokens)) + } + + if byDay, ok := daily[entry.Name]; ok { + seriesKey := "tokens_" + prefix + "_" + 
sanitizeMetricName(entry.Name) + snap.DailySeries[seriesKey] = core.SortedTimePoints(byDay) + } + + if prefix == "model" { + rec := core.ModelUsageRecord{ + RawModelID: entry.Name, + RawSource: "json", + Window: defaultUsageWindowLabel, + InputTokens: core.Float64Ptr(float64(entry.Data.InputTokens)), + OutputTokens: core.Float64Ptr(float64(entry.Data.OutputTokens)), + TotalTokens: core.Float64Ptr(float64(entry.Data.TotalTokens)), + } + if entry.Data.CachedInputTokens > 0 { + rec.CachedTokens = core.Float64Ptr(float64(entry.Data.CachedInputTokens)) + } + if entry.Data.ReasoningTokens > 0 { + rec.ReasoningTokens = core.Float64Ptr(float64(entry.Data.ReasoningTokens)) + } + snap.AppendModelUsage(rec) + } + } + + snap.Raw[prefix+"_usage"] = formatUsageSummary(entries, maxBreakdownRaw) +} + +func emitClientSessionMetrics(clientSessions map[string]int, snap *core.UsageSnapshot) { + type entry struct { + name string + count int + } + var all []entry + for name, count := range clientSessions { + if count > 0 { + all = append(all, entry{name: name, count: count}) + } + } + sort.Slice(all, func(i, j int) bool { + if all[i].count == all[j].count { + return all[i].name < all[j].name + } + return all[i].count > all[j].count + }) + + for i, item := range all { + if i >= maxBreakdownMetrics { + break + } + value := float64(item.count) + snap.Metrics["client_"+sanitizeMetricName(item.name)+"_sessions"] = core.Metric{ + Used: &value, + Unit: "sessions", + Window: defaultUsageWindowLabel, + } + } +} + +func emitModelRequestMetrics(modelRequests, modelSessions map[string]int, snap *core.UsageSnapshot) { + type entry struct { + name string + requests int + sessions int + } + + all := make([]entry, 0, len(modelRequests)) + for name, requests := range modelRequests { + if requests <= 0 { + continue + } + all = append(all, entry{name: name, requests: requests, sessions: modelSessions[name]}) + } + + sort.Slice(all, func(i, j int) bool { + if all[i].requests == all[j].requests { + return 
all[i].name < all[j].name + } + return all[i].requests > all[j].requests + }) + + for i, item := range all { + if i >= maxBreakdownMetrics { + break + } + keyPrefix := "model_" + sanitizeMetricName(item.name) + req := float64(item.requests) + sess := float64(item.sessions) + snap.Metrics[keyPrefix+"_requests"] = core.Metric{ + Used: &req, + Unit: "requests", + Window: defaultUsageWindowLabel, + } + if item.sessions > 0 { + snap.Metrics[keyPrefix+"_sessions"] = core.Metric{ + Used: &sess, + Unit: "sessions", + Window: defaultUsageWindowLabel, + } + } + } +} + +func emitToolMetrics(toolTotals map[string]int, snap *core.UsageSnapshot) { + type entry struct { + name string + count int + } + var all []entry + for name, count := range toolTotals { + if count > 0 { + all = append(all, entry{name: name, count: count}) + } + } + sort.Slice(all, func(i, j int) bool { + if all[i].count == all[j].count { + return all[i].name < all[j].name + } + return all[i].count > all[j].count + }) + + var parts []string + limit := maxBreakdownRaw + for i, item := range all { + if i < limit { + parts = append(parts, fmt.Sprintf("%s (%d)", item.name, item.count)) + } + + val := float64(item.count) + snap.Metrics["tool_"+sanitizeMetricName(item.name)] = core.Metric{ + Used: &val, + Unit: "calls", + Window: defaultUsageWindowLabel, + } + } + + if len(all) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(all)-limit)) + } + + if len(parts) > 0 { + snap.Raw["tool_usage"] = strings.Join(parts, ", ") + } +} + +func aggregateTokenTotals(modelTotals map[string]tokenUsage) tokenUsage { + var total tokenUsage + for _, usage := range modelTotals { + total.InputTokens += usage.InputTokens + total.CachedInputTokens += usage.CachedInputTokens + total.OutputTokens += usage.OutputTokens + total.ReasoningTokens += usage.ReasoningTokens + total.ToolTokens += usage.ToolTokens + total.TotalTokens += usage.TotalTokens + } + return total +} + +func setUsageMetric(snap *core.UsageSnapshot, key string, 
value float64) { + if value <= 0 { + return + } + snap.Metrics[key] = core.Metric{ + Used: &value, + Unit: "tokens", + Window: defaultUsageWindowLabel, + } +} + +func addUsage(target map[string]tokenUsage, name string, delta tokenUsage) { + current := target[name] + current.InputTokens += delta.InputTokens + current.CachedInputTokens += delta.CachedInputTokens + current.OutputTokens += delta.OutputTokens + current.ReasoningTokens += delta.ReasoningTokens + current.ToolTokens += delta.ToolTokens + current.TotalTokens += delta.TotalTokens + target[name] = current +} + +func addDailyUsage(target map[string]map[string]float64, name, day string, value float64) { + if day == "" || value <= 0 { + return + } + if target[name] == nil { + target[name] = make(map[string]float64) + } + target[name][day] += value +} + +func sortUsageEntries(values map[string]tokenUsage) []usageEntry { + out := make([]usageEntry, 0, len(values)) + for name, data := range values { + out = append(out, usageEntry{Name: name, Data: data}) + } + sort.Slice(out, func(i, j int) bool { + if out[i].Data.TotalTokens == out[j].Data.TotalTokens { + return out[i].Name < out[j].Name + } + return out[i].Data.TotalTokens > out[j].Data.TotalTokens + }) + return out +} + +func formatUsageSummary(entries []usageEntry, max int) string { + total := 0 + for _, entry := range entries { + total += entry.Data.TotalTokens + } + if total <= 0 { + return "" + } + + limit := max + if limit > len(entries) { + limit = len(entries) + } + + parts := make([]string, 0, limit+1) + for i := 0; i < limit; i++ { + entry := entries[i] + pct := float64(entry.Data.TotalTokens) / float64(total) * 100 + parts = append(parts, fmt.Sprintf("%s %s (%.0f%%)", entry.Name, shared.FormatTokenCount(entry.Data.TotalTokens), pct)) + } + if len(entries) > limit { + parts = append(parts, fmt.Sprintf("+%d more", len(entries)-limit)) + } + return strings.Join(parts, ", ") +} + +func storeSeries(snap *core.UsageSnapshot, key string, values 
map[string]float64) { + if len(values) == 0 { + return + } + snap.DailySeries[key] = core.SortedTimePoints(values) +} + +func latestSeriesValue(values map[string]float64) (string, float64) { + if len(values) == 0 { + return "", 0 + } + dates := core.SortedStringKeys(values) + last := dates[len(dates)-1] + return last, values[last] +} + +func sumLastNDays(values map[string]float64, days int) float64 { + if len(values) == 0 || days <= 0 { + return 0 + } + lastDate, _ := latestSeriesValue(values) + if lastDate == "" { + return 0 + } + end, err := time.Parse("2006-01-02", lastDate) + if err != nil { + return 0 + } + start := end.AddDate(0, 0, -(days - 1)) + + total := 0.0 + for date, value := range values { + t, err := time.Parse("2006-01-02", date) + if err != nil { + continue + } + if !t.Before(start) && !t.After(end) { + total += value + } + } + return total +} + +func setUsedMetric(snap *core.UsageSnapshot, key string, value float64, unit, window string) { + if value <= 0 { + return + } + v := value + snap.Metrics[key] = core.Metric{ + Used: &v, + Unit: unit, + Window: window, + } +} + +func setPercentMetric(snap *core.UsageSnapshot, key string, value float64, window string) { + if value < 0 { + return + } + if value > 100 { + value = 100 + } + v := value + limit := 100.0 + remaining := 100 - value + snap.Metrics[key] = core.Metric{ + Used: &v, + Limit: &limit, + Remaining: &remaining, + Unit: "%", + Window: window, + } +} diff --git a/internal/providers/gemini_cli/test_helpers_test.go b/internal/providers/gemini_cli/test_helpers_test.go new file mode 100644 index 0000000..2081a0d --- /dev/null +++ b/internal/providers/gemini_cli/test_helpers_test.go @@ -0,0 +1,13 @@ +package gemini_cli + +import "github.com/janekbaraniewski/openusage/internal/core" + +func testGeminiCLIAccount(id, configDir string) core.AccountConfig { + acct := core.AccountConfig{ + ID: id, + Provider: "gemini_cli", + ExtraData: map[string]string{"config_dir": configDir}, + } + 
acct.SetHint("config_dir", configDir) + return acct +} diff --git a/internal/providers/ollama/ollama_details_test.go b/internal/providers/ollama/ollama_details_test.go new file mode 100644 index 0000000..593c668 --- /dev/null +++ b/internal/providers/ollama/ollama_details_test.go @@ -0,0 +1,439 @@ +package ollama + +import ( + "context" + "database/sql" + "encoding/json" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + "time" + + _ "github.com/mattn/go-sqlite3" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestFetchModelDetails(t *testing.T) { + localServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/version": + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"version":"0.16.3"}`)) + case "/api/status": + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"cloud":{"disabled":false}}`)) + case "/api/tags": + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"models":[{"name":"llama3:8b","model":"llama3:8b","size":5000},{"name":"deepseek-r1:14b","model":"deepseek-r1:14b","size":8000},{"name":"gemma:2b","model":"gemma:2b","size":1500}]}`)) + case "/api/show": + var body map[string]string + _ = json.NewDecoder(r.Body).Decode(&body) + name := body["name"] + switch name { + case "llama3:8b": + _, _ = w.Write([]byte(`{"capabilities":["completion","tools"],"details":{"family":"llama","parameter_size":"8B","quantization_level":"Q4_K_M"},"model_info":{"llama.context_length":8192}}`)) + case "deepseek-r1:14b": + _, _ = w.Write([]byte(`{"capabilities":["completion","tools","thinking","vision"],"details":{"family":"deepseek","parameter_size":"14B","quantization_level":"Q5_K_M"},"model_info":{"deepseek.context_length":65536}}`)) + case "gemma:2b": + _, _ = w.Write([]byte(`{"capabilities":["completion"],"details":{"family":"gemma","parameter_size":"2B","quantization_level":"Q4_0"},"model_info":{"gemma.context_length":8192}}`)) + default: + 
http.NotFound(w, r) + } + case "/api/ps": + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"models":[]}`)) + default: + http.NotFound(w, r) + } + })) + defer localServer.Close() + + p := New() + acct := core.AccountConfig{ + ID: "test-ollama-details", + Provider: "ollama", + Auth: "local", + BaseURL: localServer.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + // 2 models with tools: llama3 + deepseek-r1 + if got := metricValue(snap, "models_with_tools"); got != 2 { + t.Errorf("models_with_tools = %v, want 2", got) + } + // 1 model with vision: deepseek-r1 + if got := metricValue(snap, "models_with_vision"); got != 1 { + t.Errorf("models_with_vision = %v, want 1", got) + } + // 1 model with thinking: deepseek-r1 + if got := metricValue(snap, "models_with_thinking"); got != 1 { + t.Errorf("models_with_thinking = %v, want 1", got) + } + // Max context should be 65536 from deepseek-r1 + if got := metricValue(snap, "max_context_length"); got != 65536 { + t.Errorf("max_context_length = %v, want 65536", got) + } + // Total parameters: 8B + 14B + 2B = 24B + if got := metricValue(snap, "total_parameters"); got != 24e9 { + t.Errorf("total_parameters = %v, want 24e9", got) + } + + // Check capability attributes + if v := snap.Attributes["model_llama3_8b_capability_tools"]; v != "true" { + t.Errorf("llama3:8b should have capability_tools = true, got %q", v) + } + if v := snap.Attributes["model_deepseek_r1_14b_capability_vision"]; v != "true" { + t.Errorf("deepseek-r1:14b should have capability_vision = true, got %q", v) + } + if v := snap.Attributes["model_deepseek_r1_14b_capability_thinking"]; v != "true" { + t.Errorf("deepseek-r1:14b should have capability_thinking = true, got %q", v) + } + if v := snap.Attributes["model_deepseek_r1_14b_quantization"]; v != "Q5_K_M" { + t.Errorf("deepseek-r1:14b quantization = %q, want Q5_K_M", v) + } +} + +func TestThinkingMetricsFromDB(t *testing.T) { + 
tmpDir := t.TempDir() + dbPath := filepath.Join(tmpDir, "db.sqlite") + + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatalf("open db: %v", err) + } + + _, err = db.Exec(` + CREATE TABLE settings (id INTEGER PRIMARY KEY, context_length INTEGER DEFAULT 4096, selected_model TEXT DEFAULT ''); + CREATE TABLE chats (id TEXT PRIMARY KEY, title TEXT, created_at TIMESTAMP); + CREATE TABLE messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + chat_id TEXT NOT NULL, + role TEXT NOT NULL, + content TEXT DEFAULT '', + model_name TEXT, + created_at TIMESTAMP, + thinking_time_start TIMESTAMP, + thinking_time_end TIMESTAMP + ); + CREATE TABLE tool_calls (id INTEGER PRIMARY KEY AUTOINCREMENT, message_id INTEGER, type TEXT, function_name TEXT, function_arguments TEXT, function_result TEXT); + CREATE TABLE attachments (id INTEGER PRIMARY KEY AUTOINCREMENT, message_id INTEGER); + CREATE TABLE users (name TEXT, email TEXT, plan TEXT, cached_at TIMESTAMP); + `) + if err != nil { + t.Fatalf("create schema: %v", err) + } + + now := time.Now() + today := now.Format("2006-01-02 15:04:05") + + _, _ = db.Exec(`INSERT INTO chats (id, title, created_at) VALUES ('c1', 'test', ?)`, today) + + // 3 thinking turns: 5s, 3s, 10s + ts := []struct { + model string + start string + end string + }{ + {"deepseek-r1:14b", now.Add(-60 * time.Second).Format("2006-01-02T15:04:05Z"), now.Add(-55 * time.Second).Format("2006-01-02T15:04:05Z")}, + {"deepseek-r1:14b", now.Add(-40 * time.Second).Format("2006-01-02T15:04:05Z"), now.Add(-37 * time.Second).Format("2006-01-02T15:04:05Z")}, + {"qwen3:32b", now.Add(-20 * time.Second).Format("2006-01-02T15:04:05Z"), now.Add(-10 * time.Second).Format("2006-01-02T15:04:05Z")}, + } + for _, turn := range ts { + _, _ = db.Exec(`INSERT INTO messages (chat_id, role, content, model_name, created_at, thinking_time_start, thinking_time_end) VALUES ('c1', 'assistant', 'resp', ?, ?, ?, ?)`, + turn.model, today, turn.start, turn.end) + } + // Non-thinking message 
(should be excluded) + _, _ = db.Exec(`INSERT INTO messages (chat_id, role, content, model_name, created_at) VALUES ('c1', 'user', 'hello', 'deepseek-r1:14b', ?)`, today) + + db.Close() + + // Minimal local server with no-op show endpoint + localServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/version": + _, _ = w.Write([]byte(`{"version":"0.16.3"}`)) + case "/api/status": + _, _ = w.Write([]byte(`{}`)) + case "/api/tags": + _, _ = w.Write([]byte(`{"models":[]}`)) + case "/api/ps": + _, _ = w.Write([]byte(`{"models":[]}`)) + default: + http.NotFound(w, r) + } + })) + defer localServer.Close() + + p := New() + acct := core.AccountConfig{ + ID: "test-thinking", + Provider: "ollama", + Auth: "local", + BaseURL: localServer.URL, + ExtraData: map[string]string{ + "db_path": dbPath, + }, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if got := metricValue(snap, "thinking_requests"); got != 3 { + t.Errorf("thinking_requests = %v, want 3", got) + } + // Total should be ~18s (5+3+10), allow some floating point slack + if got := metricValue(snap, "total_thinking_seconds"); got < 17 || got > 19 { + t.Errorf("total_thinking_seconds = %v, want ~18", got) + } + // Avg should be ~6s (18/3) + if got := metricValue(snap, "avg_thinking_seconds"); got < 5 || got > 7 { + t.Errorf("avg_thinking_seconds = %v, want ~6", got) + } +} + +func TestExpandedSettings(t *testing.T) { + tmpDir := t.TempDir() + dbPath := filepath.Join(tmpDir, "db.sqlite") + + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatalf("open db: %v", err) + } + + _, err = db.Exec(` + CREATE TABLE settings ( + id INTEGER PRIMARY KEY, + context_length INTEGER DEFAULT 4096, + selected_model TEXT DEFAULT '', + websearch_enabled INTEGER DEFAULT 1, + turbo_enabled INTEGER DEFAULT 0, + think_enabled INTEGER DEFAULT 1, + airplane_mode INTEGER DEFAULT 0, + device_id 
TEXT DEFAULT 'test-device-123' + ); + CREATE TABLE chats (id TEXT PRIMARY KEY, title TEXT, created_at TIMESTAMP); + CREATE TABLE messages (id INTEGER PRIMARY KEY AUTOINCREMENT, chat_id TEXT, role TEXT, content TEXT, model_name TEXT, created_at TIMESTAMP); + CREATE TABLE tool_calls (id INTEGER PRIMARY KEY AUTOINCREMENT, message_id INTEGER, type TEXT, function_name TEXT, function_arguments TEXT, function_result TEXT); + CREATE TABLE attachments (id INTEGER PRIMARY KEY AUTOINCREMENT, message_id INTEGER); + CREATE TABLE users (name TEXT, email TEXT, plan TEXT, cached_at TIMESTAMP); + `) + if err != nil { + t.Fatalf("create schema: %v", err) + } + + _, _ = db.Exec(`INSERT INTO settings (id, context_length, selected_model, websearch_enabled, turbo_enabled, think_enabled, airplane_mode, device_id) VALUES (1, 8192, 'llama3:8b', 1, 0, 1, 0, 'test-device-123')`) + db.Close() + + localServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/version": + _, _ = w.Write([]byte(`{"version":"0.16.3"}`)) + case "/api/status": + _, _ = w.Write([]byte(`{}`)) + case "/api/tags": + _, _ = w.Write([]byte(`{"models":[]}`)) + case "/api/ps": + _, _ = w.Write([]byte(`{"models":[]}`)) + default: + http.NotFound(w, r) + } + })) + defer localServer.Close() + + p := New() + acct := core.AccountConfig{ + ID: "test-settings", + Provider: "ollama", + Auth: "local", + BaseURL: localServer.URL, + ExtraData: map[string]string{ + "db_path": dbPath, + }, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if v := snap.Attributes["selected_model"]; v != "llama3:8b" { + t.Errorf("selected_model = %q, want llama3:8b", v) + } + if v := snap.Attributes["websearch_enabled"]; v != "1" { + t.Errorf("websearch_enabled = %q, want 1", v) + } + if v := snap.Attributes["think_enabled"]; v != "1" { + t.Errorf("think_enabled = %q, want 1", v) + } + if v := 
snap.Attributes["device_id"]; v != "test-device-123" { + t.Errorf("device_id = %q, want test-device-123", v) + } +} + +func TestParseParameterSize(t *testing.T) { + tests := []struct { + in string + want float64 + }{ + {"7B", 7e9}, + {"70B", 70e9}, + {"235B", 235e9}, + {"500M", 500e6}, + {"", 0}, + {"invalid", 0}, + } + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + got := parseParameterSize(tt.in) + if got != tt.want { + t.Errorf("parseParameterSize(%q) = %v, want %v", tt.in, got, tt.want) + } + }) + } +} + +func TestDetailWidget(t *testing.T) { + p := New() + dw := p.DetailWidget() + if len(dw.Sections) != 8 { + t.Fatalf("DetailWidget sections = %d, want 8", len(dw.Sections)) + } + expectedSections := []string{"Usage", "Models", "Languages", "MCP Usage", "Spending", "Trends", "Tokens", "Activity"} + for i, s := range dw.Sections { + if s.Name != expectedSections[i] { + t.Errorf("section[%d] = %q, want %q", i, s.Name, expectedSections[i]) + } + if s.Order != i+1 { + t.Errorf("section[%d] order = %d, want %d", i, s.Order, i+1) + } + } +} + +func TestNormalizeModelName(t *testing.T) { + tests := []struct { + in string + want string + }{ + {in: "Qwen3:32B:latest", want: "qwen3:32b"}, + {in: "models/gpt-oss:20b", want: "gpt-oss:20b"}, + {in: "https://ollama.com/library/deepseek-r1:70b-cloud", want: "deepseek-r1:70b-cloud"}, + } + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + got := normalizeModelName(tt.in) + if got != tt.want { + t.Fatalf("normalizeModelName(%q) = %q, want %q", tt.in, got, tt.want) + } + }) + } +} + +func metricValue(snap core.UsageSnapshot, key string) float64 { + m, ok := snap.Metrics[key] + if !ok || m.Remaining == nil { + return -1 + } + return *m.Remaining +} + +func createTestDB(path string) error { + db, err := sql.Open("sqlite3", path) + if err != nil { + return err + } + defer db.Close() + + schema := ` +CREATE TABLE settings ( + id INTEGER PRIMARY KEY, + context_length INTEGER NOT NULL DEFAULT 4096, + 
selected_model TEXT NOT NULL DEFAULT '', + websearch_enabled INTEGER DEFAULT 0, + turbo_enabled INTEGER DEFAULT 0, + think_enabled INTEGER DEFAULT 1, + airplane_mode INTEGER DEFAULT 0 +); +CREATE TABLE chats ( + id TEXT PRIMARY KEY, + title TEXT NOT NULL DEFAULT '', + created_at TIMESTAMP NOT NULL +); +CREATE TABLE messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + chat_id TEXT NOT NULL, + role TEXT NOT NULL, + content TEXT NOT NULL DEFAULT '', + model_name TEXT, + created_at TIMESTAMP NOT NULL, + thinking_time_start TIMESTAMP, + thinking_time_end TIMESTAMP +); +CREATE TABLE tool_calls ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + message_id INTEGER NOT NULL, + type TEXT NOT NULL DEFAULT 'function', + function_name TEXT NOT NULL DEFAULT '', + function_arguments TEXT NOT NULL DEFAULT '{}', + function_result TEXT +); +CREATE TABLE attachments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + message_id INTEGER NOT NULL +); +CREATE TABLE users ( + name TEXT NOT NULL DEFAULT '', + email TEXT NOT NULL DEFAULT '', + plan TEXT NOT NULL DEFAULT '', + cached_at TIMESTAMP NOT NULL +); +` + if _, err := db.Exec(schema); err != nil { + return err + } + + now := time.Now().In(time.Local) + today := now.Format("2006-01-02 15:04:05") + yesterday := now.Add(-24 * time.Hour).Format("2006-01-02 15:04:05") + + if _, err := db.Exec(`INSERT INTO settings (id, context_length, selected_model, websearch_enabled, think_enabled) VALUES (1, 32768, 'gpt-oss:20b', 1, 1)`); err != nil { + return err + } + if _, err := db.Exec(`INSERT INTO chats (id, title, created_at) VALUES ('chat-1', 'today', ?), ('chat-2', 'yesterday', ?)`, today, yesterday); err != nil { + return err + } + thinkStart := now.Add(-30 * time.Second).Format("2006-01-02T15:04:05Z") + thinkEnd := now.Add(-25 * time.Second).Format("2006-01-02T15:04:05Z") + thinkStart2 := now.Add(-20 * time.Second).Format("2006-01-02T15:04:05Z") + thinkEnd2 := now.Add(-17 * time.Second).Format("2006-01-02T15:04:05Z") + + if _, err := db.Exec(`INSERT INTO 
messages (chat_id, role, content, model_name, created_at, thinking_time_start, thinking_time_end) VALUES + ('chat-1','user','hello','gpt-oss:20b',?,NULL,NULL), + ('chat-1','assistant','hi','gpt-oss:20b',?,?,?), + ('chat-1','user','again','qwen3-vl:235b-cloud',?,NULL,NULL), + ('chat-1','assistant','done','qwen3-vl:235b-cloud',?,?,?), + ('chat-2','user','old','gpt-oss:20b',?,NULL,NULL)`, + today, today, thinkStart, thinkEnd, today, today, thinkStart2, thinkEnd2, yesterday); err != nil { + return err + } + if _, err := db.Exec(`INSERT INTO tool_calls (message_id, type, function_name, function_arguments, function_result) VALUES + (2, 'function', 'read_file', '{}', '{}'), + (4, 'function', 'web_search', '{}', '{}')`); err != nil { + return err + } + if _, err := db.Exec(`INSERT INTO attachments (message_id) VALUES (1)`); err != nil { + return err + } + if _, err := db.Exec(`INSERT INTO users (name, email, plan, cached_at) VALUES ('cached-user', 'cached@example.com', 'free', ?)`, today); err != nil { + return err + } + + return nil +} diff --git a/internal/providers/ollama/ollama_test.go b/internal/providers/ollama/ollama_test.go index ef85acc..9a0177d 100644 --- a/internal/providers/ollama/ollama_test.go +++ b/internal/providers/ollama/ollama_test.go @@ -2,7 +2,6 @@ package ollama import ( "context" - "database/sql" "encoding/json" "fmt" "net/http" @@ -560,426 +559,3 @@ func TestFetchServerLogs_CountsAnthropicMessagesPath(t *testing.T) { t.Fatalf("chat_requests_today = %v, want 1", got) } } - -func TestFetchModelDetails(t *testing.T) { - localServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/api/version": - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(`{"version":"0.16.3"}`)) - case "/api/status": - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(`{"cloud":{"disabled":false}}`)) - case "/api/tags": - w.WriteHeader(http.StatusOK) - _, _ = 
w.Write([]byte(`{"models":[{"name":"llama3:8b","model":"llama3:8b","size":5000},{"name":"deepseek-r1:14b","model":"deepseek-r1:14b","size":8000},{"name":"gemma:2b","model":"gemma:2b","size":1500}]}`)) - case "/api/show": - var body map[string]string - _ = json.NewDecoder(r.Body).Decode(&body) - name := body["name"] - switch name { - case "llama3:8b": - _, _ = w.Write([]byte(`{"capabilities":["completion","tools"],"details":{"family":"llama","parameter_size":"8B","quantization_level":"Q4_K_M"},"model_info":{"llama.context_length":8192}}`)) - case "deepseek-r1:14b": - _, _ = w.Write([]byte(`{"capabilities":["completion","tools","thinking","vision"],"details":{"family":"deepseek","parameter_size":"14B","quantization_level":"Q5_K_M"},"model_info":{"deepseek.context_length":65536}}`)) - case "gemma:2b": - _, _ = w.Write([]byte(`{"capabilities":["completion"],"details":{"family":"gemma","parameter_size":"2B","quantization_level":"Q4_0"},"model_info":{"gemma.context_length":8192}}`)) - default: - http.NotFound(w, r) - } - case "/api/ps": - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(`{"models":[]}`)) - default: - http.NotFound(w, r) - } - })) - defer localServer.Close() - - p := New() - acct := core.AccountConfig{ - ID: "test-ollama-details", - Provider: "ollama", - Auth: "local", - BaseURL: localServer.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - // 2 models with tools: llama3 + deepseek-r1 - if got := metricValue(snap, "models_with_tools"); got != 2 { - t.Errorf("models_with_tools = %v, want 2", got) - } - // 1 model with vision: deepseek-r1 - if got := metricValue(snap, "models_with_vision"); got != 1 { - t.Errorf("models_with_vision = %v, want 1", got) - } - // 1 model with thinking: deepseek-r1 - if got := metricValue(snap, "models_with_thinking"); got != 1 { - t.Errorf("models_with_thinking = %v, want 1", got) - } - // Max context should be 65536 from deepseek-r1 - if got := 
metricValue(snap, "max_context_length"); got != 65536 { - t.Errorf("max_context_length = %v, want 65536", got) - } - // Total parameters: 8B + 14B + 2B = 24B - if got := metricValue(snap, "total_parameters"); got != 24e9 { - t.Errorf("total_parameters = %v, want 24e9", got) - } - - // Check capability attributes - if v := snap.Attributes["model_llama3_8b_capability_tools"]; v != "true" { - t.Errorf("llama3:8b should have capability_tools = true, got %q", v) - } - if v := snap.Attributes["model_deepseek_r1_14b_capability_vision"]; v != "true" { - t.Errorf("deepseek-r1:14b should have capability_vision = true, got %q", v) - } - if v := snap.Attributes["model_deepseek_r1_14b_capability_thinking"]; v != "true" { - t.Errorf("deepseek-r1:14b should have capability_thinking = true, got %q", v) - } - if v := snap.Attributes["model_deepseek_r1_14b_quantization"]; v != "Q5_K_M" { - t.Errorf("deepseek-r1:14b quantization = %q, want Q5_K_M", v) - } -} - -func TestThinkingMetricsFromDB(t *testing.T) { - tmpDir := t.TempDir() - dbPath := filepath.Join(tmpDir, "db.sqlite") - - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - t.Fatalf("open db: %v", err) - } - - _, err = db.Exec(` - CREATE TABLE settings (id INTEGER PRIMARY KEY, context_length INTEGER DEFAULT 4096, selected_model TEXT DEFAULT ''); - CREATE TABLE chats (id TEXT PRIMARY KEY, title TEXT, created_at TIMESTAMP); - CREATE TABLE messages ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - chat_id TEXT NOT NULL, - role TEXT NOT NULL, - content TEXT DEFAULT '', - model_name TEXT, - created_at TIMESTAMP, - thinking_time_start TIMESTAMP, - thinking_time_end TIMESTAMP - ); - CREATE TABLE tool_calls (id INTEGER PRIMARY KEY AUTOINCREMENT, message_id INTEGER, type TEXT, function_name TEXT, function_arguments TEXT, function_result TEXT); - CREATE TABLE attachments (id INTEGER PRIMARY KEY AUTOINCREMENT, message_id INTEGER); - CREATE TABLE users (name TEXT, email TEXT, plan TEXT, cached_at TIMESTAMP); - `) - if err != nil { - 
t.Fatalf("create schema: %v", err) - } - - now := time.Now() - today := now.Format("2006-01-02 15:04:05") - - _, _ = db.Exec(`INSERT INTO chats (id, title, created_at) VALUES ('c1', 'test', ?)`, today) - - // 3 thinking turns: 5s, 3s, 10s - ts := []struct { - model string - start string - end string - }{ - {"deepseek-r1:14b", now.Add(-60 * time.Second).Format("2006-01-02T15:04:05Z"), now.Add(-55 * time.Second).Format("2006-01-02T15:04:05Z")}, - {"deepseek-r1:14b", now.Add(-40 * time.Second).Format("2006-01-02T15:04:05Z"), now.Add(-37 * time.Second).Format("2006-01-02T15:04:05Z")}, - {"qwen3:32b", now.Add(-20 * time.Second).Format("2006-01-02T15:04:05Z"), now.Add(-10 * time.Second).Format("2006-01-02T15:04:05Z")}, - } - for _, turn := range ts { - _, _ = db.Exec(`INSERT INTO messages (chat_id, role, content, model_name, created_at, thinking_time_start, thinking_time_end) VALUES ('c1', 'assistant', 'resp', ?, ?, ?, ?)`, - turn.model, today, turn.start, turn.end) - } - // Non-thinking message (should be excluded) - _, _ = db.Exec(`INSERT INTO messages (chat_id, role, content, model_name, created_at) VALUES ('c1', 'user', 'hello', 'deepseek-r1:14b', ?)`, today) - - db.Close() - - // Minimal local server with no-op show endpoint - localServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/api/version": - _, _ = w.Write([]byte(`{"version":"0.16.3"}`)) - case "/api/status": - _, _ = w.Write([]byte(`{}`)) - case "/api/tags": - _, _ = w.Write([]byte(`{"models":[]}`)) - case "/api/ps": - _, _ = w.Write([]byte(`{"models":[]}`)) - default: - http.NotFound(w, r) - } - })) - defer localServer.Close() - - p := New() - acct := core.AccountConfig{ - ID: "test-thinking", - Provider: "ollama", - Auth: "local", - BaseURL: localServer.URL, - ExtraData: map[string]string{ - "db_path": dbPath, - }, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if 
got := metricValue(snap, "thinking_requests"); got != 3 { - t.Errorf("thinking_requests = %v, want 3", got) - } - // Total should be ~18s (5+3+10), allow some floating point slack - if got := metricValue(snap, "total_thinking_seconds"); got < 17 || got > 19 { - t.Errorf("total_thinking_seconds = %v, want ~18", got) - } - // Avg should be ~6s (18/3) - if got := metricValue(snap, "avg_thinking_seconds"); got < 5 || got > 7 { - t.Errorf("avg_thinking_seconds = %v, want ~6", got) - } -} - -func TestExpandedSettings(t *testing.T) { - tmpDir := t.TempDir() - dbPath := filepath.Join(tmpDir, "db.sqlite") - - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - t.Fatalf("open db: %v", err) - } - - _, err = db.Exec(` - CREATE TABLE settings ( - id INTEGER PRIMARY KEY, - context_length INTEGER DEFAULT 4096, - selected_model TEXT DEFAULT '', - websearch_enabled INTEGER DEFAULT 1, - turbo_enabled INTEGER DEFAULT 0, - think_enabled INTEGER DEFAULT 1, - airplane_mode INTEGER DEFAULT 0, - device_id TEXT DEFAULT 'test-device-123' - ); - CREATE TABLE chats (id TEXT PRIMARY KEY, title TEXT, created_at TIMESTAMP); - CREATE TABLE messages (id INTEGER PRIMARY KEY AUTOINCREMENT, chat_id TEXT, role TEXT, content TEXT, model_name TEXT, created_at TIMESTAMP); - CREATE TABLE tool_calls (id INTEGER PRIMARY KEY AUTOINCREMENT, message_id INTEGER, type TEXT, function_name TEXT, function_arguments TEXT, function_result TEXT); - CREATE TABLE attachments (id INTEGER PRIMARY KEY AUTOINCREMENT, message_id INTEGER); - CREATE TABLE users (name TEXT, email TEXT, plan TEXT, cached_at TIMESTAMP); - `) - if err != nil { - t.Fatalf("create schema: %v", err) - } - - _, _ = db.Exec(`INSERT INTO settings (id, context_length, selected_model, websearch_enabled, turbo_enabled, think_enabled, airplane_mode, device_id) VALUES (1, 8192, 'llama3:8b', 1, 0, 1, 0, 'test-device-123')`) - db.Close() - - localServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch 
r.URL.Path { - case "/api/version": - _, _ = w.Write([]byte(`{"version":"0.16.3"}`)) - case "/api/status": - _, _ = w.Write([]byte(`{}`)) - case "/api/tags": - _, _ = w.Write([]byte(`{"models":[]}`)) - case "/api/ps": - _, _ = w.Write([]byte(`{"models":[]}`)) - default: - http.NotFound(w, r) - } - })) - defer localServer.Close() - - p := New() - acct := core.AccountConfig{ - ID: "test-settings", - Provider: "ollama", - Auth: "local", - BaseURL: localServer.URL, - ExtraData: map[string]string{ - "db_path": dbPath, - }, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if v := snap.Attributes["selected_model"]; v != "llama3:8b" { - t.Errorf("selected_model = %q, want llama3:8b", v) - } - if v := snap.Attributes["websearch_enabled"]; v != "1" { - t.Errorf("websearch_enabled = %q, want 1", v) - } - if v := snap.Attributes["think_enabled"]; v != "1" { - t.Errorf("think_enabled = %q, want 1", v) - } - if v := snap.Attributes["device_id"]; v != "test-device-123" { - t.Errorf("device_id = %q, want test-device-123", v) - } -} - -func TestParseParameterSize(t *testing.T) { - tests := []struct { - in string - want float64 - }{ - {"7B", 7e9}, - {"70B", 70e9}, - {"235B", 235e9}, - {"500M", 500e6}, - {"", 0}, - {"invalid", 0}, - } - for _, tt := range tests { - t.Run(tt.in, func(t *testing.T) { - got := parseParameterSize(tt.in) - if got != tt.want { - t.Errorf("parseParameterSize(%q) = %v, want %v", tt.in, got, tt.want) - } - }) - } -} - -func TestDetailWidget(t *testing.T) { - p := New() - dw := p.DetailWidget() - if len(dw.Sections) != 8 { - t.Fatalf("DetailWidget sections = %d, want 8", len(dw.Sections)) - } - expectedSections := []string{"Usage", "Models", "Languages", "MCP Usage", "Spending", "Trends", "Tokens", "Activity"} - for i, s := range dw.Sections { - if s.Name != expectedSections[i] { - t.Errorf("section[%d] = %q, want %q", i, s.Name, expectedSections[i]) - } - if s.Order != i+1 { - 
t.Errorf("section[%d] order = %d, want %d", i, s.Order, i+1) - } - } -} - -func TestNormalizeModelName(t *testing.T) { - tests := []struct { - in string - want string - }{ - {in: "Qwen3:32B:latest", want: "qwen3:32b"}, - {in: "models/gpt-oss:20b", want: "gpt-oss:20b"}, - {in: "https://ollama.com/library/deepseek-r1:70b-cloud", want: "deepseek-r1:70b-cloud"}, - } - for _, tt := range tests { - t.Run(tt.in, func(t *testing.T) { - got := normalizeModelName(tt.in) - if got != tt.want { - t.Fatalf("normalizeModelName(%q) = %q, want %q", tt.in, got, tt.want) - } - }) - } -} - -func metricValue(snap core.UsageSnapshot, key string) float64 { - m, ok := snap.Metrics[key] - if !ok || m.Remaining == nil { - return -1 - } - return *m.Remaining -} - -func createTestDB(path string) error { - db, err := sql.Open("sqlite3", path) - if err != nil { - return err - } - defer db.Close() - - schema := ` -CREATE TABLE settings ( - id INTEGER PRIMARY KEY, - context_length INTEGER NOT NULL DEFAULT 4096, - selected_model TEXT NOT NULL DEFAULT '', - websearch_enabled INTEGER DEFAULT 0, - turbo_enabled INTEGER DEFAULT 0, - think_enabled INTEGER DEFAULT 1, - airplane_mode INTEGER DEFAULT 0 -); -CREATE TABLE chats ( - id TEXT PRIMARY KEY, - title TEXT NOT NULL DEFAULT '', - created_at TIMESTAMP NOT NULL -); -CREATE TABLE messages ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - chat_id TEXT NOT NULL, - role TEXT NOT NULL, - content TEXT NOT NULL DEFAULT '', - model_name TEXT, - created_at TIMESTAMP NOT NULL, - thinking_time_start TIMESTAMP, - thinking_time_end TIMESTAMP -); -CREATE TABLE tool_calls ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - message_id INTEGER NOT NULL, - type TEXT NOT NULL DEFAULT 'function', - function_name TEXT NOT NULL DEFAULT '', - function_arguments TEXT NOT NULL DEFAULT '{}', - function_result TEXT -); -CREATE TABLE attachments ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - message_id INTEGER NOT NULL -); -CREATE TABLE users ( - name TEXT NOT NULL DEFAULT '', - email TEXT NOT 
NULL DEFAULT '', - plan TEXT NOT NULL DEFAULT '', - cached_at TIMESTAMP NOT NULL -); -` - if _, err := db.Exec(schema); err != nil { - return err - } - - now := time.Now().In(time.Local) - today := now.Format("2006-01-02 15:04:05") - yesterday := now.Add(-24 * time.Hour).Format("2006-01-02 15:04:05") - - if _, err := db.Exec(`INSERT INTO settings (id, context_length, selected_model, websearch_enabled, think_enabled) VALUES (1, 32768, 'gpt-oss:20b', 1, 1)`); err != nil { - return err - } - if _, err := db.Exec(`INSERT INTO chats (id, title, created_at) VALUES ('chat-1', 'today', ?), ('chat-2', 'yesterday', ?)`, today, yesterday); err != nil { - return err - } - thinkStart := now.Add(-30 * time.Second).Format("2006-01-02T15:04:05Z") - thinkEnd := now.Add(-25 * time.Second).Format("2006-01-02T15:04:05Z") - thinkStart2 := now.Add(-20 * time.Second).Format("2006-01-02T15:04:05Z") - thinkEnd2 := now.Add(-17 * time.Second).Format("2006-01-02T15:04:05Z") - - if _, err := db.Exec(`INSERT INTO messages (chat_id, role, content, model_name, created_at, thinking_time_start, thinking_time_end) VALUES - ('chat-1','user','hello','gpt-oss:20b',?,NULL,NULL), - ('chat-1','assistant','hi','gpt-oss:20b',?,?,?), - ('chat-1','user','again','qwen3-vl:235b-cloud',?,NULL,NULL), - ('chat-1','assistant','done','qwen3-vl:235b-cloud',?,?,?), - ('chat-2','user','old','gpt-oss:20b',?,NULL,NULL)`, - today, today, thinkStart, thinkEnd, today, today, thinkStart2, thinkEnd2, yesterday); err != nil { - return err - } - if _, err := db.Exec(`INSERT INTO tool_calls (message_id, type, function_name, function_arguments, function_result) VALUES - (2, 'function', 'read_file', '{}', '{}'), - (4, 'function', 'web_search', '{}', '{}')`); err != nil { - return err - } - if _, err := db.Exec(`INSERT INTO attachments (message_id) VALUES (1)`); err != nil { - return err - } - if _, err := db.Exec(`INSERT INTO users (name, email, plan, cached_at) VALUES ('cached-user', 'cached@example.com', 'free', ?)`, today); err 
!= nil { - return err - } - - return nil -} diff --git a/internal/providers/ollama/request_helpers.go b/internal/providers/ollama/request_helpers.go index 2bcba31..55373b4 100644 --- a/internal/providers/ollama/request_helpers.go +++ b/internal/providers/ollama/request_helpers.go @@ -123,11 +123,9 @@ func cloudEndpointURL(base, path string) string { } func resolveCloudSessionCookie(acct core.AccountConfig) string { - if acct.ExtraData != nil { - for _, key := range []string{"cloud_session_cookie", "session_cookie", "cookie"} { - if v := strings.TrimSpace(acct.ExtraData[key]); v != "" { - return v - } + for _, key := range []string{"cloud_session_cookie", "session_cookie", "cookie"} { + if v := strings.TrimSpace(acct.Hint(key, "")); v != "" { + return v } } return strings.TrimSpace(os.Getenv("OLLAMA_SESSION_COOKIE")) diff --git a/internal/providers/openrouter/openrouter_activity_test.go b/internal/providers/openrouter/openrouter_activity_test.go new file mode 100644 index 0000000..520170d --- /dev/null +++ b/internal/providers/openrouter/openrouter_activity_test.go @@ -0,0 +1,968 @@ +package openrouter + +import ( + "context" + "fmt" + "math" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestFetch_ActivityEndpointNewSchema(t *testing.T) { + now := time.Now().UTC() + today := now.Format("2006-01-02") + sixDaysAgo := now.AddDate(0, 0, -6).Format("2006-01-02") + fifteenDaysAgo := now.AddDate(0, 0, -15).Format("2006-01-02") + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"activity-key","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + 
w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + {"date":"%s","model":"anthropic/claude-3.5-sonnet","endpoint_id":"ep-claude","provider_name":"Anthropic","usage":1.2,"byok_usage_inference":0.4,"prompt_tokens":1000,"completion_tokens":500,"reasoning_tokens":150,"requests":3}, + {"date":"%s","model":"openai/gpt-4o","endpoint_id":"ep-gpt4o","provider_name":"OpenAI","usage":0.8,"byok_usage_inference":0.2,"prompt_tokens":600,"completion_tokens":300,"reasoning_tokens":0,"requests":2}, + {"date":"%s","model":"google/gemini-2.5-pro","endpoint_id":"ep-gemini","provider_name":"Google","usage":2.5,"byok_usage_inference":0.5,"prompt_tokens":1200,"completion_tokens":400,"reasoning_tokens":50,"requests":4} + ]}`, today, sixDaysAgo, fifteenDaysAgo))) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_ACTIVITY_NEW", "test-key") + defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_NEW") + + p := New() + acct := core.AccountConfig{ + ID: "test-activity-new", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_ACTIVITY_NEW", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if got := snap.Raw["activity_endpoint"]; got != "/activity" { + t.Fatalf("activity_endpoint = %q, want /activity", got) + } + if got := snap.Raw["activity_rows"]; got != "3" { + t.Fatalf("activity_rows = %q, want 3", got) + } + if got := snap.Raw["activity_endpoints"]; got != "3" { + t.Fatalf("activity_endpoints = %q, want 3", got) + } + + byokToday := snap.Metrics["today_byok_cost"] + if byokToday.Used == nil || math.Abs(*byokToday.Used-0.4) > 0.0001 { + t.Fatalf("today_byok_cost = %v, want 0.4", byokToday.Used) + } + byok7d := snap.Metrics["7d_byok_cost"] + if byok7d.Used == nil 
|| math.Abs(*byok7d.Used-0.6) > 0.0001 { + t.Fatalf("7d_byok_cost = %v, want 0.6", byok7d.Used) + } + byok30d := snap.Metrics["30d_byok_cost"] + if byok30d.Used == nil || math.Abs(*byok30d.Used-1.1) > 0.0001 { + t.Fatalf("30d_byok_cost = %v, want 1.1", byok30d.Used) + } + + if got := seriesValueByDate(snap.DailySeries["analytics_requests"], today); math.Abs(got-3) > 0.001 { + t.Fatalf("analytics_requests[%s] = %v, want 3", today, got) + } + if got := seriesValueByDate(snap.DailySeries["analytics_tokens"], today); math.Abs(got-1650) > 0.001 { + t.Fatalf("analytics_tokens[%s] = %v, want 1650", today, got) + } + if analytics30dCost := snap.Metrics["analytics_30d_cost"]; analytics30dCost.Used == nil || math.Abs(*analytics30dCost.Used-4.5) > 0.001 { + t.Fatalf("analytics_30d_cost = %v, want 4.5", analytics30dCost.Used) + } + if analytics30dReq := snap.Metrics["analytics_30d_requests"]; analytics30dReq.Used == nil || math.Abs(*analytics30dReq.Used-9) > 0.001 { + t.Fatalf("analytics_30d_requests = %v, want 9", analytics30dReq.Used) + } + if analytics7dCost := snap.Metrics["analytics_7d_cost"]; analytics7dCost.Used == nil || math.Abs(*analytics7dCost.Used-2.0) > 0.001 { + t.Fatalf("analytics_7d_cost = %v, want 2.0", analytics7dCost.Used) + } + if endpointCost := snap.Metrics["endpoint_ep-gemini_cost_usd"]; endpointCost.Used == nil || math.Abs(*endpointCost.Used-2.5) > 0.001 { + t.Fatalf("endpoint_ep-gemini_cost_usd = %v, want 2.5", endpointCost.Used) + } + if providerCost := snap.Metrics["provider_google_cost_usd"]; providerCost.Used == nil || math.Abs(*providerCost.Used-2.5) > 0.001 { + t.Fatalf("provider_google_cost_usd = %v, want 2.5", providerCost.Used) + } + + mCost := snap.Metrics["model_anthropic_claude-3.5-sonnet_cost_usd"] + if mCost.Used == nil || math.Abs(*mCost.Used-1.2) > 0.0001 { + t.Fatalf("model cost = %v, want 1.2", mCost.Used) + } + mIn := snap.Metrics["model_anthropic_claude-3.5-sonnet_input_tokens"] + if mIn.Used == nil || math.Abs(*mIn.Used-1000) > 
0.001 { + t.Fatalf("model input tokens = %v, want 1000", mIn.Used) + } + mOut := snap.Metrics["model_anthropic_claude-3.5-sonnet_output_tokens"] + if mOut.Used == nil || math.Abs(*mOut.Used-500) > 0.001 { + t.Fatalf("model output tokens = %v, want 500", mOut.Used) + } + mReasoning := snap.Metrics["model_anthropic_claude-3.5-sonnet_reasoning_tokens"] + if mReasoning.Used == nil || math.Abs(*mReasoning.Used-150) > 0.001 { + t.Fatalf("model reasoning tokens = %v, want 150", mReasoning.Used) + } + if got := snap.Raw["model_anthropic_claude-3.5-sonnet_requests"]; got != "3" { + t.Fatalf("model requests raw = %q, want 3", got) + } +} + +func TestFetch_ActivityDateTimeFormat(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"activity-key","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[ + {"date":"2026-02-20 00:00:00","model":"moonshotai/kimi-k2.5","provider_name":"baseten/fp4","usage":0.10,"byok_usage_inference":0.01,"prompt_tokens":1000,"completion_tokens":100,"reasoning_tokens":20,"requests":2}, + {"date":"2026-02-20 12:34:56","model":"moonshotai/kimi-k2.5","provider_name":"baseten/fp4","usage":0.20,"byok_usage_inference":0.02,"prompt_tokens":2000,"completion_tokens":200,"reasoning_tokens":30,"requests":3} + ]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_ACTIVITY_DT", "test-key") + defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_DT") + + p := New() + acct := 
core.AccountConfig{ + ID: "test-activity-dt", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_ACTIVITY_DT", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if got := seriesValueByDate(snap.DailySeries["analytics_cost"], "2026-02-20"); math.Abs(got-0.30) > 0.0001 { + t.Fatalf("analytics_cost[2026-02-20] = %v, want 0.30", got) + } + if got := seriesValueByDate(snap.DailySeries["analytics_tokens"], "2026-02-20"); math.Abs(got-3350) > 0.0001 { + t.Fatalf("analytics_tokens[2026-02-20] = %v, want 3350", got) + } + if got := seriesValueByDate(snap.DailySeries["analytics_requests"], "2026-02-20"); math.Abs(got-5) > 0.0001 { + t.Fatalf("analytics_requests[2026-02-20] = %v, want 5", got) + } + if got := seriesValueByDate(snap.DailySeries["analytics_reasoning_tokens"], "2026-02-20"); math.Abs(got-50) > 0.0001 { + t.Fatalf("analytics_reasoning_tokens[2026-02-20] = %v, want 50", got) + } + + mCost := snap.Metrics["model_moonshotai_kimi-k2.5_cost_usd"] + if mCost.Used == nil || math.Abs(*mCost.Used-0.30) > 0.0001 { + t.Fatalf("model cost = %v, want 0.30", mCost.Used) + } + if got := snap.Raw["provider_baseten_fp4_requests"]; got != "5" { + t.Fatalf("provider requests raw = %q, want 5", got) + } + if providerCost := snap.Metrics["provider_baseten_fp4_cost_usd"]; providerCost.Used == nil || math.Abs(*providerCost.Used-0.30) > 0.0001 { + t.Fatalf("provider cost metric = %v, want 0.30", providerCost.Used) + } + if analyticsTokens := snap.Metrics["analytics_30d_tokens"]; analyticsTokens.Used == nil || math.Abs(*analyticsTokens.Used-3350) > 0.1 { + t.Fatalf("analytics_30d_tokens = %v, want 3350", analyticsTokens.Used) + } +} + +func TestResolveGenerationHostingProvider_PrefersUpstreamResponses(t *testing.T) { + ok200 := 200 + fail503 := 503 + + tests := []struct { + name string + gen generationEntry + want string + }{ + { + name: "prefers successful provider response", + gen: 
generationEntry{ + Model: "moonshotai/kimi-k2.5", + ProviderName: "Openusage", + ProviderResponses: []generationProviderResponse{ + {ProviderName: "Openusage", Status: &fail503}, + {ProviderName: "Novita", Status: &ok200}, + }, + }, + want: "Novita", + }, + { + name: "falls back to provider_name when responses missing", + gen: generationEntry{ + Model: "openai/gpt-4o", + ProviderName: "OpenAI", + }, + want: "OpenAI", + }, + { + name: "falls back to model vendor prefix", + gen: generationEntry{ + Model: "z-ai/glm-5", + }, + want: "z-ai", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if got := resolveGenerationHostingProvider(tc.gen); got != tc.want { + t.Fatalf("resolveGenerationHostingProvider() = %q, want %q", got, tc.want) + } + }) + } +} + +func TestFetch_GenerationUsesUpstreamProviderResponsesForProviderBreakdown(t *testing.T) { + now := time.Now().UTC().Format(time.RFC3339) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"gen-provider","usage":0.3,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":0.3}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + { + "id":"gen-1", + "model":"moonshotai/kimi-k2.5", + "total_cost":0.2, + "tokens_prompt":1200, + "tokens_completion":800, + "created_at":"%s", + "provider_name":"Openusage", + "provider_responses":[ + {"provider_name":"Openusage","status":503}, + {"provider_name":"Novita","status":200} + ] + }, + { + "id":"gen-2", + "model":"z-ai/glm-5", + "total_cost":0.1, + "tokens_prompt":100, + 
"tokens_completion":50, + "created_at":"%s" + } + ]}`, now, now))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_GEN_PROVIDER_RESPONSES", "test-key") + defer os.Unsetenv("TEST_OR_KEY_GEN_PROVIDER_RESPONSES") + + p := New() + acct := core.AccountConfig{ + ID: "test-gen-provider-responses", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_GEN_PROVIDER_RESPONSES", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if got := snap.Raw["provider_novita_requests"]; got != "1" { + t.Fatalf("provider_novita_requests = %q, want 1", got) + } + if got := snap.Raw["provider_z-ai_requests"]; got != "1" { + t.Fatalf("provider_z-ai_requests = %q, want 1", got) + } + if _, ok := snap.Metrics["provider_openusage_requests"]; ok { + t.Fatal("provider_openusage_requests should not be emitted when upstream provider_responses are present") + } + if got := snap.Raw["model_moonshotai_kimi-k2.5_providers"]; got != "Novita" { + t.Fatalf("model_moonshotai_kimi-k2.5_providers = %q, want Novita", got) + } +} + +func TestResolveGenerationHostingProvider_TreatsOpenusageAsNonHostProvider(t *testing.T) { + gen := generationEntry{ + Model: "moonshotai-kimi-k2.5", + ProviderName: "Openusage", + } + if got := resolveGenerationHostingProvider(gen); got != "moonshotai" { + t.Fatalf("resolveGenerationHostingProvider() = %q, want moonshotai", got) + } +} + +func TestResolveGenerationHostingProvider_UsesAlternativeEntryFields(t *testing.T) { + gen := generationEntry{ + Model: "moonshotai-kimi-k2.5", + ProviderName: "Openusage", + UpstreamProvider: "Novita", + UpstreamProviderName: "", + } + if got := resolveGenerationHostingProvider(gen); got != "Novita" { + t.Fatalf("resolveGenerationHostingProvider() = %q, want Novita", got) + } +} + +func TestFetch_GenerationProviderDetailEnrichmentForGenericProviderLabel(t *testing.T) { + now := 
time.Now().UTC().Format(time.RFC3339) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"gen-detail","usage":0.1,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":0.1}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + case "/generation": + if r.URL.Query().Get("id") == "gen-1" { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{ + "id":"gen-1", + "model":"moonshotai/kimi-k2.5", + "total_cost":0.1, + "tokens_prompt":1000, + "tokens_completion":500, + "provider_name":"Openusage", + "provider_responses":[ + {"provider_name":"Openusage","status":503}, + {"provider_name":"Novita","status":200} + ] + }}`)) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + { + "id":"gen-1", + "model":"moonshotai/kimi-k2.5", + "total_cost":0.1, + "tokens_prompt":1000, + "tokens_completion":500, + "created_at":"%s", + "provider_name":"Openusage" + } + ]}`, now))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_GEN_DETAIL_ENRICH", "test-key") + defer os.Unsetenv("TEST_OR_KEY_GEN_DETAIL_ENRICH") + + p := New() + acct := core.AccountConfig{ + ID: "test-gen-detail-enrich", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_GEN_DETAIL_ENRICH", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if got := snap.Raw["generation_provider_detail_lookups"]; got != "1" { + t.Fatalf("generation_provider_detail_lookups = %q, want 1", got) + } + if got := snap.Raw["generation_provider_detail_hits"]; got != "1" { + 
t.Fatalf("generation_provider_detail_hits = %q, want 1", got) + } + if got := snap.Raw["provider_novita_requests"]; got != "1" { + t.Fatalf("provider_novita_requests = %q, want 1", got) + } + if _, ok := snap.Metrics["provider_openusage_requests"]; ok { + t.Fatal("provider_openusage_requests should not be emitted after detail enrichment") + } +} + +func TestFetch_GenerationExtendedMetrics(t *testing.T) { + now := time.Now().UTC().Format(time.RFC3339) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"gen-ext","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + { + "id":"gen-1", + "model":"openai/gpt-4o", + "total_cost":0.09, + "is_byok":true, + "upstream_inference_cost":0.07, + "tokens_prompt":1000, + "tokens_completion":500, + "native_tokens_prompt":900, + "native_tokens_completion":450, + "native_tokens_reasoning":120, + "native_tokens_cached":80, + "native_tokens_completion_images":5, + "num_media_prompt":2, + "num_media_completion":1, + "num_input_audio_prompt":3, + "num_search_results":4, + "streamed":true, + "latency":2000, + "generation_time":1500, + "moderation_latency":120, + "cancelled":true, + "finish_reason":"stop", + "origin":"https://openrouter.ai", + "router":"openrouter/auto", + "api_type":"completions", + "created_at":"%s", + "provider_name":"OpenAI" + } + ]}`, now))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_GEN_EXT", "test-key") + defer 
os.Unsetenv("TEST_OR_KEY_GEN_EXT") + + p := New() + acct := core.AccountConfig{ + ID: "test-generation-ext", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_GEN_EXT", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + check := func(name string, want float64) { + t.Helper() + m, ok := snap.Metrics[name] + if !ok || m.Used == nil { + t.Fatalf("missing metric %s", name) + } + if math.Abs(*m.Used-want) > 0.0001 { + t.Fatalf("%s = %v, want %v", name, *m.Used, want) + } + } + + check("today_reasoning_tokens", 120) + check("today_cached_tokens", 80) + check("today_image_tokens", 5) + check("today_native_input_tokens", 900) + check("today_native_output_tokens", 450) + check("today_media_prompts", 2) + check("today_media_completions", 1) + check("today_audio_inputs", 3) + check("today_search_results", 4) + check("today_cancelled", 1) + check("today_streamed_requests", 1) + check("today_streamed_percent", 100) + check("today_avg_latency", 2) + check("today_avg_generation_time", 1.5) + check("today_avg_moderation_latency", 0.12) + check("today_completions_requests", 1) + check("today_byok_cost", 0.07) + check("7d_byok_cost", 0.07) + check("30d_byok_cost", 0.07) + check("tool_openai_gpt-4o", 1) + check("tool_calls_total", 1) + check("tool_completed", 0) + check("tool_cancelled", 1) + check("tool_success_rate", 0) + check("model_openai_gpt-4o_reasoning_tokens", 120) + check("model_openai_gpt-4o_cached_tokens", 80) + check("model_openai_gpt-4o_image_tokens", 5) + check("model_openai_gpt-4o_native_input_tokens", 900) + check("model_openai_gpt-4o_native_output_tokens", 450) + check("model_openai_gpt-4o_avg_latency", 2) + + if got := snap.Raw["today_finish_reasons"]; !strings.Contains(got, "stop=1") { + t.Fatalf("today_finish_reasons = %q, want stop=1", got) + } + if got := snap.Raw["today_origins"]; !strings.Contains(got, "https://openrouter.ai=1") { + t.Fatalf("today_origins = %q, want 
https://openrouter.ai=1", got) + } + if got := snap.Raw["today_routers"]; !strings.Contains(got, "openrouter/auto=1") { + t.Fatalf("today_routers = %q, want openrouter/auto=1", got) + } + if got := snap.Raw["tool_usage_source"]; got != "inferred_from_model_requests" { + t.Fatalf("tool_usage_source = %q, want inferred_from_model_requests", got) + } + if got := snap.Raw["tool_usage"]; !strings.Contains(got, "openai/gpt-4o: 1 calls") { + t.Fatalf("tool_usage = %q, want model-based usage summary", got) + } +} + +func TestFetch_ActivityForbidden_ReportsManagementKeyRequirement(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) + case "/activity": + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"error":{"message":"Only management keys can fetch activity for an account","code":403}}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_ACTIVITY_403", "test-key") + defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_403") + + p := New() + acct := core.AccountConfig{ + ID: "test-activity-403", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_ACTIVITY_403", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if snap.Status != core.StatusOK { + t.Fatalf("Status = %v, want OK", snap.Status) + } + if got := snap.Raw["analytics_error"]; !strings.Contains(got, "management keys") { + t.Fatalf("analytics_error = 
%q, want management-keys message", got) + } + if !strings.Contains(snap.Message, "$2.2500 used / $10.00 credits") { + t.Fatalf("message = %q, want credits-detail based message", snap.Message) + } +} + +func TestFetch_ActivityForbidden_FallsBackToAnalyticsUserActivity(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) + case "/activity": + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"error":{"message":"Only management keys can fetch activity for an account","code":403}}`)) + case "/analytics/user-activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[ + {"date":"2026-02-21","model":"qwen/qwen3-coder-flash","total_cost":0.918,"total_tokens":3058944,"requests":72} + ]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_ACTIVITY_FALLBACK", "test-key") + defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_FALLBACK") + + p := New() + acct := core.AccountConfig{ + ID: "test-activity-fallback", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_ACTIVITY_FALLBACK", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + if snap.Status != core.StatusOK { + t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) + } + if _, ok := snap.Raw["analytics_error"]; ok { + t.Fatalf("unexpected analytics_error: %q", snap.Raw["analytics_error"]) + } + if got := 
snap.Raw["activity_endpoint"]; got != "/analytics/user-activity" { + t.Fatalf("activity_endpoint = %q, want /analytics/user-activity", got) + } + if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_total_tokens"]; !ok || m.Used == nil || *m.Used != 3058944 { + t.Fatalf("missing/invalid qwen total tokens metric: %+v", m) + } +} + +func TestFetch_ActivityDateFallback_UsesYesterdayAndNoCacheHeaders(t *testing.T) { + var seenEmptyDate bool + var seenFallbackDate string + var seenCacheControl string + var seenPragma string + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) + case "/activity": + seenCacheControl = r.Header.Get("Cache-Control") + seenPragma = r.Header.Get("Pragma") + date := strings.TrimSpace(r.URL.Query().Get("date")) + if date == "" { + seenEmptyDate = true + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"error":{"message":"Date must be within the last 30 (completed) UTC days","code":400}}`)) + return + } + seenFallbackDate = date + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[ + {"date":"2026-02-21 00:00:00","model_permaslug":"qwen/qwen3-coder-flash","usage":0.91764,"requests":72,"prompt_tokens":3052166,"completion_tokens":6778,"reasoning_tokens":0,"cached_tokens":1508864} + ]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_ACTIVITY_DATE_FALLBACK", "test-key") + defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_DATE_FALLBACK") + + p := New() + acct := 
core.AccountConfig{ + ID: "test-activity-date-fallback", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_ACTIVITY_DATE_FALLBACK", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + if snap.Status != core.StatusOK { + t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) + } + if !seenEmptyDate { + t.Fatal("expected initial /activity call without date") + } + if seenFallbackDate == "" { + t.Fatal("expected fallback /activity call with date query") + } + if seenCacheControl != "no-cache, no-store, max-age=0" { + t.Fatalf("cache-control = %q, want no-cache, no-store, max-age=0", seenCacheControl) + } + if seenPragma != "no-cache" { + t.Fatalf("pragma = %q, want no-cache", seenPragma) + } + if got := snap.Raw["activity_endpoint"]; !strings.HasPrefix(got, "/activity?date=") { + t.Fatalf("activity_endpoint = %q, want /activity?date=...", got) + } + if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_input_tokens"]; !ok || m.Used == nil || *m.Used != 3052166 { + t.Fatalf("missing/invalid qwen input tokens metric: %+v", m) + } +} + +func TestFetch_TransactionAnalyticsNestedPayload(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) + case "/api/internal/v1/transaction-analytics": + if r.URL.RawQuery != "window=1mo" { + t.Fatalf("unexpected query: %q", r.URL.RawQuery) + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"cachedAt":"2026-02-22T00:00:00Z","data":[ + {"date":"2026-02-21 
00:00:00","model_permaslug":"qwen/qwen3-coder-flash","usage":0.91764,"requests":72,"prompt_tokens":3052166,"completion_tokens":6778,"reasoning_tokens":0,"cached_tokens":1508864} + ]}}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_TX_ANALYTICS", "test-key") + defer os.Unsetenv("TEST_OR_KEY_TX_ANALYTICS") + + p := New() + acct := core.AccountConfig{ + ID: "test-tx-analytics", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_TX_ANALYTICS", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + if snap.Status != core.StatusOK { + t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) + } + if got := snap.Raw["activity_endpoint"]; got != "/api/internal/v1/transaction-analytics?window=1mo" { + t.Fatalf("activity_endpoint = %q, want transaction analytics endpoint", got) + } + if got := snap.Raw["activity_cached_at"]; got != "2026-02-22T00:00:00Z" { + t.Fatalf("activity_cached_at = %q, want 2026-02-22T00:00:00Z", got) + } + if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_input_tokens"]; !ok || m.Used == nil || *m.Used != 3052166 { + t.Fatalf("missing/invalid qwen input tokens metric: %+v", m) + } + if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_output_tokens"]; !ok || m.Used == nil || *m.Used != 6778 { + t.Fatalf("missing/invalid qwen output tokens metric: %+v", m) + } + if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_cached_tokens"]; !ok || m.Used == nil || *m.Used != 1508864 { + t.Fatalf("missing/invalid qwen cached tokens metric: %+v", m) + } + if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_cost_usd"]; !ok || m.Used == nil || math.Abs(*m.Used-0.91764) > 0.000001 { + t.Fatalf("missing/invalid qwen cost metric: %+v", m) + } +} + +func 
TestFetch_TransactionAnalyticsNumericCachedAtAndByokRequests(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) + case "/api/internal/v1/transaction-analytics": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"cachedAt":1771717984900,"data":[ + {"date":"2026-02-21 00:00:00","model_permaslug":"qwen/qwen3-coder-flash","usage":0.91764,"requests":72,"byok_requests":3,"prompt_tokens":3052166,"completion_tokens":6778,"reasoning_tokens":0,"cached_tokens":1508864} + ]}}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_TX_ANALYTICS_NUM", "test-key") + defer os.Unsetenv("TEST_OR_KEY_TX_ANALYTICS_NUM") + + p := New() + acct := core.AccountConfig{ + ID: "test-tx-analytics-num", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_TX_ANALYTICS_NUM", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + if got := snap.Raw["activity_cached_at"]; got != "2026-02-21T23:53:04Z" { + t.Fatalf("activity_cached_at = %q, want 2026-02-21T23:53:04Z", got) + } + if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_byok_requests"]; !ok || m.Used == nil || *m.Used != 3 { + t.Fatalf("missing/invalid byok requests metric: %+v", m) + } +} + +func TestFetch_TransactionAnalyticsURL_UsesRootWhenBaseURLHasAPIV1(t *testing.T) { + var seenInternalPath string + + server := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/api/v1/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/api/v1/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) + case "/api/internal/v1/transaction-analytics": + seenInternalPath = r.URL.Path + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"cachedAt":1771717984900,"data":[ + {"date":"2026-02-21 00:00:00","model_permaslug":"qwen/qwen3-coder-flash","usage":0.91764,"requests":72,"prompt_tokens":3052166,"completion_tokens":6778,"reasoning_tokens":0,"cached_tokens":1508864} + ]}}`)) + case "/api/v1/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_TX_URL", "test-key") + defer os.Unsetenv("TEST_OR_KEY_TX_URL") + + p := New() + acct := core.AccountConfig{ + ID: "test-tx-url", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_TX_URL", + BaseURL: server.URL + "/api/v1", + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + if snap.Status != core.StatusOK { + t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) + } + if seenInternalPath != "/api/internal/v1/transaction-analytics" { + t.Fatalf("internal analytics path = %q, want /api/internal/v1/transaction-analytics", seenInternalPath) + } + if got := snap.Raw["activity_endpoint"]; got != "/api/internal/v1/transaction-analytics?window=1mo" { + t.Fatalf("activity_endpoint = %q, want transaction analytics endpoint", got) + } +} + +func TestFetch_GenerationListUnsupported_Graceful(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"std-key","usage":1.0,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":1.0}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + case "/generation": + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"success":false,"error":{"name":"ZodError","message":"expected string for id"}}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_GEN_400", "test-key") + defer os.Unsetenv("TEST_OR_KEY_GEN_400") + + p := New() + acct := core.AccountConfig{ + ID: "test-generation-400", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_GEN_400", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if got := snap.Raw["generation_note"]; got == "" { + t.Fatal("missing generation_note for unsupported generation listing") + } + if got := snap.Raw["generations_fetched"]; got != "0" { + t.Fatalf("generations_fetched = %q, want 0", got) + } + if _, ok := snap.Raw["generation_error"]; ok { + t.Fatalf("unexpected generation_error = %q", snap.Raw["generation_error"]) + } +} + +func seriesValueByDate(points []core.TimePoint, date string) float64 { + for _, p := range points { + if p.Date == date { + return p.Value + } + } + return 0 +} diff --git a/internal/providers/openrouter/openrouter_analytics_rollups_test.go b/internal/providers/openrouter/openrouter_analytics_rollups_test.go new file mode 100644 index 0000000..1b2f8c9 --- /dev/null +++ b/internal/providers/openrouter/openrouter_analytics_rollups_test.go @@ -0,0 +1,534 @@ +package openrouter + +import ( + "context" + "fmt" + "math" 
+ "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestFetch_PeriodCosts(t *testing.T) { + now := time.Now().UTC() + today := now.Format(time.RFC3339) + threeDaysAgo := now.AddDate(0, 0, -3).Format(time.RFC3339) + tenDaysAgo := now.AddDate(0, 0, -10).Format(time.RFC3339) + twentyDaysAgo := now.AddDate(0, 0, -20).Format(time.RFC3339) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":10.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":10.0,"remaining_balance":90.0}}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + data := fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"anthropic/claude-3.5-sonnet","total_cost":0.50,"tokens_prompt":1000,"tokens_completion":500,"created_at":"%s","provider_name":"Anthropic"}, + {"id":"gen-2","model":"openai/gpt-4o","total_cost":0.30,"tokens_prompt":800,"tokens_completion":400,"created_at":"%s","provider_name":"OpenAI"}, + {"id":"gen-3","model":"anthropic/claude-3.5-sonnet","total_cost":1.00,"tokens_prompt":2000,"tokens_completion":1000,"created_at":"%s","provider_name":"Anthropic"}, + {"id":"gen-4","model":"openai/gpt-4o","total_cost":0.20,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"OpenAI"} + ]}`, today, threeDaysAgo, tenDaysAgo, twentyDaysAgo) + w.Write([]byte(data)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_PERIOD", "test-key") + defer os.Unsetenv("TEST_OR_KEY_PERIOD") + + p := New() + acct := core.AccountConfig{ + ID: "test-period", + Provider: "openrouter", + 
APIKeyEnv: "TEST_OR_KEY_PERIOD", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if snap.Status != core.StatusOK { + t.Errorf("Status = %v, want OK", snap.Status) + } + + // 7d cost: today (0.50) + 3 days ago (0.30) = 0.80 + cost7d, ok := snap.Metrics["7d_api_cost"] + if !ok { + t.Fatal("missing 7d_api_cost metric") + } + if cost7d.Used == nil || math.Abs(*cost7d.Used-0.80) > 0.001 { + t.Errorf("7d_api_cost = %v, want 0.80", cost7d.Used) + } + + // 30d cost: all four = 0.50 + 0.30 + 1.00 + 0.20 = 2.00 + cost30d, ok := snap.Metrics["30d_api_cost"] + if !ok { + t.Fatal("missing 30d_api_cost metric") + } + if cost30d.Used == nil || math.Abs(*cost30d.Used-2.00) > 0.001 { + t.Errorf("30d_api_cost = %v, want 2.00", cost30d.Used) + } + + // DailySeries["cost"] should have entries for each unique date + costSeries, ok := snap.DailySeries["cost"] + if !ok { + t.Fatal("missing cost in DailySeries") + } + if len(costSeries) < 3 { + t.Errorf("cost DailySeries has %d entries, want at least 3 distinct days", len(costSeries)) + } + + // DailySeries["requests"] should exist + reqSeries, ok := snap.DailySeries["requests"] + if !ok { + t.Fatal("missing requests in DailySeries") + } + // Total requests across all days should sum to 4 + var totalReqs float64 + for _, pt := range reqSeries { + totalReqs += pt.Value + } + if math.Abs(totalReqs-4) > 0.001 { + t.Errorf("total requests in DailySeries = %v, want 4", totalReqs) + } + + // Per-model token series should exist for the top models + if _, ok := snap.DailySeries["tokens_anthropic_claude-3.5-sonnet"]; !ok { + t.Error("missing tokens_anthropic_claude-3.5-sonnet in DailySeries") + } + if _, ok := snap.DailySeries["tokens_openai_gpt-4o"]; !ok { + t.Error("missing tokens_openai_gpt-4o in DailySeries") + } +} + +func TestFetch_BurnRate(t *testing.T) { + now := time.Now().UTC() + // All generations within the last 60 minutes + tenMinAgo := 
now.Add(-10 * time.Minute).Format(time.RFC3339) + thirtyMinAgo := now.Add(-30 * time.Minute).Format(time.RFC3339) + fiftyMinAgo := now.Add(-50 * time.Minute).Format(time.RFC3339) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0,"remaining_balance":95.0}}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + data := fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"anthropic/claude-3.5-sonnet","total_cost":0.10,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"Anthropic"}, + {"id":"gen-2","model":"anthropic/claude-3.5-sonnet","total_cost":0.20,"tokens_prompt":1000,"tokens_completion":400,"created_at":"%s","provider_name":"Anthropic"}, + {"id":"gen-3","model":"openai/gpt-4o","total_cost":0.30,"tokens_prompt":1500,"tokens_completion":600,"created_at":"%s","provider_name":"OpenAI"} + ]}`, tenMinAgo, thirtyMinAgo, fiftyMinAgo) + w.Write([]byte(data)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_BURN", "test-key") + defer os.Unsetenv("TEST_OR_KEY_BURN") + + p := New() + acct := core.AccountConfig{ + ID: "test-burn", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_BURN", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if snap.Status != core.StatusOK { + t.Errorf("Status = %v, want OK", snap.Status) + } + + // Burn rate: total cost in last 60 min = 0.10 + 0.20 + 0.30 = 0.60 USD/hour + burnRate, ok := snap.Metrics["burn_rate"] + if !ok { + t.Fatal("missing 
burn_rate metric") + } + expectedBurn := 0.60 + if burnRate.Used == nil || math.Abs(*burnRate.Used-expectedBurn) > 0.001 { + t.Errorf("burn_rate = %v, want %v", burnRate.Used, expectedBurn) + } + if burnRate.Unit != "USD/hour" { + t.Errorf("burn_rate unit = %q, want USD/hour", burnRate.Unit) + } + + // Daily projected: 0.60 * 24 = 14.40 + dailyProj, ok := snap.Metrics["daily_projected"] + if !ok { + t.Fatal("missing daily_projected metric") + } + expectedProj := 14.40 + if dailyProj.Used == nil || math.Abs(*dailyProj.Used-expectedProj) > 0.01 { + t.Errorf("daily_projected = %v, want %v", dailyProj.Used, expectedProj) + } +} + +func TestFetch_AnalyticsGracefulDegradation(t *testing.T) { + now := todayISO() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0,"remaining_balance":95.0}}`)) + case "/analytics/user-activity": + // Return 404 to simulate analytics not available + w.WriteHeader(http.StatusNotFound) + case "/generation": + w.WriteHeader(http.StatusOK) + data := fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"openai/gpt-4o","total_cost":0.05,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"OpenAI"} + ]}`, now) + w.Write([]byte(data)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_GRACEFUL", "test-key") + defer os.Unsetenv("TEST_OR_KEY_GRACEFUL") + + p := New() + acct := core.AccountConfig{ + ID: "test-graceful", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_GRACEFUL", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), 
acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + // Status should still be OK despite analytics failure + if snap.Status != core.StatusOK { + t.Errorf("Status = %v, want OK; message=%s", snap.Status, snap.Message) + } + + // Analytics error should be logged + analyticsErr, ok := snap.Raw["analytics_error"] + if !ok { + t.Error("expected analytics_error in Raw") + } + if !strings.Contains(analyticsErr, "404") { + t.Errorf("analytics_error = %q, want to contain '404'", analyticsErr) + } + + // Generation data should still be processed + if snap.Raw["generations_fetched"] != "1" { + t.Errorf("generations_fetched = %q, want 1", snap.Raw["generations_fetched"]) + } + + // Metrics from credits and generations should still work + if _, ok := snap.Metrics["credits"]; !ok { + t.Error("missing credits metric") + } + if _, ok := snap.Metrics["today_requests"]; !ok { + t.Error("missing today_requests metric") + } + + // DailySeries from generations should still be populated + if _, ok := snap.DailySeries["cost"]; !ok { + t.Error("missing cost in DailySeries despite analytics failure") + } +} + +func TestFetch_DateBasedCutoff(t *testing.T) { + now := time.Now().UTC() + recent := now.Add(-1 * time.Hour).Format(time.RFC3339) + fiveDaysAgo := now.AddDate(0, 0, -5).Format(time.RFC3339) + // 35 days ago: beyond the 30-day cutoff + thirtyFiveDaysAgo := now.AddDate(0, 0, -35).Format(time.RFC3339) + + generationRequests := 0 + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0,"remaining_balance":95.0}}`)) + case "/generation": + 
generationRequests++ + w.WriteHeader(http.StatusOK) + if generationRequests == 1 { + // First page: 2 recent + 1 old (beyond 30 day cutoff) + data := fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"openai/gpt-4o","total_cost":0.10,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"OpenAI"}, + {"id":"gen-2","model":"openai/gpt-4o","total_cost":0.20,"tokens_prompt":1000,"tokens_completion":400,"created_at":"%s","provider_name":"OpenAI"}, + {"id":"gen-3","model":"openai/gpt-4o","total_cost":0.50,"tokens_prompt":2000,"tokens_completion":800,"created_at":"%s","provider_name":"OpenAI"} + ]}`, recent, fiveDaysAgo, thirtyFiveDaysAgo) + w.Write([]byte(data)) + } else { + // Should not reach here due to date cutoff + w.Write([]byte(`{"data":[ + {"id":"gen-old","model":"openai/gpt-4o","total_cost":999.0,"tokens_prompt":99999,"tokens_completion":99999,"created_at":"2025-01-01T00:00:00Z","provider_name":"OpenAI"} + ]}`)) + } + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_CUTOFF", "test-key") + defer os.Unsetenv("TEST_OR_KEY_CUTOFF") + + p := New() + acct := core.AccountConfig{ + ID: "test-cutoff", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_CUTOFF", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if snap.Status != core.StatusOK { + t.Errorf("Status = %v, want OK", snap.Status) + } + + // Only 2 generations should be fetched (the old one is beyond cutoff) + if snap.Raw["generations_fetched"] != "2" { + t.Errorf("generations_fetched = %q, want 2 (old generation should be excluded)", snap.Raw["generations_fetched"]) + } + + // 30d cost should only include the 2 recent generations: 0.10 + 0.20 = 0.30 + cost30d, ok := snap.Metrics["30d_api_cost"] + if !ok { + t.Fatal("missing 30d_api_cost metric") + } + if cost30d.Used == nil || math.Abs(*cost30d.Used-0.30) > 0.001 { + t.Errorf("30d_api_cost = 
%v, want 0.30 (should not include generation beyond 30 days)", cost30d.Used) + } + + // Should only have made 1 generation request (stopped due to date cutoff) + if generationRequests != 1 { + t.Errorf("generation API requests = %d, want 1 (should stop on date cutoff)", generationRequests) + } +} + +func TestFetch_CurrentKeyRichData(t *testing.T) { + limitReset := time.Now().UTC().Add(2 * time.Hour).Format(time.RFC3339) + expiresAt := time.Now().UTC().Add(48 * time.Hour).Format(time.RFC3339) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":{ + "label":"mgmt-key", + "usage":12.5, + "limit":50.0, + "limit_remaining":37.5, + "usage_daily":1.25, + "usage_weekly":6.5, + "usage_monthly":12.5, + "byok_usage":3.0, + "byok_usage_inference":0.2, + "byok_usage_daily":0.2, + "byok_usage_weekly":0.9, + "byok_usage_monthly":3.0, + "is_free_tier":false, + "is_management_key":true, + "is_provisioning_key":false, + "include_byok_in_limit":true, + "limit_reset":"%s", + "expires_at":"%s", + "rate_limit":{"requests":240,"interval":"10s","note":"model-dependent"} + }}`, limitReset, expiresAt))) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":50.0,"total_usage":12.5}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_RICH", "test-key") + defer os.Unsetenv("TEST_OR_KEY_RICH") + + p := New() + acct := core.AccountConfig{ + ID: "test-rich-key", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_RICH", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + 
t.Fatalf("Fetch() error: %v", err) + } + + if snap.Status != core.StatusOK { + t.Fatalf("Status = %v, want OK", snap.Status) + } + + checkMetric := func(name string, want float64) { + t.Helper() + m, ok := snap.Metrics[name] + if !ok || m.Used == nil { + t.Fatalf("missing metric %s", name) + } + if math.Abs(*m.Used-want) > 0.0001 { + t.Fatalf("%s = %v, want %v", name, *m.Used, want) + } + } + + checkMetric("usage_daily", 1.25) + checkMetric("usage_weekly", 6.5) + checkMetric("usage_monthly", 12.5) + checkMetric("byok_usage", 3.0) + checkMetric("byok_daily", 0.2) + checkMetric("byok_weekly", 0.9) + checkMetric("byok_monthly", 3.0) + checkMetric("limit_remaining", 37.5) + + if got := snap.Raw["key_type"]; got != "management" { + t.Fatalf("key_type = %q, want management", got) + } + if got := snap.Raw["rate_limit_note"]; got != "model-dependent" { + t.Fatalf("rate_limit_note = %q, want model-dependent", got) + } + if _, ok := snap.Resets["limit_reset"]; !ok { + t.Fatal("missing limit_reset in Resets") + } + if _, ok := snap.Resets["key_expires"]; !ok { + t.Fatal("missing key_expires in Resets") + } +} + +func TestFetch_ManagementKeyLoadsKeysMetadata(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{ + "label":"sk-or-v1-mgr...abc", + "usage":1.0, + "limit":50.0, + "is_free_tier":false, + "is_management_key":true, + "is_provisioning_key":true, + "rate_limit":{"requests":240,"interval":"10s","note":"deprecated"} + }}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":50.0,"total_usage":1.0}}`)) + case "/keys": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[ + 
{"hash":"1234567890abcdef","name":"Primary","label":"sk-or-v1-mgr...abc","disabled":false,"limit":50.0,"limit_remaining":49.0,"limit_reset":null,"include_byok_in_limit":false,"usage":1.0,"usage_daily":0.1,"usage_weekly":0.2,"usage_monthly":1.0,"byok_usage":0.0,"byok_usage_daily":0.0,"byok_usage_weekly":0.0,"byok_usage_monthly":0.0,"created_at":"2026-02-20T10:00:00Z","updated_at":"2026-02-20T10:30:00Z","expires_at":null}, + {"hash":"abcdef0123456789","name":"Secondary","label":"sk-or-v1-secondary","disabled":true,"limit":null,"limit_remaining":null,"limit_reset":null,"include_byok_in_limit":false,"usage":0.0,"usage_daily":0.0,"usage_weekly":0.0,"usage_monthly":0.0,"byok_usage":0.0,"byok_usage_daily":0.0,"byok_usage_weekly":0.0,"byok_usage_monthly":0.0,"created_at":"2026-02-19T10:00:00Z","updated_at":null,"expires_at":null} + ]}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[]}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_KEYS_META", "test-key") + defer os.Unsetenv("TEST_OR_KEY_KEYS_META") + + p := New() + acct := core.AccountConfig{ + ID: "test-keys-meta", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_KEYS_META", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if got := snap.Raw["keys_total"]; got != "2" { + t.Fatalf("keys_total = %q, want 2", got) + } + if got := snap.Raw["keys_active"]; got != "1" { + t.Fatalf("keys_active = %q, want 1", got) + } + if got := snap.Raw["keys_disabled"]; got != "1" { + t.Fatalf("keys_disabled = %q, want 1", got) + } + if got := snap.Raw["key_name"]; got != "Primary" { + t.Fatalf("key_name = %q, want Primary", got) + } + if got := snap.Raw["key_disabled"]; got != "false" { + t.Fatalf("key_disabled = %q, want false", got) + } + if got := 
snap.Raw["key_created_at"]; got == "" { + t.Fatal("expected key_created_at") + } + + if total := snap.Metrics["keys_total"]; total.Used == nil || *total.Used != 2 { + t.Fatalf("keys_total metric = %v, want 2", total.Used) + } + if active := snap.Metrics["keys_active"]; active.Used == nil || *active.Used != 1 { + t.Fatalf("keys_active metric = %v, want 1", active.Used) + } + if disabled := snap.Metrics["keys_disabled"]; disabled.Used == nil || *disabled.Used != 1 { + t.Fatalf("keys_disabled metric = %v, want 1", disabled.Used) + } +} diff --git a/internal/providers/openrouter/openrouter_analytics_test.go b/internal/providers/openrouter/openrouter_analytics_test.go new file mode 100644 index 0000000..20d2fca --- /dev/null +++ b/internal/providers/openrouter/openrouter_analytics_test.go @@ -0,0 +1,579 @@ +package openrouter + +import ( + "context" + "fmt" + "math" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func TestFetch_AnalyticsEndpoint(t *testing.T) { + now := todayISO() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0,"remaining_balance":95.0}}`)) + case "/analytics/user-activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[ + {"date":"2026-02-18","model":"anthropic/claude-3.5-sonnet","total_cost":1.50,"total_tokens":50000,"requests":20}, + {"date":"2026-02-19","model":"anthropic/claude-3.5-sonnet","total_cost":2.00,"total_tokens":70000,"requests":30}, + 
{"date":"2026-02-19","model":"openai/gpt-4o","total_cost":0.50,"total_tokens":10000,"requests":5}, + {"date":"2026-02-20","model":"anthropic/claude-3.5-sonnet","total_cost":0.75,"total_tokens":25000,"requests":10} + ]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + data := fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"anthropic/claude-3.5-sonnet","total_cost":0.01,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"Anthropic"} + ]}`, now) + w.Write([]byte(data)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_ANALYTICS", "test-key") + defer os.Unsetenv("TEST_OR_KEY_ANALYTICS") + + p := New() + acct := core.AccountConfig{ + ID: "test-analytics", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_ANALYTICS", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if snap.Status != core.StatusOK { + t.Errorf("Status = %v, want OK; message=%s", snap.Status, snap.Message) + } + + if snap.DailySeries == nil { + t.Fatal("DailySeries is nil") + } + + analyticsCost, ok := snap.DailySeries["analytics_cost"] + if !ok { + t.Fatal("missing analytics_cost in DailySeries") + } + if len(analyticsCost) != 3 { + t.Fatalf("analytics_cost has %d entries, want 3", len(analyticsCost)) + } + // Verify sorted by date + if analyticsCost[0].Date != "2026-02-18" { + t.Errorf("analytics_cost[0].Date = %q, want 2026-02-18", analyticsCost[0].Date) + } + // 2026-02-19 has two entries summed: 2.00 + 0.50 = 2.50 + if math.Abs(analyticsCost[1].Value-2.50) > 0.001 { + t.Errorf("analytics_cost[1].Value = %v, want 2.50", analyticsCost[1].Value) + } + + analyticsTokens, ok := snap.DailySeries["analytics_tokens"] + if !ok { + t.Fatal("missing analytics_tokens in DailySeries") + } + if len(analyticsTokens) != 3 { + t.Fatalf("analytics_tokens has %d entries, want 3", len(analyticsTokens)) + } + // 2026-02-19: 70000 + 
10000 = 80000 + if math.Abs(analyticsTokens[1].Value-80000) > 0.1 { + t.Errorf("analytics_tokens[1].Value = %v, want 80000", analyticsTokens[1].Value) + } + + // Verify no analytics_error in Raw + if _, hasErr := snap.Raw["analytics_error"]; hasErr { + t.Errorf("unexpected analytics_error: %s", snap.Raw["analytics_error"]) + } +} + +func TestFetch_AnalyticsTotalTokensOnly_TracksModelAndNormalizesName(t *testing.T) { + now := todayISO() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0,"remaining_balance":99.0}}`)) + case "/analytics/user-activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[ + {"date":"2026-02-20","model":"Qwen/Qwen3-Coder-Flash","total_cost":0.0,"total_tokens":4000,"requests":1}, + {"date":"2026-02-21","model":"qwen/qwen3-coder-flash","total_cost":0.0,"total_tokens":8000,"requests":1} + ]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"openai/gpt-4o","total_cost":0.001,"tokens_prompt":10,"tokens_completion":5,"created_at":"%s","provider_name":"OpenAI"} + ]}`, now))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_ANALYTICS_TOTAL_ONLY", "test-key") + defer os.Unsetenv("TEST_OR_KEY_ANALYTICS_TOTAL_ONLY") + + p := New() + acct := core.AccountConfig{ + ID: "test-analytics-total-only", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_ANALYTICS_TOTAL_ONLY", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: 
%v", err) + } + + if snap.Status != core.StatusOK { + t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) + } + + tok, ok := snap.Metrics["model_qwen_qwen3-coder-flash_total_tokens"] + if !ok { + t.Fatal("missing normalized qwen total tokens metric") + } + if tok.Used == nil || *tok.Used != 12000 { + t.Fatalf("model_qwen_qwen3-coder-flash_total_tokens = %v, want 12000", tok.Used) + } + + reqs, ok := snap.Metrics["model_qwen_qwen3-coder-flash_requests"] + if !ok { + t.Fatal("missing normalized qwen requests metric") + } + if reqs.Used == nil || *reqs.Used != 2 { + t.Fatalf("model_qwen_qwen3-coder-flash_requests = %v, want 2", reqs.Used) + } + + if _, ok := snap.Metrics["model_Qwen_Qwen3-Coder-Flash_total_tokens"]; ok { + t.Fatal("unexpected unnormalized model metric key present") + } + + foundQwenRecord := false + for _, rec := range snap.ModelUsage { + if rec.RawModelID != "qwen/qwen3-coder-flash" { + continue + } + foundQwenRecord = true + if rec.TotalTokens == nil || *rec.TotalTokens != 12000 { + t.Fatalf("qwen model_usage total_tokens = %v, want 12000", rec.TotalTokens) + } + if rec.Requests == nil || *rec.Requests != 2 { + t.Fatalf("qwen model_usage requests = %v, want 2", rec.Requests) + } + } + if !foundQwenRecord { + t.Fatal("expected normalized qwen model_usage record") + } + + if m, ok := snap.Metrics["lang_code"]; !ok || m.Used == nil || *m.Used != 2 { + t.Fatalf("lang_code = %v, want 2", m.Used) + } + if m, ok := snap.Metrics["lang_general"]; !ok || m.Used == nil || *m.Used != 1 { + t.Fatalf("lang_general = %v, want 1", m.Used) + } +} + +func TestFetch_GenerationPerModel_FallsBackTo30dWhenAnalyticsUnavailable(t *testing.T) { + now := time.Now().UTC() + today := now.Format(time.RFC3339) + tenDaysAgo := now.AddDate(0, 0, -10).Format(time.RFC3339) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/auth/key": + 
w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0,"remaining_balance":99.0}}`)) + case "/activity", "/analytics/user-activity": + w.WriteHeader(http.StatusNotFound) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"qwen/qwen3-coder-flash","total_cost":0.20,"tokens_prompt":1000,"tokens_completion":2000,"created_at":"%s","provider_name":"Novita"}, + {"id":"gen-2","model":"QWEN/QWEN3-CODER-FLASH","total_cost":0.30,"tokens_prompt":3000,"tokens_completion":4000,"created_at":"%s","provider_name":"Novita"} + ]}`, today, tenDaysAgo))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_GEN_30D", "test-key") + defer os.Unsetenv("TEST_OR_KEY_GEN_30D") + + p := New() + acct := core.AccountConfig{ + ID: "test-gen-30d", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_GEN_30D", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + inp, ok := snap.Metrics["model_qwen_qwen3-coder-flash_input_tokens"] + if !ok || inp.Used == nil { + t.Fatalf("missing model_qwen_qwen3-coder-flash_input_tokens metric: %+v", inp) + } + if *inp.Used != 4000 { + t.Fatalf("input tokens = %v, want 4000", *inp.Used) + } + if inp.Window != "30d" { + t.Fatalf("input window = %q, want 30d", inp.Window) + } + + out, ok := snap.Metrics["model_qwen_qwen3-coder-flash_output_tokens"] + if !ok || out.Used == nil { + t.Fatalf("missing model_qwen_qwen3-coder-flash_output_tokens metric: %+v", out) + } + if *out.Used != 6000 { + t.Fatalf("output tokens = %v, want 6000", *out.Used) + } + + reqs, ok := snap.Metrics["model_qwen_qwen3-coder-flash_requests"] + if !ok || 
reqs.Used == nil { + t.Fatalf("missing model_qwen_qwen3-coder-flash_requests metric: %+v", reqs) + } + if *reqs.Used != 2 { + t.Fatalf("requests = %v, want 2", *reqs.Used) + } +} + +func TestFetch_AnalyticsRows_GenerationModelMixIsAuthoritative(t *testing.T) { + now := time.Now().UTC().Format(time.RFC3339) + today := time.Now().UTC().Format("2006-01-02") + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0,"remaining_balance":99.0}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":[ + {"date":"` + today + `","model":"qwen/qwen3-coder-flash","total_cost":0.0,"total_tokens":9000,"requests":3} + ]}`)) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"qwen/qwen3-coder-flash","total_cost":0.2,"tokens_prompt":5000,"tokens_completion":5000,"created_at":"%s","provider_name":"Novita"} + ]}`, now))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_NO_DOUBLE", "test-key") + defer os.Unsetenv("TEST_OR_KEY_NO_DOUBLE") + + p := New() + acct := core.AccountConfig{ + ID: "test-no-double", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_NO_DOUBLE", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + tok, ok := snap.Metrics["model_qwen_qwen3-coder-flash_total_tokens"] + if !ok || tok.Used == nil { + t.Fatalf("missing model total tokens metric: %+v", tok) + } + if *tok.Used != 10000 { + t.Fatalf("total_tokens = %v, 
want 10000 (generation live)", *tok.Used) + } + + inp, ok := snap.Metrics["model_qwen_qwen3-coder-flash_input_tokens"] + if !ok || inp.Used == nil || *inp.Used != 5000 { + t.Fatalf("model input tokens = %+v, want 5000 from generation", inp) + } + if got := snap.Raw["model_mix_source"]; got != "generation_live" { + t.Fatalf("model_mix_source = %q, want generation_live", got) + } +} + +func TestFetch_AnalyticsCachedAt_GenerationLiveModelMix(t *testing.T) { + now := time.Now().UTC() + cachedAt := now.Add(-1 * time.Hour).Truncate(time.Second) + afterCache := now.Add(-20 * time.Minute).Truncate(time.Second) + beforeCache := now.Add(-2 * time.Hour).Truncate(time.Second) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":5.01,"limit":10.0,"usage_monthly":5.01,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":5.01,"remaining_balance":4.99}}`)) + case "/api/internal/v1/transaction-analytics": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":{"data":[ + {"date":"%s","model":"qwen/qwen3-coder-flash","total_cost":1.00,"total_tokens":1000,"requests":1} + ],"cachedAt":"%s"}}`, now.Format("2006-01-02"), cachedAt.Format(time.RFC3339)))) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + {"id":"gen-before","model":"qwen/qwen3-coder-flash","total_cost":0.50,"tokens_prompt":100,"tokens_completion":50,"created_at":"%s","provider_name":"Novita"}, + {"id":"gen-after","model":"qwen/qwen3-coder-flash","total_cost":0.25,"tokens_prompt":80,"tokens_completion":20,"created_at":"%s","provider_name":"Novita"} + ]}`, beforeCache.Format(time.RFC3339), 
afterCache.Format(time.RFC3339)))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_CACHE_DELTA", "test-key") + defer os.Unsetenv("TEST_OR_KEY_CACHE_DELTA") + + p := New() + acct := core.AccountConfig{ + ID: "test-cache-delta", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_CACHE_DELTA", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + cost, ok := snap.Metrics["model_qwen_qwen3-coder-flash_cost_usd"] + if !ok || cost.Used == nil { + t.Fatalf("missing model cost metric: %+v", cost) + } + if math.Abs(*cost.Used-0.75) > 0.0001 { + t.Fatalf("model cost = %v, want 0.75 (generation live)", *cost.Used) + } + + reqs, ok := snap.Metrics["model_qwen_qwen3-coder-flash_requests"] + if !ok || reqs.Used == nil { + t.Fatalf("missing model requests metric: %+v", reqs) + } + if math.Abs(*reqs.Used-2.0) > 0.0001 { + t.Fatalf("model requests = %v, want 2", *reqs.Used) + } + + if got := snap.Raw["model_mix_source"]; got != "generation_live" { + t.Fatalf("model_mix_source = %q, want generation_live", got) + } +} + +func TestFetch_AnalyticsMaxDate_GenerationLiveModelMix(t *testing.T) { + now := time.Now().UTC() + staleDay := now.AddDate(0, 0, -2).Format("2006-01-02") + newerTs := now.Add(-30 * time.Minute).Format(time.RFC3339) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":5.74,"limit":10.0,"usage_monthly":5.74,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":5.74,"remaining_balance":4.26}}`)) + case "/activity": + w.WriteHeader(http.StatusOK) + 
w.Write([]byte(fmt.Sprintf(`{"data":[ + {"date":"%s","model":"qwen/qwen3-coder-flash","total_cost":1.00,"total_tokens":1000,"requests":1} + ]}`, staleDay))) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + {"id":"gen-new","model":"qwen/qwen3-coder-flash","total_cost":0.40,"tokens_prompt":120,"tokens_completion":80,"created_at":"%s","provider_name":"Novita"} + ]}`, newerTs))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_MAXDATE_DELTA", "test-key") + defer os.Unsetenv("TEST_OR_KEY_MAXDATE_DELTA") + + p := New() + acct := core.AccountConfig{ + ID: "test-maxdate-delta", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_MAXDATE_DELTA", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + cost, ok := snap.Metrics["model_qwen_qwen3-coder-flash_cost_usd"] + if !ok || cost.Used == nil { + t.Fatalf("missing model cost metric: %+v", cost) + } + if math.Abs(*cost.Used-0.40) > 0.0001 { + t.Fatalf("model cost = %v, want 0.40 (generation live)", *cost.Used) + } + + if got := snap.Raw["model_mix_source"]; got != "generation_live" { + t.Fatalf("model_mix_source = %q, want generation_live", got) + } +} + +func TestFetch_StaleAnalytics_GenerationLiveAndStaleMarker(t *testing.T) { + now := time.Now().UTC() + staleCachedAt := now.Add(-2 * time.Hour).Truncate(time.Second) + generationTs := now.Add(-5 * time.Minute).Truncate(time.Second) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":5.74,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + 
w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":5.74,"remaining_balance":4.26}}`)) + case "/api/internal/v1/transaction-analytics": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":{"data":[ + {"date":"%s","model":"old/model","total_cost":3.0,"total_tokens":3000000,"requests":10} + ],"cachedAt":"%s"}}`, now.AddDate(0, 0, -2).Format("2006-01-02"), staleCachedAt.Format(time.RFC3339)))) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"fresh/model","total_cost":0.40,"tokens_prompt":120,"tokens_completion":80,"created_at":"%s","provider_name":"Novita"} + ]}`, generationTs.Format(time.RFC3339)))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_STALE_MIX", "test-key") + defer os.Unsetenv("TEST_OR_KEY_STALE_MIX") + + p := New() + acct := core.AccountConfig{ + ID: "test-stale-mix", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_STALE_MIX", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + if got := snap.Raw["activity_rows_stale"]; got != "true" { + t.Fatalf("activity_rows_stale = %q, want true", got) + } + + if got := snap.Raw["model_mix_source"]; got != "generation_live" { + t.Fatalf("model_mix_source = %q, want generation_live", got) + } + + if tok, ok := snap.Metrics["model_old_model_total_tokens"]; !ok || tok.Used == nil || *tok.Used != 3000000 { + t.Fatalf("old model total tokens metric missing/invalid: %+v", tok) + } + if cost, ok := snap.Metrics["model_fresh_model_cost_usd"]; !ok || cost.Used == nil || *cost.Used != 0.4 { + t.Fatalf("fresh model delta cost metric missing/invalid: %+v", cost) + } +} + +func TestFetch_FreshAnalytics_GenerationLiveAndFreshMarker(t *testing.T) { + now := time.Now().UTC() + freshCachedAt := now.Add(-2 * time.Minute).Truncate(time.Second) + generationTs := now.Add(-1 * 
time.Minute).Truncate(time.Second) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/auth/key": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"label":"test","usage":5.74,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) + case "/credits": + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":5.74,"remaining_balance":4.26}}`)) + case "/api/internal/v1/transaction-analytics": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":{"data":[ + {"date":"%s","model":"qwen/qwen3-coder-flash","total_cost":1.0,"total_tokens":1000,"requests":1} + ],"cachedAt":"%s"}}`, now.Format("2006-01-02"), freshCachedAt.Format(time.RFC3339)))) + case "/generation": + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf(`{"data":[ + {"id":"gen-1","model":"qwen/qwen3-coder-flash","total_cost":0.10,"tokens_prompt":10,"tokens_completion":5,"created_at":"%s","provider_name":"Novita"} + ]}`, generationTs.Format(time.RFC3339)))) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + os.Setenv("TEST_OR_KEY_FRESH_MIX", "test-key") + defer os.Unsetenv("TEST_OR_KEY_FRESH_MIX") + + p := New() + acct := core.AccountConfig{ + ID: "test-fresh-mix", + Provider: "openrouter", + APIKeyEnv: "TEST_OR_KEY_FRESH_MIX", + BaseURL: server.URL, + } + + snap, err := p.Fetch(context.Background(), acct) + if err != nil { + t.Fatalf("Fetch() error: %v", err) + } + + source := snap.Raw["model_mix_source"] + if source != "generation_live" { + t.Fatalf("model_mix_source = %q, want generation_live", source) + } + if got := snap.Raw["activity_rows_stale"]; got != "false" { + t.Fatalf("activity_rows_stale = %q, want false", got) + } +} diff --git a/internal/providers/openrouter/openrouter_test.go b/internal/providers/openrouter/openrouter_test.go index 
f2503fd..2d7e82a 100644 --- a/internal/providers/openrouter/openrouter_test.go +++ b/internal/providers/openrouter/openrouter_test.go @@ -3,7 +3,6 @@ package openrouter import ( "context" "fmt" - "math" "net/http" "net/http/httptest" "os" @@ -616,2040 +615,3 @@ func TestFetch_FreeTier(t *testing.T) { t.Errorf("message = %q, want to contain $0.0000", snap.Message) } } - -func TestFetch_AnalyticsEndpoint(t *testing.T) { - now := todayISO() - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0,"remaining_balance":95.0}}`)) - case "/analytics/user-activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[ - {"date":"2026-02-18","model":"anthropic/claude-3.5-sonnet","total_cost":1.50,"total_tokens":50000,"requests":20}, - {"date":"2026-02-19","model":"anthropic/claude-3.5-sonnet","total_cost":2.00,"total_tokens":70000,"requests":30}, - {"date":"2026-02-19","model":"openai/gpt-4o","total_cost":0.50,"total_tokens":10000,"requests":5}, - {"date":"2026-02-20","model":"anthropic/claude-3.5-sonnet","total_cost":0.75,"total_tokens":25000,"requests":10} - ]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - data := fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"anthropic/claude-3.5-sonnet","total_cost":0.01,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"Anthropic"} - ]}`, now) - w.Write([]byte(data)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_ANALYTICS", "test-key") - defer os.Unsetenv("TEST_OR_KEY_ANALYTICS") - - p := New() - acct := 
core.AccountConfig{ - ID: "test-analytics", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_ANALYTICS", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if snap.Status != core.StatusOK { - t.Errorf("Status = %v, want OK; message=%s", snap.Status, snap.Message) - } - - if snap.DailySeries == nil { - t.Fatal("DailySeries is nil") - } - - analyticsCost, ok := snap.DailySeries["analytics_cost"] - if !ok { - t.Fatal("missing analytics_cost in DailySeries") - } - if len(analyticsCost) != 3 { - t.Fatalf("analytics_cost has %d entries, want 3", len(analyticsCost)) - } - // Verify sorted by date - if analyticsCost[0].Date != "2026-02-18" { - t.Errorf("analytics_cost[0].Date = %q, want 2026-02-18", analyticsCost[0].Date) - } - // 2026-02-19 has two entries summed: 2.00 + 0.50 = 2.50 - if math.Abs(analyticsCost[1].Value-2.50) > 0.001 { - t.Errorf("analytics_cost[1].Value = %v, want 2.50", analyticsCost[1].Value) - } - - analyticsTokens, ok := snap.DailySeries["analytics_tokens"] - if !ok { - t.Fatal("missing analytics_tokens in DailySeries") - } - if len(analyticsTokens) != 3 { - t.Fatalf("analytics_tokens has %d entries, want 3", len(analyticsTokens)) - } - // 2026-02-19: 70000 + 10000 = 80000 - if math.Abs(analyticsTokens[1].Value-80000) > 0.1 { - t.Errorf("analytics_tokens[1].Value = %v, want 80000", analyticsTokens[1].Value) - } - - // Verify no analytics_error in Raw - if _, hasErr := snap.Raw["analytics_error"]; hasErr { - t.Errorf("unexpected analytics_error: %s", snap.Raw["analytics_error"]) - } -} - -func TestFetch_AnalyticsTotalTokensOnly_TracksModelAndNormalizesName(t *testing.T) { - now := todayISO() - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - 
w.Write([]byte(`{"data":{"label":"test","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0,"remaining_balance":99.0}}`)) - case "/analytics/user-activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[ - {"date":"2026-02-20","model":"Qwen/Qwen3-Coder-Flash","total_cost":0.0,"total_tokens":4000,"requests":1}, - {"date":"2026-02-21","model":"qwen/qwen3-coder-flash","total_cost":0.0,"total_tokens":8000,"requests":1} - ]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"openai/gpt-4o","total_cost":0.001,"tokens_prompt":10,"tokens_completion":5,"created_at":"%s","provider_name":"OpenAI"} - ]}`, now))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_ANALYTICS_TOTAL_ONLY", "test-key") - defer os.Unsetenv("TEST_OR_KEY_ANALYTICS_TOTAL_ONLY") - - p := New() - acct := core.AccountConfig{ - ID: "test-analytics-total-only", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_ANALYTICS_TOTAL_ONLY", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if snap.Status != core.StatusOK { - t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) - } - - tok, ok := snap.Metrics["model_qwen_qwen3-coder-flash_total_tokens"] - if !ok { - t.Fatal("missing normalized qwen total tokens metric") - } - if tok.Used == nil || *tok.Used != 12000 { - t.Fatalf("model_qwen_qwen3-coder-flash_total_tokens = %v, want 12000", tok.Used) - } - - reqs, ok := snap.Metrics["model_qwen_qwen3-coder-flash_requests"] - if !ok { - t.Fatal("missing normalized qwen requests metric") - } - if reqs.Used == nil || *reqs.Used != 2 { - t.Fatalf("model_qwen_qwen3-coder-flash_requests = %v, want 2", reqs.Used) - 
} - - if _, ok := snap.Metrics["model_Qwen_Qwen3-Coder-Flash_total_tokens"]; ok { - t.Fatal("unexpected unnormalized model metric key present") - } - - foundQwenRecord := false - for _, rec := range snap.ModelUsage { - if rec.RawModelID != "qwen/qwen3-coder-flash" { - continue - } - foundQwenRecord = true - if rec.TotalTokens == nil || *rec.TotalTokens != 12000 { - t.Fatalf("qwen model_usage total_tokens = %v, want 12000", rec.TotalTokens) - } - if rec.Requests == nil || *rec.Requests != 2 { - t.Fatalf("qwen model_usage requests = %v, want 2", rec.Requests) - } - } - if !foundQwenRecord { - t.Fatal("expected normalized qwen model_usage record") - } - - if m, ok := snap.Metrics["lang_code"]; !ok || m.Used == nil || *m.Used != 2 { - t.Fatalf("lang_code = %v, want 2", m.Used) - } - if m, ok := snap.Metrics["lang_general"]; !ok || m.Used == nil || *m.Used != 1 { - t.Fatalf("lang_general = %v, want 1", m.Used) - } -} - -func TestFetch_GenerationPerModel_FallsBackTo30dWhenAnalyticsUnavailable(t *testing.T) { - now := time.Now().UTC() - today := now.Format(time.RFC3339) - tenDaysAgo := now.AddDate(0, 0, -10).Format(time.RFC3339) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0,"remaining_balance":99.0}}`)) - case "/activity", "/analytics/user-activity": - w.WriteHeader(http.StatusNotFound) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"qwen/qwen3-coder-flash","total_cost":0.20,"tokens_prompt":1000,"tokens_completion":2000,"created_at":"%s","provider_name":"Novita"}, - 
{"id":"gen-2","model":"QWEN/QWEN3-CODER-FLASH","total_cost":0.30,"tokens_prompt":3000,"tokens_completion":4000,"created_at":"%s","provider_name":"Novita"} - ]}`, today, tenDaysAgo))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_GEN_30D", "test-key") - defer os.Unsetenv("TEST_OR_KEY_GEN_30D") - - p := New() - acct := core.AccountConfig{ - ID: "test-gen-30d", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_GEN_30D", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - inp, ok := snap.Metrics["model_qwen_qwen3-coder-flash_input_tokens"] - if !ok || inp.Used == nil { - t.Fatalf("missing model_qwen_qwen3-coder-flash_input_tokens metric: %+v", inp) - } - if *inp.Used != 4000 { - t.Fatalf("input tokens = %v, want 4000", *inp.Used) - } - if inp.Window != "30d" { - t.Fatalf("input window = %q, want 30d", inp.Window) - } - - out, ok := snap.Metrics["model_qwen_qwen3-coder-flash_output_tokens"] - if !ok || out.Used == nil { - t.Fatalf("missing model_qwen_qwen3-coder-flash_output_tokens metric: %+v", out) - } - if *out.Used != 6000 { - t.Fatalf("output tokens = %v, want 6000", *out.Used) - } - - reqs, ok := snap.Metrics["model_qwen_qwen3-coder-flash_requests"] - if !ok || reqs.Used == nil { - t.Fatalf("missing model_qwen_qwen3-coder-flash_requests metric: %+v", reqs) - } - if *reqs.Used != 2 { - t.Fatalf("requests = %v, want 2", *reqs.Used) - } -} - -func TestFetch_AnalyticsRows_GenerationModelMixIsAuthoritative(t *testing.T) { - now := time.Now().UTC().Format(time.RFC3339) - today := time.Now().UTC().Format("2006-01-02") - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - 
w.Write([]byte(`{"data":{"label":"test","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0,"remaining_balance":99.0}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[ - {"date":"` + today + `","model":"qwen/qwen3-coder-flash","total_cost":0.0,"total_tokens":9000,"requests":3} - ]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"qwen/qwen3-coder-flash","total_cost":0.2,"tokens_prompt":5000,"tokens_completion":5000,"created_at":"%s","provider_name":"Novita"} - ]}`, now))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_NO_DOUBLE", "test-key") - defer os.Unsetenv("TEST_OR_KEY_NO_DOUBLE") - - p := New() - acct := core.AccountConfig{ - ID: "test-no-double", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_NO_DOUBLE", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - tok, ok := snap.Metrics["model_qwen_qwen3-coder-flash_total_tokens"] - if !ok || tok.Used == nil { - t.Fatalf("missing model total tokens metric: %+v", tok) - } - if *tok.Used != 10000 { - t.Fatalf("total_tokens = %v, want 10000 (generation live)", *tok.Used) - } - - inp, ok := snap.Metrics["model_qwen_qwen3-coder-flash_input_tokens"] - if !ok || inp.Used == nil || *inp.Used != 5000 { - t.Fatalf("model input tokens = %+v, want 5000 from generation", inp) - } - if got := snap.Raw["model_mix_source"]; got != "generation_live" { - t.Fatalf("model_mix_source = %q, want generation_live", got) - } -} - -func TestFetch_AnalyticsCachedAt_GenerationLiveModelMix(t *testing.T) { - now := time.Now().UTC() - cachedAt := now.Add(-1 * time.Hour).Truncate(time.Second) - afterCache := now.Add(-20 * 
time.Minute).Truncate(time.Second) - beforeCache := now.Add(-2 * time.Hour).Truncate(time.Second) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":5.01,"limit":10.0,"usage_monthly":5.01,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":5.01,"remaining_balance":4.99}}`)) - case "/api/internal/v1/transaction-analytics": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":{"data":[ - {"date":"%s","model":"qwen/qwen3-coder-flash","total_cost":1.00,"total_tokens":1000,"requests":1} - ],"cachedAt":"%s"}}`, now.Format("2006-01-02"), cachedAt.Format(time.RFC3339)))) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"id":"gen-before","model":"qwen/qwen3-coder-flash","total_cost":0.50,"tokens_prompt":100,"tokens_completion":50,"created_at":"%s","provider_name":"Novita"}, - {"id":"gen-after","model":"qwen/qwen3-coder-flash","total_cost":0.25,"tokens_prompt":80,"tokens_completion":20,"created_at":"%s","provider_name":"Novita"} - ]}`, beforeCache.Format(time.RFC3339), afterCache.Format(time.RFC3339)))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_CACHE_DELTA", "test-key") - defer os.Unsetenv("TEST_OR_KEY_CACHE_DELTA") - - p := New() - acct := core.AccountConfig{ - ID: "test-cache-delta", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_CACHE_DELTA", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - cost, ok := snap.Metrics["model_qwen_qwen3-coder-flash_cost_usd"] - if !ok || cost.Used == nil { - 
t.Fatalf("missing model cost metric: %+v", cost) - } - if math.Abs(*cost.Used-0.75) > 0.0001 { - t.Fatalf("model cost = %v, want 0.75 (generation live)", *cost.Used) - } - - reqs, ok := snap.Metrics["model_qwen_qwen3-coder-flash_requests"] - if !ok || reqs.Used == nil { - t.Fatalf("missing model requests metric: %+v", reqs) - } - if math.Abs(*reqs.Used-2.0) > 0.0001 { - t.Fatalf("model requests = %v, want 2", *reqs.Used) - } - - if got := snap.Raw["model_mix_source"]; got != "generation_live" { - t.Fatalf("model_mix_source = %q, want generation_live", got) - } -} - -func TestFetch_AnalyticsMaxDate_GenerationLiveModelMix(t *testing.T) { - now := time.Now().UTC() - staleDay := now.AddDate(0, 0, -2).Format("2006-01-02") - newerTs := now.Add(-30 * time.Minute).Format(time.RFC3339) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":5.74,"limit":10.0,"usage_monthly":5.74,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":5.74,"remaining_balance":4.26}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"date":"%s","model":"qwen/qwen3-coder-flash","total_cost":1.00,"total_tokens":1000,"requests":1} - ]}`, staleDay))) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"id":"gen-new","model":"qwen/qwen3-coder-flash","total_cost":0.40,"tokens_prompt":120,"tokens_completion":80,"created_at":"%s","provider_name":"Novita"} - ]}`, newerTs))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_MAXDATE_DELTA", "test-key") - defer os.Unsetenv("TEST_OR_KEY_MAXDATE_DELTA") - - p := New() - 
acct := core.AccountConfig{ - ID: "test-maxdate-delta", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_MAXDATE_DELTA", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - cost, ok := snap.Metrics["model_qwen_qwen3-coder-flash_cost_usd"] - if !ok || cost.Used == nil { - t.Fatalf("missing model cost metric: %+v", cost) - } - if math.Abs(*cost.Used-0.40) > 0.0001 { - t.Fatalf("model cost = %v, want 0.40 (generation live)", *cost.Used) - } - - if got := snap.Raw["model_mix_source"]; got != "generation_live" { - t.Fatalf("model_mix_source = %q, want generation_live", got) - } -} - -func TestFetch_StaleAnalytics_GenerationLiveAndStaleMarker(t *testing.T) { - now := time.Now().UTC() - staleCachedAt := now.Add(-2 * time.Hour).Truncate(time.Second) - generationTs := now.Add(-5 * time.Minute).Truncate(time.Second) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":5.74,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":5.74,"remaining_balance":4.26}}`)) - case "/api/internal/v1/transaction-analytics": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":{"data":[ - {"date":"%s","model":"old/model","total_cost":3.0,"total_tokens":3000000,"requests":10} - ],"cachedAt":"%s"}}`, now.AddDate(0, 0, -2).Format("2006-01-02"), staleCachedAt.Format(time.RFC3339)))) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"fresh/model","total_cost":0.40,"tokens_prompt":120,"tokens_completion":80,"created_at":"%s","provider_name":"Novita"} - ]}`, 
generationTs.Format(time.RFC3339)))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_STALE_MIX", "test-key") - defer os.Unsetenv("TEST_OR_KEY_STALE_MIX") - - p := New() - acct := core.AccountConfig{ - ID: "test-stale-mix", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_STALE_MIX", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if got := snap.Raw["activity_rows_stale"]; got != "true" { - t.Fatalf("activity_rows_stale = %q, want true", got) - } - - if got := snap.Raw["model_mix_source"]; got != "generation_live" { - t.Fatalf("model_mix_source = %q, want generation_live", got) - } - - if tok, ok := snap.Metrics["model_old_model_total_tokens"]; !ok || tok.Used == nil || *tok.Used != 3000000 { - t.Fatalf("old model total tokens metric missing/invalid: %+v", tok) - } - if cost, ok := snap.Metrics["model_fresh_model_cost_usd"]; !ok || cost.Used == nil || *cost.Used != 0.4 { - t.Fatalf("fresh model delta cost metric missing/invalid: %+v", cost) - } -} - -func TestFetch_FreshAnalytics_GenerationLiveAndFreshMarker(t *testing.T) { - now := time.Now().UTC() - freshCachedAt := now.Add(-2 * time.Minute).Truncate(time.Second) - generationTs := now.Add(-1 * time.Minute).Truncate(time.Second) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":5.74,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":5.74,"remaining_balance":4.26}}`)) - case "/api/internal/v1/transaction-analytics": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":{"data":[ - 
{"date":"%s","model":"qwen/qwen3-coder-flash","total_cost":1.0,"total_tokens":1000,"requests":1} - ],"cachedAt":"%s"}}`, now.Format("2006-01-02"), freshCachedAt.Format(time.RFC3339)))) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"qwen/qwen3-coder-flash","total_cost":0.10,"tokens_prompt":10,"tokens_completion":5,"created_at":"%s","provider_name":"Novita"} - ]}`, generationTs.Format(time.RFC3339)))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_FRESH_MIX", "test-key") - defer os.Unsetenv("TEST_OR_KEY_FRESH_MIX") - - p := New() - acct := core.AccountConfig{ - ID: "test-fresh-mix", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_FRESH_MIX", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - source := snap.Raw["model_mix_source"] - if source != "generation_live" { - t.Fatalf("model_mix_source = %q, want generation_live", source) - } - if got := snap.Raw["activity_rows_stale"]; got != "false" { - t.Fatalf("activity_rows_stale = %q, want false", got) - } -} - -func TestFetch_PeriodCosts(t *testing.T) { - now := time.Now().UTC() - today := now.Format(time.RFC3339) - threeDaysAgo := now.AddDate(0, 0, -3).Format(time.RFC3339) - tenDaysAgo := now.AddDate(0, 0, -10).Format(time.RFC3339) - twentyDaysAgo := now.AddDate(0, 0, -20).Format(time.RFC3339) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":10.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - 
w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":10.0,"remaining_balance":90.0}}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - data := fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"anthropic/claude-3.5-sonnet","total_cost":0.50,"tokens_prompt":1000,"tokens_completion":500,"created_at":"%s","provider_name":"Anthropic"}, - {"id":"gen-2","model":"openai/gpt-4o","total_cost":0.30,"tokens_prompt":800,"tokens_completion":400,"created_at":"%s","provider_name":"OpenAI"}, - {"id":"gen-3","model":"anthropic/claude-3.5-sonnet","total_cost":1.00,"tokens_prompt":2000,"tokens_completion":1000,"created_at":"%s","provider_name":"Anthropic"}, - {"id":"gen-4","model":"openai/gpt-4o","total_cost":0.20,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"OpenAI"} - ]}`, today, threeDaysAgo, tenDaysAgo, twentyDaysAgo) - w.Write([]byte(data)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_PERIOD", "test-key") - defer os.Unsetenv("TEST_OR_KEY_PERIOD") - - p := New() - acct := core.AccountConfig{ - ID: "test-period", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_PERIOD", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if snap.Status != core.StatusOK { - t.Errorf("Status = %v, want OK", snap.Status) - } - - // 7d cost: today (0.50) + 3 days ago (0.30) = 0.80 - cost7d, ok := snap.Metrics["7d_api_cost"] - if !ok { - t.Fatal("missing 7d_api_cost metric") - } - if cost7d.Used == nil || math.Abs(*cost7d.Used-0.80) > 0.001 { - t.Errorf("7d_api_cost = %v, want 0.80", cost7d.Used) - } - - // 30d cost: all four = 0.50 + 0.30 + 1.00 + 0.20 = 2.00 - cost30d, ok := snap.Metrics["30d_api_cost"] - if !ok { - t.Fatal("missing 30d_api_cost metric") - } - if cost30d.Used == nil || math.Abs(*cost30d.Used-2.00) > 0.001 { - t.Errorf("30d_api_cost = %v, want 2.00", cost30d.Used) - } - - // 
DailySeries["cost"] should have entries for each unique date - costSeries, ok := snap.DailySeries["cost"] - if !ok { - t.Fatal("missing cost in DailySeries") - } - if len(costSeries) < 3 { - t.Errorf("cost DailySeries has %d entries, want at least 3 distinct days", len(costSeries)) - } - - // DailySeries["requests"] should exist - reqSeries, ok := snap.DailySeries["requests"] - if !ok { - t.Fatal("missing requests in DailySeries") - } - // Total requests across all days should sum to 4 - var totalReqs float64 - for _, pt := range reqSeries { - totalReqs += pt.Value - } - if math.Abs(totalReqs-4) > 0.001 { - t.Errorf("total requests in DailySeries = %v, want 4", totalReqs) - } - - // Per-model token series should exist for the top models - if _, ok := snap.DailySeries["tokens_anthropic_claude-3.5-sonnet"]; !ok { - t.Error("missing tokens_anthropic_claude-3.5-sonnet in DailySeries") - } - if _, ok := snap.DailySeries["tokens_openai_gpt-4o"]; !ok { - t.Error("missing tokens_openai_gpt-4o in DailySeries") - } -} - -func TestFetch_BurnRate(t *testing.T) { - now := time.Now().UTC() - // All generations within the last 60 minutes - tenMinAgo := now.Add(-10 * time.Minute).Format(time.RFC3339) - thirtyMinAgo := now.Add(-30 * time.Minute).Format(time.RFC3339) - fiftyMinAgo := now.Add(-50 * time.Minute).Format(time.RFC3339) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0,"remaining_balance":95.0}}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - data := fmt.Sprintf(`{"data":[ - 
{"id":"gen-1","model":"anthropic/claude-3.5-sonnet","total_cost":0.10,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"Anthropic"}, - {"id":"gen-2","model":"anthropic/claude-3.5-sonnet","total_cost":0.20,"tokens_prompt":1000,"tokens_completion":400,"created_at":"%s","provider_name":"Anthropic"}, - {"id":"gen-3","model":"openai/gpt-4o","total_cost":0.30,"tokens_prompt":1500,"tokens_completion":600,"created_at":"%s","provider_name":"OpenAI"} - ]}`, tenMinAgo, thirtyMinAgo, fiftyMinAgo) - w.Write([]byte(data)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_BURN", "test-key") - defer os.Unsetenv("TEST_OR_KEY_BURN") - - p := New() - acct := core.AccountConfig{ - ID: "test-burn", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_BURN", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if snap.Status != core.StatusOK { - t.Errorf("Status = %v, want OK", snap.Status) - } - - // Burn rate: total cost in last 60 min = 0.10 + 0.20 + 0.30 = 0.60 USD/hour - burnRate, ok := snap.Metrics["burn_rate"] - if !ok { - t.Fatal("missing burn_rate metric") - } - expectedBurn := 0.60 - if burnRate.Used == nil || math.Abs(*burnRate.Used-expectedBurn) > 0.001 { - t.Errorf("burn_rate = %v, want %v", burnRate.Used, expectedBurn) - } - if burnRate.Unit != "USD/hour" { - t.Errorf("burn_rate unit = %q, want USD/hour", burnRate.Unit) - } - - // Daily projected: 0.60 * 24 = 14.40 - dailyProj, ok := snap.Metrics["daily_projected"] - if !ok { - t.Fatal("missing daily_projected metric") - } - expectedProj := 14.40 - if dailyProj.Used == nil || math.Abs(*dailyProj.Used-expectedProj) > 0.01 { - t.Errorf("daily_projected = %v, want %v", dailyProj.Used, expectedProj) - } -} - -func TestFetch_AnalyticsGracefulDegradation(t *testing.T) { - now := todayISO() - - server := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0,"remaining_balance":95.0}}`)) - case "/analytics/user-activity": - // Return 404 to simulate analytics not available - w.WriteHeader(http.StatusNotFound) - case "/generation": - w.WriteHeader(http.StatusOK) - data := fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"openai/gpt-4o","total_cost":0.05,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"OpenAI"} - ]}`, now) - w.Write([]byte(data)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_GRACEFUL", "test-key") - defer os.Unsetenv("TEST_OR_KEY_GRACEFUL") - - p := New() - acct := core.AccountConfig{ - ID: "test-graceful", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_GRACEFUL", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - // Status should still be OK despite analytics failure - if snap.Status != core.StatusOK { - t.Errorf("Status = %v, want OK; message=%s", snap.Status, snap.Message) - } - - // Analytics error should be logged - analyticsErr, ok := snap.Raw["analytics_error"] - if !ok { - t.Error("expected analytics_error in Raw") - } - if !strings.Contains(analyticsErr, "404") { - t.Errorf("analytics_error = %q, want to contain '404'", analyticsErr) - } - - // Generation data should still be processed - if snap.Raw["generations_fetched"] != "1" { - t.Errorf("generations_fetched = %q, want 1", snap.Raw["generations_fetched"]) - } - - // Metrics from credits and generations should still work - if _, ok := 
snap.Metrics["credits"]; !ok { - t.Error("missing credits metric") - } - if _, ok := snap.Metrics["today_requests"]; !ok { - t.Error("missing today_requests metric") - } - - // DailySeries from generations should still be populated - if _, ok := snap.DailySeries["cost"]; !ok { - t.Error("missing cost in DailySeries despite analytics failure") - } -} - -func TestFetch_DateBasedCutoff(t *testing.T) { - now := time.Now().UTC() - recent := now.Add(-1 * time.Hour).Format(time.RFC3339) - fiveDaysAgo := now.AddDate(0, 0, -5).Format(time.RFC3339) - // 35 days ago: beyond the 30-day cutoff - thirtyFiveDaysAgo := now.AddDate(0, 0, -35).Format(time.RFC3339) - - generationRequests := 0 - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/auth/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"test","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0,"remaining_balance":95.0}}`)) - case "/generation": - generationRequests++ - w.WriteHeader(http.StatusOK) - if generationRequests == 1 { - // First page: 2 recent + 1 old (beyond 30 day cutoff) - data := fmt.Sprintf(`{"data":[ - {"id":"gen-1","model":"openai/gpt-4o","total_cost":0.10,"tokens_prompt":500,"tokens_completion":200,"created_at":"%s","provider_name":"OpenAI"}, - {"id":"gen-2","model":"openai/gpt-4o","total_cost":0.20,"tokens_prompt":1000,"tokens_completion":400,"created_at":"%s","provider_name":"OpenAI"}, - {"id":"gen-3","model":"openai/gpt-4o","total_cost":0.50,"tokens_prompt":2000,"tokens_completion":800,"created_at":"%s","provider_name":"OpenAI"} - ]}`, recent, fiveDaysAgo, thirtyFiveDaysAgo) - w.Write([]byte(data)) - } else { - // Should not reach here due to date cutoff - w.Write([]byte(`{"data":[ - 
{"id":"gen-old","model":"openai/gpt-4o","total_cost":999.0,"tokens_prompt":99999,"tokens_completion":99999,"created_at":"2025-01-01T00:00:00Z","provider_name":"OpenAI"} - ]}`)) - } - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_CUTOFF", "test-key") - defer os.Unsetenv("TEST_OR_KEY_CUTOFF") - - p := New() - acct := core.AccountConfig{ - ID: "test-cutoff", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_CUTOFF", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if snap.Status != core.StatusOK { - t.Errorf("Status = %v, want OK", snap.Status) - } - - // Only 2 generations should be fetched (the old one is beyond cutoff) - if snap.Raw["generations_fetched"] != "2" { - t.Errorf("generations_fetched = %q, want 2 (old generation should be excluded)", snap.Raw["generations_fetched"]) - } - - // 30d cost should only include the 2 recent generations: 0.10 + 0.20 = 0.30 - cost30d, ok := snap.Metrics["30d_api_cost"] - if !ok { - t.Fatal("missing 30d_api_cost metric") - } - if cost30d.Used == nil || math.Abs(*cost30d.Used-0.30) > 0.001 { - t.Errorf("30d_api_cost = %v, want 0.30 (should not include generation beyond 30 days)", cost30d.Used) - } - - // Should only have made 1 generation request (stopped due to date cutoff) - if generationRequests != 1 { - t.Errorf("generation API requests = %d, want 1 (should stop on date cutoff)", generationRequests) - } -} - -func TestFetch_CurrentKeyRichData(t *testing.T) { - limitReset := time.Now().UTC().Add(2 * time.Hour).Format(time.RFC3339) - expiresAt := time.Now().UTC().Add(48 * time.Hour).Format(time.RFC3339) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":{ - 
"label":"mgmt-key", - "usage":12.5, - "limit":50.0, - "limit_remaining":37.5, - "usage_daily":1.25, - "usage_weekly":6.5, - "usage_monthly":12.5, - "byok_usage":3.0, - "byok_usage_inference":0.2, - "byok_usage_daily":0.2, - "byok_usage_weekly":0.9, - "byok_usage_monthly":3.0, - "is_free_tier":false, - "is_management_key":true, - "is_provisioning_key":false, - "include_byok_in_limit":true, - "limit_reset":"%s", - "expires_at":"%s", - "rate_limit":{"requests":240,"interval":"10s","note":"model-dependent"} - }}`, limitReset, expiresAt))) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":50.0,"total_usage":12.5}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_RICH", "test-key") - defer os.Unsetenv("TEST_OR_KEY_RICH") - - p := New() - acct := core.AccountConfig{ - ID: "test-rich-key", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_RICH", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if snap.Status != core.StatusOK { - t.Fatalf("Status = %v, want OK", snap.Status) - } - - checkMetric := func(name string, want float64) { - t.Helper() - m, ok := snap.Metrics[name] - if !ok || m.Used == nil { - t.Fatalf("missing metric %s", name) - } - if math.Abs(*m.Used-want) > 0.0001 { - t.Fatalf("%s = %v, want %v", name, *m.Used, want) - } - } - - checkMetric("usage_daily", 1.25) - checkMetric("usage_weekly", 6.5) - checkMetric("usage_monthly", 12.5) - checkMetric("byok_usage", 3.0) - checkMetric("byok_daily", 0.2) - checkMetric("byok_weekly", 0.9) - checkMetric("byok_monthly", 3.0) - checkMetric("limit_remaining", 37.5) - - if got := snap.Raw["key_type"]; got != "management" { - t.Fatalf("key_type = %q, want 
management", got) - } - if got := snap.Raw["rate_limit_note"]; got != "model-dependent" { - t.Fatalf("rate_limit_note = %q, want model-dependent", got) - } - if _, ok := snap.Resets["limit_reset"]; !ok { - t.Fatal("missing limit_reset in Resets") - } - if _, ok := snap.Resets["key_expires"]; !ok { - t.Fatal("missing key_expires in Resets") - } -} - -func TestFetch_ManagementKeyLoadsKeysMetadata(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{ - "label":"sk-or-v1-mgr...abc", - "usage":1.0, - "limit":50.0, - "is_free_tier":false, - "is_management_key":true, - "is_provisioning_key":true, - "rate_limit":{"requests":240,"interval":"10s","note":"deprecated"} - }}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":50.0,"total_usage":1.0}}`)) - case "/keys": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[ - {"hash":"1234567890abcdef","name":"Primary","label":"sk-or-v1-mgr...abc","disabled":false,"limit":50.0,"limit_remaining":49.0,"limit_reset":null,"include_byok_in_limit":false,"usage":1.0,"usage_daily":0.1,"usage_weekly":0.2,"usage_monthly":1.0,"byok_usage":0.0,"byok_usage_daily":0.0,"byok_usage_weekly":0.0,"byok_usage_monthly":0.0,"created_at":"2026-02-20T10:00:00Z","updated_at":"2026-02-20T10:30:00Z","expires_at":null}, - {"hash":"abcdef0123456789","name":"Secondary","label":"sk-or-v1-secondary","disabled":true,"limit":null,"limit_remaining":null,"limit_reset":null,"include_byok_in_limit":false,"usage":0.0,"usage_daily":0.0,"usage_weekly":0.0,"usage_monthly":0.0,"byok_usage":0.0,"byok_usage_daily":0.0,"byok_usage_weekly":0.0,"byok_usage_monthly":0.0,"created_at":"2026-02-19T10:00:00Z","updated_at":null,"expires_at":null} - ]}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - 
w.Write([]byte(`{"data":[]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_KEYS_META", "test-key") - defer os.Unsetenv("TEST_OR_KEY_KEYS_META") - - p := New() - acct := core.AccountConfig{ - ID: "test-keys-meta", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_KEYS_META", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if got := snap.Raw["keys_total"]; got != "2" { - t.Fatalf("keys_total = %q, want 2", got) - } - if got := snap.Raw["keys_active"]; got != "1" { - t.Fatalf("keys_active = %q, want 1", got) - } - if got := snap.Raw["keys_disabled"]; got != "1" { - t.Fatalf("keys_disabled = %q, want 1", got) - } - if got := snap.Raw["key_name"]; got != "Primary" { - t.Fatalf("key_name = %q, want Primary", got) - } - if got := snap.Raw["key_disabled"]; got != "false" { - t.Fatalf("key_disabled = %q, want false", got) - } - if got := snap.Raw["key_created_at"]; got == "" { - t.Fatal("expected key_created_at") - } - - if total := snap.Metrics["keys_total"]; total.Used == nil || *total.Used != 2 { - t.Fatalf("keys_total metric = %v, want 2", total.Used) - } - if active := snap.Metrics["keys_active"]; active.Used == nil || *active.Used != 1 { - t.Fatalf("keys_active metric = %v, want 1", active.Used) - } - if disabled := snap.Metrics["keys_disabled"]; disabled.Used == nil || *disabled.Used != 1 { - t.Fatalf("keys_disabled metric = %v, want 1", disabled.Used) - } -} - -func TestFetch_ActivityEndpointNewSchema(t *testing.T) { - now := time.Now().UTC() - today := now.Format("2006-01-02") - sixDaysAgo := now.AddDate(0, 0, -6).Format("2006-01-02") - fifteenDaysAgo := now.AddDate(0, 0, -15).Format("2006-01-02") - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - 
w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"activity-key","usage":5.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":5.0}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - {"date":"%s","model":"anthropic/claude-3.5-sonnet","endpoint_id":"ep-claude","provider_name":"Anthropic","usage":1.2,"byok_usage_inference":0.4,"prompt_tokens":1000,"completion_tokens":500,"reasoning_tokens":150,"requests":3}, - {"date":"%s","model":"openai/gpt-4o","endpoint_id":"ep-gpt4o","provider_name":"OpenAI","usage":0.8,"byok_usage_inference":0.2,"prompt_tokens":600,"completion_tokens":300,"reasoning_tokens":0,"requests":2}, - {"date":"%s","model":"google/gemini-2.5-pro","endpoint_id":"ep-gemini","provider_name":"Google","usage":2.5,"byok_usage_inference":0.5,"prompt_tokens":1200,"completion_tokens":400,"reasoning_tokens":50,"requests":4} - ]}`, today, sixDaysAgo, fifteenDaysAgo))) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_ACTIVITY_NEW", "test-key") - defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_NEW") - - p := New() - acct := core.AccountConfig{ - ID: "test-activity-new", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_ACTIVITY_NEW", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if got := snap.Raw["activity_endpoint"]; got != "/activity" { - t.Fatalf("activity_endpoint = %q, want /activity", got) - } - if got := snap.Raw["activity_rows"]; got != "3" { - t.Fatalf("activity_rows = %q, want 3", got) - } - if got := 
snap.Raw["activity_endpoints"]; got != "3" { - t.Fatalf("activity_endpoints = %q, want 3", got) - } - - byokToday := snap.Metrics["today_byok_cost"] - if byokToday.Used == nil || math.Abs(*byokToday.Used-0.4) > 0.0001 { - t.Fatalf("today_byok_cost = %v, want 0.4", byokToday.Used) - } - byok7d := snap.Metrics["7d_byok_cost"] - if byok7d.Used == nil || math.Abs(*byok7d.Used-0.6) > 0.0001 { - t.Fatalf("7d_byok_cost = %v, want 0.6", byok7d.Used) - } - byok30d := snap.Metrics["30d_byok_cost"] - if byok30d.Used == nil || math.Abs(*byok30d.Used-1.1) > 0.0001 { - t.Fatalf("30d_byok_cost = %v, want 1.1", byok30d.Used) - } - - if got := seriesValueByDate(snap.DailySeries["analytics_requests"], today); math.Abs(got-3) > 0.001 { - t.Fatalf("analytics_requests[%s] = %v, want 3", today, got) - } - if got := seriesValueByDate(snap.DailySeries["analytics_tokens"], today); math.Abs(got-1650) > 0.001 { - t.Fatalf("analytics_tokens[%s] = %v, want 1650", today, got) - } - if analytics30dCost := snap.Metrics["analytics_30d_cost"]; analytics30dCost.Used == nil || math.Abs(*analytics30dCost.Used-4.5) > 0.001 { - t.Fatalf("analytics_30d_cost = %v, want 4.5", analytics30dCost.Used) - } - if analytics30dReq := snap.Metrics["analytics_30d_requests"]; analytics30dReq.Used == nil || math.Abs(*analytics30dReq.Used-9) > 0.001 { - t.Fatalf("analytics_30d_requests = %v, want 9", analytics30dReq.Used) - } - if analytics7dCost := snap.Metrics["analytics_7d_cost"]; analytics7dCost.Used == nil || math.Abs(*analytics7dCost.Used-2.0) > 0.001 { - t.Fatalf("analytics_7d_cost = %v, want 2.0", analytics7dCost.Used) - } - if endpointCost := snap.Metrics["endpoint_ep-gemini_cost_usd"]; endpointCost.Used == nil || math.Abs(*endpointCost.Used-2.5) > 0.001 { - t.Fatalf("endpoint_ep-gemini_cost_usd = %v, want 2.5", endpointCost.Used) - } - if providerCost := snap.Metrics["provider_google_cost_usd"]; providerCost.Used == nil || math.Abs(*providerCost.Used-2.5) > 0.001 { - t.Fatalf("provider_google_cost_usd = %v, 
want 2.5", providerCost.Used) - } - - mCost := snap.Metrics["model_anthropic_claude-3.5-sonnet_cost_usd"] - if mCost.Used == nil || math.Abs(*mCost.Used-1.2) > 0.0001 { - t.Fatalf("model cost = %v, want 1.2", mCost.Used) - } - mIn := snap.Metrics["model_anthropic_claude-3.5-sonnet_input_tokens"] - if mIn.Used == nil || math.Abs(*mIn.Used-1000) > 0.001 { - t.Fatalf("model input tokens = %v, want 1000", mIn.Used) - } - mOut := snap.Metrics["model_anthropic_claude-3.5-sonnet_output_tokens"] - if mOut.Used == nil || math.Abs(*mOut.Used-500) > 0.001 { - t.Fatalf("model output tokens = %v, want 500", mOut.Used) - } - mReasoning := snap.Metrics["model_anthropic_claude-3.5-sonnet_reasoning_tokens"] - if mReasoning.Used == nil || math.Abs(*mReasoning.Used-150) > 0.001 { - t.Fatalf("model reasoning tokens = %v, want 150", mReasoning.Used) - } - if got := snap.Raw["model_anthropic_claude-3.5-sonnet_requests"]; got != "3" { - t.Fatalf("model requests raw = %q, want 3", got) - } -} - -func TestFetch_ActivityDateTimeFormat(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"activity-key","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":200,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[ - {"date":"2026-02-20 00:00:00","model":"moonshotai/kimi-k2.5","provider_name":"baseten/fp4","usage":0.10,"byok_usage_inference":0.01,"prompt_tokens":1000,"completion_tokens":100,"reasoning_tokens":20,"requests":2}, - {"date":"2026-02-20 
12:34:56","model":"moonshotai/kimi-k2.5","provider_name":"baseten/fp4","usage":0.20,"byok_usage_inference":0.02,"prompt_tokens":2000,"completion_tokens":200,"reasoning_tokens":30,"requests":3} - ]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_ACTIVITY_DT", "test-key") - defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_DT") - - p := New() - acct := core.AccountConfig{ - ID: "test-activity-dt", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_ACTIVITY_DT", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if got := seriesValueByDate(snap.DailySeries["analytics_cost"], "2026-02-20"); math.Abs(got-0.30) > 0.0001 { - t.Fatalf("analytics_cost[2026-02-20] = %v, want 0.30", got) - } - if got := seriesValueByDate(snap.DailySeries["analytics_tokens"], "2026-02-20"); math.Abs(got-3350) > 0.0001 { - t.Fatalf("analytics_tokens[2026-02-20] = %v, want 3350", got) - } - if got := seriesValueByDate(snap.DailySeries["analytics_requests"], "2026-02-20"); math.Abs(got-5) > 0.0001 { - t.Fatalf("analytics_requests[2026-02-20] = %v, want 5", got) - } - if got := seriesValueByDate(snap.DailySeries["analytics_reasoning_tokens"], "2026-02-20"); math.Abs(got-50) > 0.0001 { - t.Fatalf("analytics_reasoning_tokens[2026-02-20] = %v, want 50", got) - } - - mCost := snap.Metrics["model_moonshotai_kimi-k2.5_cost_usd"] - if mCost.Used == nil || math.Abs(*mCost.Used-0.30) > 0.0001 { - t.Fatalf("model cost = %v, want 0.30", mCost.Used) - } - if got := snap.Raw["provider_baseten_fp4_requests"]; got != "5" { - t.Fatalf("provider requests raw = %q, want 5", got) - } - if providerCost := snap.Metrics["provider_baseten_fp4_cost_usd"]; providerCost.Used == nil || math.Abs(*providerCost.Used-0.30) > 0.0001 { - t.Fatalf("provider cost metric = %v, want 0.30", 
providerCost.Used) - } - if analyticsTokens := snap.Metrics["analytics_30d_tokens"]; analyticsTokens.Used == nil || math.Abs(*analyticsTokens.Used-3350) > 0.1 { - t.Fatalf("analytics_30d_tokens = %v, want 3350", analyticsTokens.Used) - } -} - -func TestResolveGenerationHostingProvider_PrefersUpstreamResponses(t *testing.T) { - ok200 := 200 - fail503 := 503 - - tests := []struct { - name string - gen generationEntry - want string - }{ - { - name: "prefers successful provider response", - gen: generationEntry{ - Model: "moonshotai/kimi-k2.5", - ProviderName: "Openusage", - ProviderResponses: []generationProviderResponse{ - {ProviderName: "Openusage", Status: &fail503}, - {ProviderName: "Novita", Status: &ok200}, - }, - }, - want: "Novita", - }, - { - name: "falls back to provider_name when responses missing", - gen: generationEntry{ - Model: "openai/gpt-4o", - ProviderName: "OpenAI", - }, - want: "OpenAI", - }, - { - name: "falls back to model vendor prefix", - gen: generationEntry{ - Model: "z-ai/glm-5", - }, - want: "z-ai", - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - if got := resolveGenerationHostingProvider(tc.gen); got != tc.want { - t.Fatalf("resolveGenerationHostingProvider() = %q, want %q", got, tc.want) - } - }) - } -} - -func TestFetch_GenerationUsesUpstreamProviderResponsesForProviderBreakdown(t *testing.T) { - now := time.Now().UTC().Format(time.RFC3339) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"gen-provider","usage":0.3,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":0.3}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - 
case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - { - "id":"gen-1", - "model":"moonshotai/kimi-k2.5", - "total_cost":0.2, - "tokens_prompt":1200, - "tokens_completion":800, - "created_at":"%s", - "provider_name":"Openusage", - "provider_responses":[ - {"provider_name":"Openusage","status":503}, - {"provider_name":"Novita","status":200} - ] - }, - { - "id":"gen-2", - "model":"z-ai/glm-5", - "total_cost":0.1, - "tokens_prompt":100, - "tokens_completion":50, - "created_at":"%s" - } - ]}`, now, now))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_GEN_PROVIDER_RESPONSES", "test-key") - defer os.Unsetenv("TEST_OR_KEY_GEN_PROVIDER_RESPONSES") - - p := New() - acct := core.AccountConfig{ - ID: "test-gen-provider-responses", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_GEN_PROVIDER_RESPONSES", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if got := snap.Raw["provider_novita_requests"]; got != "1" { - t.Fatalf("provider_novita_requests = %q, want 1", got) - } - if got := snap.Raw["provider_z-ai_requests"]; got != "1" { - t.Fatalf("provider_z-ai_requests = %q, want 1", got) - } - if _, ok := snap.Metrics["provider_openusage_requests"]; ok { - t.Fatal("provider_openusage_requests should not be emitted when upstream provider_responses are present") - } - if got := snap.Raw["model_moonshotai_kimi-k2.5_providers"]; got != "Novita" { - t.Fatalf("model_moonshotai_kimi-k2.5_providers = %q, want Novita", got) - } -} - -func TestResolveGenerationHostingProvider_TreatsOpenusageAsNonHostProvider(t *testing.T) { - gen := generationEntry{ - Model: "moonshotai-kimi-k2.5", - ProviderName: "Openusage", - } - if got := resolveGenerationHostingProvider(gen); got != "moonshotai" { - t.Fatalf("resolveGenerationHostingProvider() = %q, want moonshotai", got) - } -} - -func 
TestResolveGenerationHostingProvider_UsesAlternativeEntryFields(t *testing.T) { - gen := generationEntry{ - Model: "moonshotai-kimi-k2.5", - ProviderName: "Openusage", - UpstreamProvider: "Novita", - UpstreamProviderName: "", - } - if got := resolveGenerationHostingProvider(gen); got != "Novita" { - t.Fatalf("resolveGenerationHostingProvider() = %q, want Novita", got) - } -} - -func TestFetch_GenerationProviderDetailEnrichmentForGenericProviderLabel(t *testing.T) { - now := time.Now().UTC().Format(time.RFC3339) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"gen-detail","usage":0.1,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":0.1}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - case "/generation": - if r.URL.Query().Get("id") == "gen-1" { - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{ - "id":"gen-1", - "model":"moonshotai/kimi-k2.5", - "total_cost":0.1, - "tokens_prompt":1000, - "tokens_completion":500, - "provider_name":"Openusage", - "provider_responses":[ - {"provider_name":"Openusage","status":503}, - {"provider_name":"Novita","status":200} - ] - }}`)) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - { - "id":"gen-1", - "model":"moonshotai/kimi-k2.5", - "total_cost":0.1, - "tokens_prompt":1000, - "tokens_completion":500, - "created_at":"%s", - "provider_name":"Openusage" - } - ]}`, now))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_GEN_DETAIL_ENRICH", "test-key") - defer os.Unsetenv("TEST_OR_KEY_GEN_DETAIL_ENRICH") - - p := New() - acct := 
core.AccountConfig{ - ID: "test-gen-detail-enrich", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_GEN_DETAIL_ENRICH", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if got := snap.Raw["generation_provider_detail_lookups"]; got != "1" { - t.Fatalf("generation_provider_detail_lookups = %q, want 1", got) - } - if got := snap.Raw["generation_provider_detail_hits"]; got != "1" { - t.Fatalf("generation_provider_detail_hits = %q, want 1", got) - } - if got := snap.Raw["provider_novita_requests"]; got != "1" { - t.Fatalf("provider_novita_requests = %q, want 1", got) - } - if _, ok := snap.Metrics["provider_openusage_requests"]; ok { - t.Fatal("provider_openusage_requests should not be emitted after detail enrichment") - } -} - -func TestFetch_GenerationExtendedMetrics(t *testing.T) { - now := time.Now().UTC().Format(time.RFC3339) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"gen-ext","usage":1.0,"limit":100.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":100.0,"total_usage":1.0}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(fmt.Sprintf(`{"data":[ - { - "id":"gen-1", - "model":"openai/gpt-4o", - "total_cost":0.09, - "is_byok":true, - "upstream_inference_cost":0.07, - "tokens_prompt":1000, - "tokens_completion":500, - "native_tokens_prompt":900, - "native_tokens_completion":450, - "native_tokens_reasoning":120, - "native_tokens_cached":80, - "native_tokens_completion_images":5, - "num_media_prompt":2, - "num_media_completion":1, - 
"num_input_audio_prompt":3, - "num_search_results":4, - "streamed":true, - "latency":2000, - "generation_time":1500, - "moderation_latency":120, - "cancelled":true, - "finish_reason":"stop", - "origin":"https://openrouter.ai", - "router":"openrouter/auto", - "api_type":"completions", - "created_at":"%s", - "provider_name":"OpenAI" - } - ]}`, now))) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_GEN_EXT", "test-key") - defer os.Unsetenv("TEST_OR_KEY_GEN_EXT") - - p := New() - acct := core.AccountConfig{ - ID: "test-generation-ext", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_GEN_EXT", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - check := func(name string, want float64) { - t.Helper() - m, ok := snap.Metrics[name] - if !ok || m.Used == nil { - t.Fatalf("missing metric %s", name) - } - if math.Abs(*m.Used-want) > 0.0001 { - t.Fatalf("%s = %v, want %v", name, *m.Used, want) - } - } - - check("today_reasoning_tokens", 120) - check("today_cached_tokens", 80) - check("today_image_tokens", 5) - check("today_native_input_tokens", 900) - check("today_native_output_tokens", 450) - check("today_media_prompts", 2) - check("today_media_completions", 1) - check("today_audio_inputs", 3) - check("today_search_results", 4) - check("today_cancelled", 1) - check("today_streamed_requests", 1) - check("today_streamed_percent", 100) - check("today_avg_latency", 2) - check("today_avg_generation_time", 1.5) - check("today_avg_moderation_latency", 0.12) - check("today_completions_requests", 1) - check("today_byok_cost", 0.07) - check("7d_byok_cost", 0.07) - check("30d_byok_cost", 0.07) - check("tool_openai_gpt-4o", 1) - check("tool_calls_total", 1) - check("tool_completed", 0) - check("tool_cancelled", 1) - check("tool_success_rate", 0) - check("model_openai_gpt-4o_reasoning_tokens", 120) - 
check("model_openai_gpt-4o_cached_tokens", 80) - check("model_openai_gpt-4o_image_tokens", 5) - check("model_openai_gpt-4o_native_input_tokens", 900) - check("model_openai_gpt-4o_native_output_tokens", 450) - check("model_openai_gpt-4o_avg_latency", 2) - - if got := snap.Raw["today_finish_reasons"]; !strings.Contains(got, "stop=1") { - t.Fatalf("today_finish_reasons = %q, want stop=1", got) - } - if got := snap.Raw["today_origins"]; !strings.Contains(got, "https://openrouter.ai=1") { - t.Fatalf("today_origins = %q, want https://openrouter.ai=1", got) - } - if got := snap.Raw["today_routers"]; !strings.Contains(got, "openrouter/auto=1") { - t.Fatalf("today_routers = %q, want openrouter/auto=1", got) - } - if got := snap.Raw["tool_usage_source"]; got != "inferred_from_model_requests" { - t.Fatalf("tool_usage_source = %q, want inferred_from_model_requests", got) - } - if got := snap.Raw["tool_usage"]; !strings.Contains(got, "openai/gpt-4o: 1 calls") { - t.Fatalf("tool_usage = %q, want model-based usage summary", got) - } -} - -func TestFetch_ActivityForbidden_ReportsManagementKeyRequirement(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) - case "/activity": - w.WriteHeader(http.StatusForbidden) - w.Write([]byte(`{"error":{"message":"Only management keys can fetch activity for an account","code":403}}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_ACTIVITY_403", "test-key") - 
defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_403") - - p := New() - acct := core.AccountConfig{ - ID: "test-activity-403", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_ACTIVITY_403", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if snap.Status != core.StatusOK { - t.Fatalf("Status = %v, want OK", snap.Status) - } - if got := snap.Raw["analytics_error"]; !strings.Contains(got, "management keys") { - t.Fatalf("analytics_error = %q, want management-keys message", got) - } - if !strings.Contains(snap.Message, "$2.2500 used / $10.00 credits") { - t.Fatalf("message = %q, want credits-detail based message", snap.Message) - } -} - -func TestFetch_ActivityForbidden_FallsBackToAnalyticsUserActivity(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) - case "/activity": - w.WriteHeader(http.StatusForbidden) - w.Write([]byte(`{"error":{"message":"Only management keys can fetch activity for an account","code":403}}`)) - case "/analytics/user-activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[ - {"date":"2026-02-21","model":"qwen/qwen3-coder-flash","total_cost":0.918,"total_tokens":3058944,"requests":72} - ]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_ACTIVITY_FALLBACK", "test-key") - defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_FALLBACK") - - p := New() - acct := 
core.AccountConfig{ - ID: "test-activity-fallback", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_ACTIVITY_FALLBACK", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - if snap.Status != core.StatusOK { - t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) - } - if _, ok := snap.Raw["analytics_error"]; ok { - t.Fatalf("unexpected analytics_error: %q", snap.Raw["analytics_error"]) - } - if got := snap.Raw["activity_endpoint"]; got != "/analytics/user-activity" { - t.Fatalf("activity_endpoint = %q, want /analytics/user-activity", got) - } - if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_total_tokens"]; !ok || m.Used == nil || *m.Used != 3058944 { - t.Fatalf("missing/invalid qwen total tokens metric: %+v", m) - } -} - -func TestFetch_ActivityDateFallback_UsesYesterdayAndNoCacheHeaders(t *testing.T) { - var seenEmptyDate bool - var seenFallbackDate string - var seenCacheControl string - var seenPragma string - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) - case "/activity": - seenCacheControl = r.Header.Get("Cache-Control") - seenPragma = r.Header.Get("Pragma") - date := strings.TrimSpace(r.URL.Query().Get("date")) - if date == "" { - seenEmptyDate = true - w.WriteHeader(http.StatusBadRequest) - w.Write([]byte(`{"error":{"message":"Date must be within the last 30 (completed) UTC days","code":400}}`)) - return - } - seenFallbackDate = date - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[ - {"date":"2026-02-21 
00:00:00","model_permaslug":"qwen/qwen3-coder-flash","usage":0.91764,"requests":72,"prompt_tokens":3052166,"completion_tokens":6778,"reasoning_tokens":0,"cached_tokens":1508864} - ]}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_ACTIVITY_DATE_FALLBACK", "test-key") - defer os.Unsetenv("TEST_OR_KEY_ACTIVITY_DATE_FALLBACK") - - p := New() - acct := core.AccountConfig{ - ID: "test-activity-date-fallback", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_ACTIVITY_DATE_FALLBACK", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - if snap.Status != core.StatusOK { - t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) - } - if !seenEmptyDate { - t.Fatal("expected initial /activity call without date") - } - if seenFallbackDate == "" { - t.Fatal("expected fallback /activity call with date query") - } - if seenCacheControl != "no-cache, no-store, max-age=0" { - t.Fatalf("cache-control = %q, want no-cache, no-store, max-age=0", seenCacheControl) - } - if seenPragma != "no-cache" { - t.Fatalf("pragma = %q, want no-cache", seenPragma) - } - if got := snap.Raw["activity_endpoint"]; !strings.HasPrefix(got, "/activity?date=") { - t.Fatalf("activity_endpoint = %q, want /activity?date=...", got) - } - if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_input_tokens"]; !ok || m.Used == nil || *m.Used != 3052166 { - t.Fatalf("missing/invalid qwen input tokens metric: %+v", m) - } -} - -func TestFetch_TransactionAnalyticsNestedPayload(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - 
w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) - case "/api/internal/v1/transaction-analytics": - if r.URL.RawQuery != "window=1mo" { - t.Fatalf("unexpected query: %q", r.URL.RawQuery) - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"cachedAt":"2026-02-22T00:00:00Z","data":[ - {"date":"2026-02-21 00:00:00","model_permaslug":"qwen/qwen3-coder-flash","usage":0.91764,"requests":72,"prompt_tokens":3052166,"completion_tokens":6778,"reasoning_tokens":0,"cached_tokens":1508864} - ]}}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_TX_ANALYTICS", "test-key") - defer os.Unsetenv("TEST_OR_KEY_TX_ANALYTICS") - - p := New() - acct := core.AccountConfig{ - ID: "test-tx-analytics", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_TX_ANALYTICS", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - if snap.Status != core.StatusOK { - t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) - } - if got := snap.Raw["activity_endpoint"]; got != "/api/internal/v1/transaction-analytics?window=1mo" { - t.Fatalf("activity_endpoint = %q, want transaction analytics endpoint", got) - } - if got := snap.Raw["activity_cached_at"]; got != "2026-02-22T00:00:00Z" { - t.Fatalf("activity_cached_at = %q, want 2026-02-22T00:00:00Z", got) - } - if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_input_tokens"]; !ok || m.Used == nil || *m.Used != 3052166 { - t.Fatalf("missing/invalid qwen input tokens metric: %+v", m) - } - if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_output_tokens"]; !ok || m.Used == nil || 
*m.Used != 6778 { - t.Fatalf("missing/invalid qwen output tokens metric: %+v", m) - } - if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_cached_tokens"]; !ok || m.Used == nil || *m.Used != 1508864 { - t.Fatalf("missing/invalid qwen cached tokens metric: %+v", m) - } - if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_cost_usd"]; !ok || m.Used == nil || math.Abs(*m.Used-0.91764) > 0.000001 { - t.Fatalf("missing/invalid qwen cost metric: %+v", m) - } -} - -func TestFetch_TransactionAnalyticsNumericCachedAtAndByokRequests(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) - case "/api/internal/v1/transaction-analytics": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"cachedAt":1771717984900,"data":[ - {"date":"2026-02-21 00:00:00","model_permaslug":"qwen/qwen3-coder-flash","usage":0.91764,"requests":72,"byok_requests":3,"prompt_tokens":3052166,"completion_tokens":6778,"reasoning_tokens":0,"cached_tokens":1508864} - ]}}`)) - case "/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_TX_ANALYTICS_NUM", "test-key") - defer os.Unsetenv("TEST_OR_KEY_TX_ANALYTICS_NUM") - - p := New() - acct := core.AccountConfig{ - ID: "test-tx-analytics-num", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_TX_ANALYTICS_NUM", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - if got := 
snap.Raw["activity_cached_at"]; got != "2026-02-21T23:53:04Z" { - t.Fatalf("activity_cached_at = %q, want 2026-02-21T23:53:04Z", got) - } - if m, ok := snap.Metrics["model_qwen_qwen3-coder-flash_byok_requests"]; !ok || m.Used == nil || *m.Used != 3 { - t.Fatalf("missing/invalid byok requests metric: %+v", m) - } -} - -func TestFetch_TransactionAnalyticsURL_UsesRootWhenBaseURLHasAPIV1(t *testing.T) { - var seenInternalPath string - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/api/v1/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"std-key","usage":0.5,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/api/v1/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":2.25}}`)) - case "/api/internal/v1/transaction-analytics": - seenInternalPath = r.URL.Path - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"cachedAt":1771717984900,"data":[ - {"date":"2026-02-21 00:00:00","model_permaslug":"qwen/qwen3-coder-flash","usage":0.91764,"requests":72,"prompt_tokens":3052166,"completion_tokens":6778,"reasoning_tokens":0,"cached_tokens":1508864} - ]}}`)) - case "/api/v1/generation": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_TX_URL", "test-key") - defer os.Unsetenv("TEST_OR_KEY_TX_URL") - - p := New() - acct := core.AccountConfig{ - ID: "test-tx-url", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_TX_URL", - BaseURL: server.URL + "/api/v1", - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - if snap.Status != core.StatusOK { - t.Fatalf("Status = %v, want OK; message=%s", snap.Status, snap.Message) - } - if seenInternalPath != 
"/api/internal/v1/transaction-analytics" { - t.Fatalf("internal analytics path = %q, want /api/internal/v1/transaction-analytics", seenInternalPath) - } - if got := snap.Raw["activity_endpoint"]; got != "/api/internal/v1/transaction-analytics?window=1mo" { - t.Fatalf("activity_endpoint = %q, want transaction analytics endpoint", got) - } -} - -func TestFetch_GenerationListUnsupported_Graceful(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - switch r.URL.Path { - case "/key": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"label":"std-key","usage":1.0,"limit":10.0,"is_free_tier":false,"rate_limit":{"requests":100,"interval":"10s"}}}`)) - case "/credits": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":{"total_credits":10.0,"total_usage":1.0}}`)) - case "/activity": - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"data":[]}`)) - case "/generation": - w.WriteHeader(http.StatusBadRequest) - w.Write([]byte(`{"success":false,"error":{"name":"ZodError","message":"expected string for id"}}`)) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - os.Setenv("TEST_OR_KEY_GEN_400", "test-key") - defer os.Unsetenv("TEST_OR_KEY_GEN_400") - - p := New() - acct := core.AccountConfig{ - ID: "test-generation-400", - Provider: "openrouter", - APIKeyEnv: "TEST_OR_KEY_GEN_400", - BaseURL: server.URL, - } - - snap, err := p.Fetch(context.Background(), acct) - if err != nil { - t.Fatalf("Fetch() error: %v", err) - } - - if got := snap.Raw["generation_note"]; got == "" { - t.Fatal("missing generation_note for unsupported generation listing") - } - if got := snap.Raw["generations_fetched"]; got != "0" { - t.Fatalf("generations_fetched = %q, want 0", got) - } - if _, ok := snap.Raw["generation_error"]; ok { - t.Fatalf("unexpected generation_error = %q", snap.Raw["generation_error"]) - } -} - -func seriesValueByDate(points 
[]core.TimePoint, date string) float64 { - for _, p := range points { - if p.Date == date { - return p.Value - } - } - return 0 -} diff --git a/internal/providers/zai/usage_projection.go b/internal/providers/zai/usage_projection.go new file mode 100644 index 0000000..c438c84 --- /dev/null +++ b/internal/providers/zai/usage_projection.go @@ -0,0 +1,398 @@ +package zai + +import ( + "sort" + "strconv" + "strings" + "time" + + "github.com/janekbaraniewski/openusage/internal/core" +) + +func projectModelUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { + today := time.Now().UTC().Format("2006-01-02") + hasNamedModelRows := false + for _, sample := range samples { + if strings.TrimSpace(sample.Name) != "" { + hasNamedModelRows = true + break + } + } + + total := usageRollup{} + todayRollup := usageRollup{} + modelTotals := make(map[string]*usageRollup) + clientTotals := make(map[string]*usageRollup) + sourceTotals := make(map[string]*usageRollup) + providerTotals := make(map[string]*usageRollup) + interfaceTotals := make(map[string]*usageRollup) + endpointTotals := make(map[string]*usageRollup) + languageTotals := make(map[string]*usageRollup) + dailyCost := make(map[string]float64) + dailyReq := make(map[string]float64) + dailyTokens := make(map[string]float64) + modelDailyTokens := make(map[string]map[string]float64) + clientDailyReq := make(map[string]map[string]float64) + sourceDailyReq := make(map[string]map[string]float64) + sourceTodayReq := make(map[string]float64) + + for _, sample := range samples { + modelName := strings.TrimSpace(sample.Name) + useRow := !hasNamedModelRows || modelName != "" + if !useRow { + if lang := normalizeUsageDimension(sample.Language); lang != "" { + accumulateUsageRollup(languageTotals, lang, sample) + } + if client := normalizeUsageDimension(sample.Client); client != "" { + accumulateUsageRollup(clientTotals, client, sample) + if sample.Date != "" { + if _, ok := clientDailyReq[client]; !ok { + clientDailyReq[client] 
= make(map[string]float64) + } + clientDailyReq[client][sample.Date] += sample.Requests + } + } + if source := normalizeUsageDimension(sample.Source); source != "" { + accumulateUsageRollup(sourceTotals, source, sample) + if sample.Date == today { + sourceTodayReq[source] += sample.Requests + } + if sample.Date != "" { + if _, ok := sourceDailyReq[source]; !ok { + sourceDailyReq[source] = make(map[string]float64) + } + sourceDailyReq[source][sample.Date] += sample.Requests + } + } + if provider := normalizeUsageDimension(sample.Provider); provider != "" { + accumulateUsageRollup(providerTotals, provider, sample) + } + if iface := normalizeUsageDimension(sample.Interface); iface != "" { + accumulateUsageRollup(interfaceTotals, iface, sample) + } + if endpoint := normalizeUsageDimension(sample.Endpoint); endpoint != "" { + accumulateUsageRollup(endpointTotals, endpoint, sample) + } + continue + } + accumulateRollupValues(&total, sample) + if modelName != "" { + accumulateUsageRollup(modelTotals, modelName, sample) + } + + if sample.Date == today { + accumulateRollupValues(&todayRollup, sample) + } + + if sample.Date != "" && modelName != "" { + dailyCost[sample.Date] += sample.CostUSD + dailyReq[sample.Date] += sample.Requests + dailyTokens[sample.Date] += sample.Total + if _, ok := modelDailyTokens[modelName]; !ok { + modelDailyTokens[modelName] = make(map[string]float64) + } + modelDailyTokens[modelName][sample.Date] += sample.Total + } + + if client := normalizeUsageDimension(sample.Client); client != "" { + accumulateUsageRollup(clientTotals, client, sample) + if sample.Date != "" { + if _, ok := clientDailyReq[client]; !ok { + clientDailyReq[client] = make(map[string]float64) + } + clientDailyReq[client][sample.Date] += sample.Requests + } + } + + if source := normalizeUsageDimension(sample.Source); source != "" { + accumulateUsageRollup(sourceTotals, source, sample) + if sample.Date == today { + sourceTodayReq[source] += sample.Requests + } + if sample.Date != 
"" { + if _, ok := sourceDailyReq[source]; !ok { + sourceDailyReq[source] = make(map[string]float64) + } + sourceDailyReq[source][sample.Date] += sample.Requests + } + } + + if provider := normalizeUsageDimension(sample.Provider); provider != "" { + accumulateUsageRollup(providerTotals, provider, sample) + } + if iface := normalizeUsageDimension(sample.Interface); iface != "" { + accumulateUsageRollup(interfaceTotals, iface, sample) + } + if endpoint := normalizeUsageDimension(sample.Endpoint); endpoint != "" { + accumulateUsageRollup(endpointTotals, endpoint, sample) + } + lang := normalizeUsageDimension(sample.Language) + if lang == "" { + lang = inferModelUsageLanguage(modelName) + } + if lang != "" { + accumulateUsageRollup(languageTotals, lang, sample) + } + } + + setUsedMetric(snap, "today_requests", todayRollup.Requests, "requests", "today") + setUsedMetric(snap, "requests_today", todayRollup.Requests, "requests", "today") + setUsedMetric(snap, "today_input_tokens", todayRollup.Input, "tokens", "today") + setUsedMetric(snap, "today_output_tokens", todayRollup.Output, "tokens", "today") + setUsedMetric(snap, "today_reasoning_tokens", todayRollup.Reasoning, "tokens", "today") + setUsedMetric(snap, "today_tokens", todayRollup.Total, "tokens", "today") + setUsedMetric(snap, "today_api_cost", todayRollup.CostUSD, "USD", "today") + setUsedMetric(snap, "today_cost", todayRollup.CostUSD, "USD", "today") + + setUsedMetric(snap, "7d_requests", total.Requests, "requests", "7d") + setUsedMetric(snap, "7d_tokens", total.Total, "tokens", "7d") + setUsedMetric(snap, "7d_api_cost", total.CostUSD, "USD", "7d") + setUsedMetric(snap, "window_requests", total.Requests, "requests", "7d") + setUsedMetric(snap, "window_tokens", total.Total, "tokens", "7d") + setUsedMetric(snap, "window_cost", total.CostUSD, "USD", "7d") + + setUsedMetric(snap, "active_models", float64(len(modelTotals)), "models", "7d") + snap.Raw["model_usage_window"] = "7d" + snap.Raw["activity_models"] = 
strconv.Itoa(len(modelTotals)) + snap.SetAttribute("activity_models", strconv.Itoa(len(modelTotals))) + + modelKeys := core.SortedStringKeys(modelTotals) + for _, model := range modelKeys { + stats := modelTotals[model] + slug := sanitizeMetricSlug(model) + setUsedMetric(snap, "model_"+slug+"_requests", stats.Requests, "requests", "7d") + setUsedMetric(snap, "model_"+slug+"_input_tokens", stats.Input, "tokens", "7d") + setUsedMetric(snap, "model_"+slug+"_output_tokens", stats.Output, "tokens", "7d") + setUsedMetric(snap, "model_"+slug+"_total_tokens", stats.Total, "tokens", "7d") + setUsedMetric(snap, "model_"+slug+"_cost_usd", stats.CostUSD, "USD", "7d") + snap.Raw["model_"+slug+"_name"] = model + + rec := core.ModelUsageRecord{RawModelID: model, RawSource: "api", Window: "7d"} + if stats.Input > 0 { + rec.InputTokens = core.Float64Ptr(stats.Input) + } + if stats.Output > 0 { + rec.OutputTokens = core.Float64Ptr(stats.Output) + } + if stats.Reasoning > 0 { + rec.ReasoningTokens = core.Float64Ptr(stats.Reasoning) + } + if stats.Total > 0 { + rec.TotalTokens = core.Float64Ptr(stats.Total) + } + if stats.CostUSD > 0 { + rec.CostUSD = core.Float64Ptr(stats.CostUSD) + } + if stats.Requests > 0 { + rec.Requests = core.Float64Ptr(stats.Requests) + } + snap.AppendModelUsage(rec) + } + + for _, client := range sortedUsageRollupKeys(clientTotals) { + stats := clientTotals[client] + slug := sanitizeMetricSlug(client) + setUsedMetric(snap, "client_"+slug+"_total_tokens", stats.Total, "tokens", "7d") + setUsedMetric(snap, "client_"+slug+"_input_tokens", stats.Input, "tokens", "7d") + setUsedMetric(snap, "client_"+slug+"_output_tokens", stats.Output, "tokens", "7d") + setUsedMetric(snap, "client_"+slug+"_reasoning_tokens", stats.Reasoning, "tokens", "7d") + setUsedMetric(snap, "client_"+slug+"_requests", stats.Requests, "requests", "7d") + snap.Raw["client_"+slug+"_name"] = client + } + + for _, source := range sortedUsageRollupKeys(sourceTotals) { + stats := 
sourceTotals[source] + slug := sanitizeMetricSlug(source) + setUsedMetric(snap, "source_"+slug+"_requests", stats.Requests, "requests", "7d") + if reqToday := sourceTodayReq[source]; reqToday > 0 { + setUsedMetric(snap, "source_"+slug+"_requests_today", reqToday, "requests", "1d") + } + } + + for _, provider := range sortedUsageRollupKeys(providerTotals) { + stats := providerTotals[provider] + slug := sanitizeMetricSlug(provider) + setUsedMetric(snap, "provider_"+slug+"_cost_usd", stats.CostUSD, "USD", "7d") + setUsedMetric(snap, "provider_"+slug+"_requests", stats.Requests, "requests", "7d") + setUsedMetric(snap, "provider_"+slug+"_input_tokens", stats.Input, "tokens", "7d") + setUsedMetric(snap, "provider_"+slug+"_output_tokens", stats.Output, "tokens", "7d") + snap.Raw["provider_"+slug+"_name"] = provider + } + + for _, iface := range sortedUsageRollupKeys(interfaceTotals) { + stats := interfaceTotals[iface] + setUsedMetric(snap, "interface_"+sanitizeMetricSlug(iface), stats.Requests, "calls", "7d") + } + + for _, endpoint := range sortedUsageRollupKeys(endpointTotals) { + stats := endpointTotals[endpoint] + setUsedMetric(snap, "endpoint_"+sanitizeMetricSlug(endpoint)+"_requests", stats.Requests, "requests", "7d") + } + + languageReqSummary := make(map[string]float64, len(languageTotals)) + for _, lang := range sortedUsageRollupKeys(languageTotals) { + stats := languageTotals[lang] + slug := sanitizeMetricSlug(lang) + value := stats.Requests + if value <= 0 { + value = stats.Total + } + setUsedMetric(snap, "lang_"+slug, value, "requests", "7d") + languageReqSummary[lang] = stats.Requests + } + setUsedMetric(snap, "active_languages", float64(len(languageTotals)), "languages", "7d") + setUsedMetric(snap, "activity_providers", float64(len(providerTotals)), "providers", "7d") + + snap.DailySeries["cost"] = core.SortedTimePoints(dailyCost) + snap.DailySeries["requests"] = core.SortedTimePoints(dailyReq) + snap.DailySeries["tokens"] = 
core.SortedTimePoints(dailyTokens) + + type modelTotal struct { + name string + tokens float64 + } + var ranked []modelTotal + for model, stats := range modelTotals { + ranked = append(ranked, modelTotal{name: model, tokens: stats.Total}) + } + sort.Slice(ranked, func(i, j int) bool { return ranked[i].tokens > ranked[j].tokens }) + if len(ranked) > 3 { + ranked = ranked[:3] + } + for _, entry := range ranked { + if dayMap, ok := modelDailyTokens[entry.name]; ok { + snap.DailySeries["tokens_"+sanitizeMetricSlug(entry.name)] = core.SortedTimePoints(dayMap) + } + } + + for client, dayMap := range clientDailyReq { + if len(dayMap) > 0 { + snap.DailySeries["usage_client_"+sanitizeMetricSlug(client)] = core.SortedTimePoints(dayMap) + } + } + for source, dayMap := range sourceDailyReq { + if len(dayMap) > 0 { + snap.DailySeries["usage_source_"+sanitizeMetricSlug(source)] = core.SortedTimePoints(dayMap) + } + } + + modelShare := make(map[string]float64, len(modelTotals)) + modelUnit := "tok" + for model, stats := range modelTotals { + if stats.Total > 0 { + modelShare[model] = stats.Total + } else if stats.Requests > 0 { + modelShare[model] = stats.Requests + modelUnit = "req" + } + } + if summary := summarizeShareUsage(modelShare, 6); summary != "" { + snap.Raw["model_usage"] = summary + snap.Raw["model_usage_unit"] = modelUnit + } + + clientShare := make(map[string]float64, len(clientTotals)) + for client, stats := range clientTotals { + if stats.Total > 0 { + clientShare[client] = stats.Total + } else if stats.Requests > 0 { + clientShare[client] = stats.Requests + } + } + if summary := summarizeShareUsage(clientShare, 6); summary != "" { + snap.Raw["client_usage"] = summary + } + + sourceShare := make(map[string]float64, len(sourceTotals)) + for source, stats := range sourceTotals { + if stats.Requests > 0 { + sourceShare[source] = stats.Requests + } + } + if summary := summarizeCountUsage(sourceShare, "req", 6); summary != "" { + snap.Raw["source_usage"] = summary + } 
+ + providerShare := make(map[string]float64, len(providerTotals)) + for provider, stats := range providerTotals { + if stats.CostUSD > 0 { + providerShare[provider] = stats.CostUSD + } else if stats.Requests > 0 { + providerShare[provider] = stats.Requests + } + } + if summary := summarizeShareUsage(providerShare, 6); summary != "" { + snap.Raw["provider_usage"] = summary + } + if summary := summarizeCountUsage(languageReqSummary, "req", 8); summary != "" { + snap.Raw["language_usage"] = summary + } + + snap.Raw["activity_days"] = strconv.Itoa(len(dailyReq)) + snap.Raw["activity_clients"] = strconv.Itoa(len(clientTotals)) + snap.Raw["activity_sources"] = strconv.Itoa(len(sourceTotals)) + snap.Raw["activity_providers"] = strconv.Itoa(len(providerTotals)) + snap.Raw["activity_languages"] = strconv.Itoa(len(languageTotals)) + snap.Raw["activity_endpoints"] = strconv.Itoa(len(endpointTotals)) + snap.SetAttribute("activity_days", snap.Raw["activity_days"]) + snap.SetAttribute("activity_clients", snap.Raw["activity_clients"]) + snap.SetAttribute("activity_sources", snap.Raw["activity_sources"]) + snap.SetAttribute("activity_providers", snap.Raw["activity_providers"]) + snap.SetAttribute("activity_languages", snap.Raw["activity_languages"]) + snap.SetAttribute("activity_endpoints", snap.Raw["activity_endpoints"]) +} + +func projectToolUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { + today := time.Now().UTC().Format("2006-01-02") + totalCalls := 0.0 + todayCalls := 0.0 + toolTotals := make(map[string]*usageRollup) + dailyCalls := make(map[string]float64) + + for _, sample := range samples { + tool := sample.Name + if tool == "" { + tool = "unknown" + } + acc, ok := toolTotals[tool] + if !ok { + acc = &usageRollup{} + toolTotals[tool] = acc + } + acc.Requests += sample.Requests + acc.CostUSD += sample.CostUSD + totalCalls += sample.Requests + if sample.Date == today { + todayCalls += sample.Requests + } + if sample.Date != "" { + dailyCalls[sample.Date] += 
sample.Requests + } + } + + setUsedMetric(snap, "tool_calls_today", todayCalls, "calls", "today") + setUsedMetric(snap, "today_tool_calls", todayCalls, "calls", "today") + setUsedMetric(snap, "7d_tool_calls", totalCalls, "calls", "7d") + + for _, tool := range core.SortedStringKeys(toolTotals) { + stats := toolTotals[tool] + slug := sanitizeMetricSlug(tool) + setUsedMetric(snap, "tool_"+slug, stats.Requests, "calls", "7d") + setUsedMetric(snap, "toolcost_"+slug+"_usd", stats.CostUSD, "USD", "7d") + snap.Raw["tool_"+slug+"_name"] = tool + } + + if len(dailyCalls) > 0 { + snap.DailySeries["tool_calls"] = core.SortedTimePoints(dailyCalls) + } + + toolSummary := make(map[string]float64, len(toolTotals)) + for tool, stats := range toolTotals { + if stats.Requests > 0 { + toolSummary[tool] = stats.Requests + } + } + if summary := summarizeCountUsage(toolSummary, "calls", 8); summary != "" { + snap.Raw["tool_usage"] = summary + } +} diff --git a/internal/providers/zai/zai.go b/internal/providers/zai/zai.go index aff69bb..80fb522 100644 --- a/internal/providers/zai/zai.go +++ b/internal/providers/zai/zai.go @@ -7,7 +7,6 @@ import ( "io" "math" "net/http" - "sort" "strconv" "strings" "time" @@ -369,7 +368,7 @@ func (p *Provider) fetchModelUsage(ctx context.Context, monitorBase, apiKey stri return nil } - applyModelUsageSamples(samples, snap) + projectModelUsageSamples(samples, snap) state.hasUsageData = true snap.Raw["model_usage_api"] = "ok" return nil @@ -422,7 +421,7 @@ func (p *Provider) fetchToolUsage(ctx context.Context, monitorBase, apiKey strin return nil } - applyToolUsageSamples(samples, snap) + projectToolUsageSamples(samples, snap) state.hasUsageData = true snap.Raw["tool_usage_api"] = "ok" return nil @@ -723,410 +722,3 @@ func (p *Provider) finalizeStatusAndMessage(snap *core.UsageSnapshot, state *pro snap.Message = "OK" } - -func applyModelUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { - today := time.Now().UTC().Format("2006-01-02") - 
hasNamedModelRows := false - for _, sample := range samples { - if strings.TrimSpace(sample.Name) != "" { - hasNamedModelRows = true - break - } - } - - total := usageRollup{} - todayRollup := usageRollup{} - modelTotals := make(map[string]*usageRollup) - clientTotals := make(map[string]*usageRollup) - sourceTotals := make(map[string]*usageRollup) - providerTotals := make(map[string]*usageRollup) - interfaceTotals := make(map[string]*usageRollup) - endpointTotals := make(map[string]*usageRollup) - languageTotals := make(map[string]*usageRollup) - dailyCost := make(map[string]float64) - dailyReq := make(map[string]float64) - dailyTokens := make(map[string]float64) - modelDailyTokens := make(map[string]map[string]float64) - clientDailyReq := make(map[string]map[string]float64) - sourceDailyReq := make(map[string]map[string]float64) - sourceTodayReq := make(map[string]float64) - - for _, sample := range samples { - modelName := strings.TrimSpace(sample.Name) - useRow := !hasNamedModelRows || modelName != "" - if !useRow { - lang := normalizeUsageDimension(sample.Language) - if lang != "" { - accumulateUsageRollup(languageTotals, lang, sample) - } - if client := normalizeUsageDimension(sample.Client); client != "" { - accumulateUsageRollup(clientTotals, client, sample) - if sample.Date != "" { - if _, ok := clientDailyReq[client]; !ok { - clientDailyReq[client] = make(map[string]float64) - } - clientDailyReq[client][sample.Date] += sample.Requests - } - } - if source := normalizeUsageDimension(sample.Source); source != "" { - accumulateUsageRollup(sourceTotals, source, sample) - if sample.Date == today { - sourceTodayReq[source] += sample.Requests - } - if sample.Date != "" { - if _, ok := sourceDailyReq[source]; !ok { - sourceDailyReq[source] = make(map[string]float64) - } - sourceDailyReq[source][sample.Date] += sample.Requests - } - } - if provider := normalizeUsageDimension(sample.Provider); provider != "" { - accumulateUsageRollup(providerTotals, provider, sample) 
- } - if iface := normalizeUsageDimension(sample.Interface); iface != "" { - accumulateUsageRollup(interfaceTotals, iface, sample) - } - if endpoint := normalizeUsageDimension(sample.Endpoint); endpoint != "" { - accumulateUsageRollup(endpointTotals, endpoint, sample) - } - continue - } - accumulateRollupValues(&total, sample) - if modelName != "" { - accumulateUsageRollup(modelTotals, modelName, sample) - } - - if sample.Date == today { - accumulateRollupValues(&todayRollup, sample) - } - - if sample.Date != "" && modelName != "" { - dailyCost[sample.Date] += sample.CostUSD - dailyReq[sample.Date] += sample.Requests - dailyTokens[sample.Date] += sample.Total - if _, ok := modelDailyTokens[modelName]; !ok { - modelDailyTokens[modelName] = make(map[string]float64) - } - modelDailyTokens[modelName][sample.Date] += sample.Total - } - - if client := normalizeUsageDimension(sample.Client); client != "" { - accumulateUsageRollup(clientTotals, client, sample) - if sample.Date != "" { - if _, ok := clientDailyReq[client]; !ok { - clientDailyReq[client] = make(map[string]float64) - } - clientDailyReq[client][sample.Date] += sample.Requests - } - } - - if source := normalizeUsageDimension(sample.Source); source != "" { - accumulateUsageRollup(sourceTotals, source, sample) - if sample.Date == today { - sourceTodayReq[source] += sample.Requests - } - if sample.Date != "" { - if _, ok := sourceDailyReq[source]; !ok { - sourceDailyReq[source] = make(map[string]float64) - } - sourceDailyReq[source][sample.Date] += sample.Requests - } - } - - if provider := normalizeUsageDimension(sample.Provider); provider != "" { - accumulateUsageRollup(providerTotals, provider, sample) - } - if iface := normalizeUsageDimension(sample.Interface); iface != "" { - accumulateUsageRollup(interfaceTotals, iface, sample) - } - if endpoint := normalizeUsageDimension(sample.Endpoint); endpoint != "" { - accumulateUsageRollup(endpointTotals, endpoint, sample) - } - lang := 
normalizeUsageDimension(sample.Language) - if lang == "" { - lang = inferModelUsageLanguage(modelName) - } - if lang != "" { - accumulateUsageRollup(languageTotals, lang, sample) - } - } - - setUsedMetric(snap, "today_requests", todayRollup.Requests, "requests", "today") - setUsedMetric(snap, "requests_today", todayRollup.Requests, "requests", "today") - setUsedMetric(snap, "today_input_tokens", todayRollup.Input, "tokens", "today") - setUsedMetric(snap, "today_output_tokens", todayRollup.Output, "tokens", "today") - setUsedMetric(snap, "today_reasoning_tokens", todayRollup.Reasoning, "tokens", "today") - setUsedMetric(snap, "today_tokens", todayRollup.Total, "tokens", "today") - setUsedMetric(snap, "today_api_cost", todayRollup.CostUSD, "USD", "today") - setUsedMetric(snap, "today_cost", todayRollup.CostUSD, "USD", "today") - - setUsedMetric(snap, "7d_requests", total.Requests, "requests", "7d") - setUsedMetric(snap, "7d_tokens", total.Total, "tokens", "7d") - setUsedMetric(snap, "7d_api_cost", total.CostUSD, "USD", "7d") - setUsedMetric(snap, "window_requests", total.Requests, "requests", "7d") - setUsedMetric(snap, "window_tokens", total.Total, "tokens", "7d") - setUsedMetric(snap, "window_cost", total.CostUSD, "USD", "7d") - - setUsedMetric(snap, "active_models", float64(len(modelTotals)), "models", "7d") - snap.Raw["model_usage_window"] = "7d" - snap.Raw["activity_models"] = strconv.Itoa(len(modelTotals)) - snap.SetAttribute("activity_models", strconv.Itoa(len(modelTotals))) - - modelKeys := core.SortedStringKeys(modelTotals) - - for _, model := range modelKeys { - stats := modelTotals[model] - slug := sanitizeMetricSlug(model) - setUsedMetric(snap, "model_"+slug+"_requests", stats.Requests, "requests", "7d") - setUsedMetric(snap, "model_"+slug+"_input_tokens", stats.Input, "tokens", "7d") - setUsedMetric(snap, "model_"+slug+"_output_tokens", stats.Output, "tokens", "7d") - setUsedMetric(snap, "model_"+slug+"_total_tokens", stats.Total, "tokens", "7d") - 
setUsedMetric(snap, "model_"+slug+"_cost_usd", stats.CostUSD, "USD", "7d") - snap.Raw["model_"+slug+"_name"] = model - - rec := core.ModelUsageRecord{ - RawModelID: model, - RawSource: "api", - Window: "7d", - } - if stats.Input > 0 { - rec.InputTokens = core.Float64Ptr(stats.Input) - } - if stats.Output > 0 { - rec.OutputTokens = core.Float64Ptr(stats.Output) - } - if stats.Reasoning > 0 { - rec.ReasoningTokens = core.Float64Ptr(stats.Reasoning) - } - if stats.Total > 0 { - rec.TotalTokens = core.Float64Ptr(stats.Total) - } - if stats.CostUSD > 0 { - rec.CostUSD = core.Float64Ptr(stats.CostUSD) - } - if stats.Requests > 0 { - rec.Requests = core.Float64Ptr(stats.Requests) - } - snap.AppendModelUsage(rec) - } - - clientKeys := sortedUsageRollupKeys(clientTotals) - for _, client := range clientKeys { - stats := clientTotals[client] - slug := sanitizeMetricSlug(client) - setUsedMetric(snap, "client_"+slug+"_total_tokens", stats.Total, "tokens", "7d") - setUsedMetric(snap, "client_"+slug+"_input_tokens", stats.Input, "tokens", "7d") - setUsedMetric(snap, "client_"+slug+"_output_tokens", stats.Output, "tokens", "7d") - setUsedMetric(snap, "client_"+slug+"_reasoning_tokens", stats.Reasoning, "tokens", "7d") - setUsedMetric(snap, "client_"+slug+"_requests", stats.Requests, "requests", "7d") - snap.Raw["client_"+slug+"_name"] = client - } - - sourceKeys := sortedUsageRollupKeys(sourceTotals) - for _, source := range sourceKeys { - stats := sourceTotals[source] - slug := sanitizeMetricSlug(source) - setUsedMetric(snap, "source_"+slug+"_requests", stats.Requests, "requests", "7d") - if reqToday := sourceTodayReq[source]; reqToday > 0 { - setUsedMetric(snap, "source_"+slug+"_requests_today", reqToday, "requests", "1d") - } - } - - providerKeys := sortedUsageRollupKeys(providerTotals) - for _, provider := range providerKeys { - stats := providerTotals[provider] - slug := sanitizeMetricSlug(provider) - setUsedMetric(snap, "provider_"+slug+"_cost_usd", stats.CostUSD, "USD", 
"7d") - setUsedMetric(snap, "provider_"+slug+"_requests", stats.Requests, "requests", "7d") - setUsedMetric(snap, "provider_"+slug+"_input_tokens", stats.Input, "tokens", "7d") - setUsedMetric(snap, "provider_"+slug+"_output_tokens", stats.Output, "tokens", "7d") - snap.Raw["provider_"+slug+"_name"] = provider - } - - interfaceKeys := sortedUsageRollupKeys(interfaceTotals) - for _, iface := range interfaceKeys { - stats := interfaceTotals[iface] - slug := sanitizeMetricSlug(iface) - setUsedMetric(snap, "interface_"+slug, stats.Requests, "calls", "7d") - } - - endpointKeys := sortedUsageRollupKeys(endpointTotals) - for _, endpoint := range endpointKeys { - stats := endpointTotals[endpoint] - slug := sanitizeMetricSlug(endpoint) - setUsedMetric(snap, "endpoint_"+slug+"_requests", stats.Requests, "requests", "7d") - } - - languageKeys := sortedUsageRollupKeys(languageTotals) - languageReqSummary := make(map[string]float64, len(languageKeys)) - for _, lang := range languageKeys { - stats := languageTotals[lang] - slug := sanitizeMetricSlug(lang) - value := stats.Requests - if value <= 0 { - value = stats.Total - } - setUsedMetric(snap, "lang_"+slug, value, "requests", "7d") - languageReqSummary[lang] = stats.Requests - } - setUsedMetric(snap, "active_languages", float64(len(languageTotals)), "languages", "7d") - setUsedMetric(snap, "activity_providers", float64(len(providerTotals)), "providers", "7d") - - snap.DailySeries["cost"] = core.SortedTimePoints(dailyCost) - snap.DailySeries["requests"] = core.SortedTimePoints(dailyReq) - snap.DailySeries["tokens"] = core.SortedTimePoints(dailyTokens) - - type modelTotal struct { - name string - tokens float64 - } - var ranked []modelTotal - for model, stats := range modelTotals { - ranked = append(ranked, modelTotal{name: model, tokens: stats.Total}) - } - sort.Slice(ranked, func(i, j int) bool { return ranked[i].tokens > ranked[j].tokens }) - if len(ranked) > 3 { - ranked = ranked[:3] - } - for _, entry := range ranked { - if 
dayMap, ok := modelDailyTokens[entry.name]; ok { - key := "tokens_" + sanitizeMetricSlug(entry.name) - snap.DailySeries[key] = core.SortedTimePoints(dayMap) - } - } - - for client, dayMap := range clientDailyReq { - if len(dayMap) == 0 { - continue - } - snap.DailySeries["usage_client_"+sanitizeMetricSlug(client)] = core.SortedTimePoints(dayMap) - } - for source, dayMap := range sourceDailyReq { - if len(dayMap) == 0 { - continue - } - snap.DailySeries["usage_source_"+sanitizeMetricSlug(source)] = core.SortedTimePoints(dayMap) - } - - modelShare := make(map[string]float64, len(modelTotals)) - modelUnit := "tok" - for model, stats := range modelTotals { - if stats.Total > 0 { - modelShare[model] = stats.Total - continue - } - if stats.Requests > 0 { - modelShare[model] = stats.Requests - modelUnit = "req" - } - } - if summary := summarizeShareUsage(modelShare, 6); summary != "" { - snap.Raw["model_usage"] = summary - snap.Raw["model_usage_unit"] = modelUnit - } - clientShare := make(map[string]float64, len(clientTotals)) - for client, stats := range clientTotals { - if stats.Total > 0 { - clientShare[client] = stats.Total - } else if stats.Requests > 0 { - clientShare[client] = stats.Requests - } - } - if summary := summarizeShareUsage(clientShare, 6); summary != "" { - snap.Raw["client_usage"] = summary - } - sourceShare := make(map[string]float64, len(sourceTotals)) - for source, stats := range sourceTotals { - if stats.Requests > 0 { - sourceShare[source] = stats.Requests - } - } - if summary := summarizeCountUsage(sourceShare, "req", 6); summary != "" { - snap.Raw["source_usage"] = summary - } - providerShare := make(map[string]float64, len(providerTotals)) - for provider, stats := range providerTotals { - if stats.CostUSD > 0 { - providerShare[provider] = stats.CostUSD - } else if stats.Requests > 0 { - providerShare[provider] = stats.Requests - } - } - if summary := summarizeShareUsage(providerShare, 6); summary != "" { - snap.Raw["provider_usage"] = summary - 
} - if summary := summarizeCountUsage(languageReqSummary, "req", 8); summary != "" { - snap.Raw["language_usage"] = summary - } - - snap.Raw["activity_days"] = strconv.Itoa(len(dailyReq)) - snap.Raw["activity_clients"] = strconv.Itoa(len(clientTotals)) - snap.Raw["activity_sources"] = strconv.Itoa(len(sourceTotals)) - snap.Raw["activity_providers"] = strconv.Itoa(len(providerTotals)) - snap.Raw["activity_languages"] = strconv.Itoa(len(languageTotals)) - snap.Raw["activity_endpoints"] = strconv.Itoa(len(endpointTotals)) - snap.SetAttribute("activity_days", snap.Raw["activity_days"]) - snap.SetAttribute("activity_clients", snap.Raw["activity_clients"]) - snap.SetAttribute("activity_sources", snap.Raw["activity_sources"]) - snap.SetAttribute("activity_providers", snap.Raw["activity_providers"]) - snap.SetAttribute("activity_languages", snap.Raw["activity_languages"]) - snap.SetAttribute("activity_endpoints", snap.Raw["activity_endpoints"]) -} - -func applyToolUsageSamples(samples []usageSample, snap *core.UsageSnapshot) { - today := time.Now().UTC().Format("2006-01-02") - totalCalls := 0.0 - todayCalls := 0.0 - toolTotals := make(map[string]*usageRollup) - dailyCalls := make(map[string]float64) - - for _, sample := range samples { - tool := sample.Name - if tool == "" { - tool = "unknown" - } - - acc, ok := toolTotals[tool] - if !ok { - acc = &usageRollup{} - toolTotals[tool] = acc - } - acc.Requests += sample.Requests - acc.CostUSD += sample.CostUSD - - totalCalls += sample.Requests - if sample.Date == today { - todayCalls += sample.Requests - } - if sample.Date != "" { - dailyCalls[sample.Date] += sample.Requests - } - } - - setUsedMetric(snap, "tool_calls_today", todayCalls, "calls", "today") - setUsedMetric(snap, "today_tool_calls", todayCalls, "calls", "today") - setUsedMetric(snap, "7d_tool_calls", totalCalls, "calls", "7d") - - keys := core.SortedStringKeys(toolTotals) - for _, tool := range keys { - stats := toolTotals[tool] - slug := sanitizeMetricSlug(tool) 
- setUsedMetric(snap, "tool_"+slug, stats.Requests, "calls", "7d") - setUsedMetric(snap, "toolcost_"+slug+"_usd", stats.CostUSD, "USD", "7d") - snap.Raw["tool_"+slug+"_name"] = tool - } - - if len(dailyCalls) > 0 { - snap.DailySeries["tool_calls"] = core.SortedTimePoints(dailyCalls) - } - - toolSummary := make(map[string]float64, len(toolTotals)) - for tool, stats := range toolTotals { - if stats.Requests > 0 { - toolSummary[tool] = stats.Requests - } - } - if summary := summarizeCountUsage(toolSummary, "calls", 8); summary != "" { - snap.Raw["tool_usage"] = summary - } -}