diff --git a/.cursor/tickets/notification-capture-audit.md b/.cursor/tickets/notification-capture-audit.md new file mode 100644 index 0000000..5eb93ef --- /dev/null +++ b/.cursor/tickets/notification-capture-audit.md @@ -0,0 +1,280 @@ +# Notification Capture — Forensic Audit + +Audit of `feat/notification-capture` against the scope at +`/Users/brendanplayford/teslashibe/agent-setup/.worktrees/notification-capture/.cursor/tickets/notification-capture-scope.md`. + +Performed under `.claude/rules/issue-audit-user-stories.mdc`. + +--- + +## 1. Findings (severity-ordered, evidence first) + +### High — none open +The two issues that would have shipped as data-loss bugs were caught +during the audit and fixed in this same branch. + +### Medium — closed in this branch + +#### M1. Network failure dropped notifications permanently — FIXED +- **Evidence:** `mobile/services/notificationSync.ts::flushNow` previously + called `drainBuffer()` (which empties the native store atomically) + then iterated upload chunks. If any `uploadBatch(chunk)` threw, the + remaining events were already gone. +- **Impact:** An agent driving between properties on flaky cell would + silently lose hours of notifications. Exactly the user the feature + is built for. +- **Fix:** Added `requeueEvents` to the native module + (`NotificationStore.kt`, `NotificationCaptureModule.kt`, + `src/index.ts`). On upload failure, `flushNow` requeues the unsent + tail back into the native buffer (older entries are dropped first if + the requeue would overflow MAX_BUFFER_SIZE = 500). Verified by + `npm run typecheck`. + +#### M2. Backend would 500 if `mcp` dispatch lost user_id for credential-less platforms — FIXED +- **Evidence:** `internal/mcp/server.go::resolveClient` previously + unconditionally called `decryptCredentials(userID, platform)`. 
Three + built-in platforms (`xviral`, `redditviral`, `codegen`) and the new + `notifications` platform have no per-user credentials, so this would + always emit `credential_missing` and refuse to call the tool. +- **Fix:** `PlatformBinding` gained a `NoCredentials bool` flag. + `resolveClient` skips decryption when set, and stamps user_id into + context via `withUserID` so tools can recover it via + `UserIDFromContext`. Covered by + `internal/mcp/context_test.go`, + `internal/mcp/platforms/notifications_test.go`. + +### Medium — open (deferred outside this branch) + +#### M3. Pre-existing template dependency drift +- **Evidence:** `npx expo-doctor` reports: + - Missing peer dep `react-native-worklets` (required by + `react-native-reanimated`). + - Duplicate `expo-font` (14.0.11 vs nested 55.0.6). + - 7 packages out of date vs SDK 55. +- **Why deferred:** Pre-existing in `main`, unrelated to notification + capture. Touching them widens this PR's surface area beyond the + feature scope. Recommend a follow-up PR `chore/sdk-55-realign` that + runs `npx expo install --check` and resolves duplicates. +- **Risk to APK build:** `react-native-worklets` is required by + `react-native-reanimated`, which ships in this template. EAS Build + will likely auto-install via `expo install` during the build phase, + but if it does not, the APK may crash on first launch. Mitigation: + run `npx expo install react-native-worklets` before triggering EAS + Build. + +### Low — design choices to be aware of + +#### L1. `notifications_pending_actions` ranking is heuristic, not learned +- **Where:** `backend/internal/notifications/service.go::classify`. +- **Behaviour:** Coarse priority via regex over urgency keywords + + `?` + `category=call`. The agent re-ranks at the LLM layer, so this + is intentionally a pre-filter, not the final ordering. +- **Risk:** False positives (e.g. "?" in a Zillow price-drop alert). + The agent will down-rank. 
Track in production logs; revisit if the + user complains the rollup buries real follow-ups. + +#### L2. Threading is by `(app_package, title)` +- **Where:** `backend/internal/notifications/store.go::GroupThreads`. +- **Behaviour:** Groups SMS/WhatsApp messages by sender name (which + Android puts in the notification title). Works well for 1:1 threads, + groups multiple senders into one row when an app shows + "3 new messages" in the title. +- **Mitigation:** The agent gets the raw `notifications_search` tool + for ad-hoc lookups, so this is recoverable at query time. + +#### L3. iOS not implemented +- **Why:** iOS does not allow third-party apps to read other apps' + notifications without MDM enrolment. This is documented as + out-of-scope for V1 in the scope doc and the mobile module declares + `platforms: ["android"]` so there is no surprise at build time. +- **Risk:** Zero — `isCaptureAvailable` short-circuits the React + surface to `FALLBACK_VALUE` on iOS. + +--- + +## 2. Gaps vs scope intent + +| Scope item | Status | Notes | +|---|---|---| +| `00003_notification_events.sql` migration | ✅ ships, inert if `NOTIFICATIONS_ENABLED=false` | Idempotent CREATE TABLE / INDEX / hypertable. | +| `notifications` platform on the MCP server | ✅ conditional plugin in `cmd/server/main.go` | Not in `platforms.All()`, by design — keeps `All()` signature stable. | +| 5 MCP tools (`list`, `search`, `threads`, `apps`, `pending_actions`) | ✅ implemented + schema-validated by `mcp_test.go` | All prefixed `notifications_`. | +| Expo native module | ✅ `modules/notification-capture` | Autolinking confirmed via `npx expo-modules-autolinking search --platform android`. | +| Mobile sync loop | ✅ + requeue-on-failure | Foreground only by design; background WorkManager is V2. | +| Settings UI | ✅ `app/(app)/capture.tsx` + entry row in `settings.tsx` | Hidden tab in `_layout.tsx` so deep-linking works. 
| +| Feature flag plumbing | ✅ `NOTIFICATIONS_ENABLED` (server) + `EXPO_PUBLIC_NOTIFICATIONS_ENABLED` (client) | Default OFF on both sides. | +| Provisioner system prompt addendum | ✅ `agent.NotificationsSystemPrompt()` | Only attached when `cfg.NotificationsEnabled`. | +| APK build | ⚠️ scaffolded only | No local Android SDK / JDK on this Mac. `eas.json` ships with `preview` and `production-apk` profiles + matching `npm run build:apk:*` scripts. Ship instructions below. | + +### Gaps not closed in this PR (V2) + +- Background sync via WorkManager — currently foreground-only. +- Encryption-at-rest for the on-device buffer (SharedPreferences are + app-private; root access required to read them, but full disk + encryption is the only OS-level guarantee). +- Per-app redaction rules (e.g. blur OTP codes before upload). +- Daily digest push notification driven by the agent. + +--- + +## 3. User stories + +### US-1 — Real-estate agent rollup +> As a real estate agent on Android, I want my texts, WhatsApp, +> Zillow, and email notifications captured passively so the agent can +> tell me what happened today and what needs follow-up without me +> retyping anything. + +### US-2 — Privacy control +> As a user installing the APK, I want a single master switch and an +> explicit allowlist so I decide which apps the agent can see, and I +> want to revoke at any time from the settings screen. + +### US-3 — Operator confidence +> As the operator forking this template for a non-realtor client, I +> want to ship the same template with `NOTIFICATIONS_ENABLED=false` +> and have zero notification rows, MCP tools, or UI surfaces appear, +> so the feature stays invisible until I opt in. + +### US-4 — Reliability under flaky network +> As a real estate agent driving between properties, I want unsent +> notifications to survive a dropped cell signal so my evening rollup +> is complete even if the day's connectivity was patchy. 
+ +### US-5 — Agent rollup quality +> As a user asking the agent "what happened today", I want a +> per-contact thread list, an action-item list ranked by urgency, and +> the ability to drill into specific contacts or topics with a +> follow-up question. + +--- + +## 4. Acceptance criteria + +### AC for US-1 — Capture & ingest +- **Given** the user has installed the APK and granted Notification + Access, **when** an SMS, WhatsApp, Gmail, or Zillow notification + posts on the device, **then** the event is appended to the local + buffer with `app_package`, `app_label`, `title`, `content`, + `category`, and `captured_at` populated. +- **Given** the device buffers ≥1 event, **when** the app + foregrounds OR 5 minutes elapse, **then** events are POSTed to + `/api/notifications/batch` and `markSynced` is called. +- **Negative:** **Given** the user has not granted Notification + Access, **then** `hasPermission()` returns false and the settings + screen shows the "Open system settings" CTA. + +### AC for US-2 — Privacy control +- **Given** the master switch is OFF, **when** a notification posts, + **then** the `NotificationCaptureService` returns early and nothing + is written to the buffer (verified by reading + `NotificationCaptureService.kt::onNotificationPosted`). +- **Given** an app's package is NOT in the allowlist, **when** that + app posts a notification, **then** the event is dropped at the + service layer. +- **Given** the user toggles the master switch OFF in settings, + **then** `setEnabled(false)` is called and `stopSync()` runs; + pending events stay in the buffer until either the user re-enables + capture or uninstalls the app. 
+ +### AC for US-3 — Default-off opt-in +- **Given** `NOTIFICATIONS_ENABLED=false` (the default in + `.env.example`), **when** the backend boots, **then** + - the `/api/notifications/*` routes are not mounted (verified by + the `if cfg.NotificationsEnabled` guard in `cmd/server/main.go`), + - the `notifications` MCP plugin is not appended to the dispatcher + plugin list, + - the agent provisioner uses `defaultSystemPrompt` (no rollup + addendum), + - the migration still runs but creates an idle hypertable. +- **Given** `EXPO_PUBLIC_NOTIFICATIONS_ENABLED` is not set, **when** + the mobile app boots, **then** the Settings screen omits the + "Notification Capture" card and the `/capture` route renders the + "feature disabled" empty state. + +### AC for US-4 — Reliability +- **Given** the buffer holds N events and the device has no network, + **when** `flushNow` runs, **then** the upload throws, all N events + are requeued via `requeueEvents`, and `markSynced` is NOT called. +- **Given** the buffer holds 250 events and the second chunk fails, + **when** `flushNow` runs, **then** the first 200 are accepted, the + remaining 50 are requeued, and `flushNow` returns 200. +- **Given** the buffer would exceed 500 events after a requeue, + **then** older events are dropped first (freshest preserved). + +### AC for US-5 — Agent rollup quality +- **Given** events exist for the requested time window, **when** the + agent calls `notifications_threads`, **then** results are returned + with `contact`, `app_label`, `message_count`, `last_at`, and + `preview` fields ordered by most-recent activity. +- **Given** an event whose content matches an urgency keyword (e.g. + "tonight", "showing at"), **when** the agent calls + `notifications_pending_actions`, **then** the row appears with + `priority="high"` and `reason="time_sensitive"`. 
+- **Negative:** **Given** an empty time window, **when** the agent + calls any `notifications_*` tool, **then** the response is the + empty result for that shape (`{events:[],count:0}` / + `{threads:[]}` / `{actions:[]}`), not an error. + +--- + +## 5. Risks and follow-up actions + +### Pre-merge / pre-ship + +- **R1 (medium).** APK build cannot be exercised on this Mac (no JDK, + no Android SDK). + - Mitigation: `expo prebuild --platform android` was dry-run and + succeeded; `eas.json` ships APK profiles. The remaining step is + cloud-build via `eas build --platform android --profile preview` + after `eas login`. +- **R2 (medium).** Pre-existing `expo-doctor` failures (M3 above). + Recommend running `npx expo install react-native-worklets` before + the first APK build. +- **R3 (low).** Database migration `00003` requires the + `timescaledb` extension. The previous two migrations already do, so + any environment that boots the existing template has it. New + deployments should verify `CREATE EXTENSION IF NOT EXISTS + timescaledb;` runs in `00001_init.sql`. + +### Post-ship operational + +- **R4.** Tail `notification_events` cardinality. If a single user + exceeds ~1M rows in a month, set up the Timescale retention policy + the scope doc calls out (drop chunks > 90 days). +- **R5.** Watch the agent's tool-call traces for `pending_actions` + false positives. If the heuristic misranks > 20% of items, replace + the regex pre-filter with an LLM call against the last hour's + events. +- **R6.** SharedPreferences buffer is plaintext. If the user enrols + in MDM or roots their phone the buffer is readable. If we ship to + enterprise clients, swap to `EncryptedSharedPreferences` (Jetpack + Security). + +### Ship checklist (run in order from `mobile/`) + +1. `npm install` +2. `npx expo install react-native-worklets` *(addresses R2)* +3. `eas login` *(if not already)* +4. 
`eas build --platform android --profile preview --non-interactive` + — produces a sideloadable APK URL when complete (~15 min cloud build). +5. Email/AirDrop the APK to the real estate agent, walk them through + enabling Notification Access in Settings → Apps → Special access. +6. Backend: set `NOTIFICATIONS_ENABLED=true` on the agent's instance, + apply migration 00003, restart server. +7. Watch `notification_events` row count grow as the device flushes. + +--- + +## 6. Verification matrix + +| Gate | Command | Result | +|---|---|---| +| Backend `go vet` | `cd backend && go vet ./...` | ✅ no diagnostics | +| Backend tests | `cd backend && go test ./internal/notifications/... ./internal/mcp/...` | ✅ all packages pass | +| Backend build | `cd backend && go build ./...` | ✅ | +| Mobile typecheck | `cd mobile && npm run typecheck` | ✅ | +| Native android scaffold | `cd mobile && npx expo prebuild --platform android --no-install --clean` | ✅ | +| Autolink discovery | `cd mobile && npx expo-modules-autolinking search --platform android` | ✅ `notification-capture` listed | +| APK build | `cd mobile && eas build --platform android --profile preview` | ⏸ requires `eas login` (no local SDK on this Mac) | +| Expo doctor | `cd mobile && npx expo-doctor` | ⚠ 3 pre-existing failures (R2/M3) | diff --git a/.cursor/tickets/notification-capture-scope.md b/.cursor/tickets/notification-capture-scope.md new file mode 100644 index 0000000..e650080 --- /dev/null +++ b/.cursor/tickets/notification-capture-scope.md @@ -0,0 +1,851 @@ +# `agent-setup` — Android Notification Capture + Communication Rollup Scope + +**Repo:** `github.com/teslashibe/agent-setup` +**Branch:** `feat/notification-capture` +**Affected packages:** `backend/internal/{notifications,mcp/platforms,config,db/migrations}`, `mobile/{modules/notification-capture,app,services}` +**Mirrors:** existing `agent-setup` conventions (Fiber + pgx + Goose, Expo Router + NativeWind, magic-link auth, MCP tool registry) +**Purpose:** 
Add passive Android notification capture to the Expo app so the Claude agent can produce daily communication rollups ("what happened" + "what needs to be done") from a user's real communication channels — texts, WhatsApp, email, Zillow, phone calls, etc. — without requiring per-platform API integrations. + +--- + +## Goals + +1. A thin **Expo Module** (Kotlin native + JS bridge) captures Android notifications passively via `NotificationListenerService`. +2. Captured notifications are batched and shipped to the Go backend over the existing authenticated API. +3. Notifications are stored in a **TimescaleDB hypertable** (time-series) for efficient time-range queries. +4. The Claude agent accesses notification data through **MCP tools** registered in the standard `platforms.All()` registry — no new wire protocol, no special orchestration. +5. The user configures which apps to monitor via a simple settings screen. Everything else is automatic. +6. **Zero behavior change required** from the end user. They keep using their existing apps. The system observes and summarises. + +## Non-Goals (V1) + +- iOS notification capture (iOS does not expose a `NotificationListenerService` equivalent without an app extension + heavy restrictions; out of scope for V1). +- macOS companion app (Phase 2 — `NSUserNotificationCenter` / Accessibility-based observer). +- Full message body capture beyond notification content (no SMS `READ_SMS` permission, no email IMAP integration — notification previews are sufficient for rollup quality). +- Real-time streaming to the agent (V1 is batch ingest → on-demand or scheduled rollup). +- Automatic action execution (e.g. "reply to Sarah for me") — V1 is read-only intelligence, not write-back. +- Play Store submission (V1 ships as a sideloaded APK for pilot users; Play Store compliance is a separate effort). + +--- + +## Background — Why Notification Capture + +The first pilot customer is a real estate agent in SF. 
Her pain: communication is fragmented across 6+ channels (SMS, WhatsApp, Zillow messages, email, phone calls, other real estate apps), and at the end of a busy day she can't reconstruct what happened or what needs follow-up. Dropped leads cost commissions. + +The notification listener approach solves this without platform-specific integrations. Android's `NotificationListenerService` is a system-level API that receives every notification from every app once the user grants the "Notification Access" permission. A single permission replaces the need for Gmail API, WhatsApp Business API, Zillow API, carrier SMS APIs, etc. The data quality (sender, subject/title, message preview, timestamp, source app) is sufficient for daily rollup and action item extraction. + +This is also the thinnest possible wedge into a larger product: once the user trusts the daily rollup, proactive features (missed response alerts, lead scoring, deal stage tracking) are incremental additions to the same data pipeline. + +--- + +## Architecture Decisions + +### 1. Expo Module, not a standalone native app + +The notification capture lives inside the existing `agent-setup` Expo app as a local Expo Module. This keeps authentication, navigation, and the chat interface unified. The user opens one app, configures capture in settings, and asks the agent for their rollup in the same chat they already use. + +**Rejected:** Separate native Android app that ships data to the same backend. Doubles the auth surface, doubles the app install, splits the UX for no benefit. + +### 2. NotificationListenerService, not Accessibility Service + +Android's `NotificationListenerService` is the narrowest permission that captures notification content from all apps. It requires no root, no special device admin, and no Play Store approval beyond a policy declaration. The user grants "Notification Access" once in system settings. 
+ +**Rejected:** Accessibility Service — captures richer screen content but triggers Play Store review flags, requires justification, and is disproportionate for V1's rollup-only use case. Can revisit for V2 "full screen observation" if needed. + +**Rejected:** `READ_SMS` + IMAP + per-platform APIs — higher fidelity per channel but requires N integrations, each with its own auth and maintenance burden. The notification path captures all channels with one permission. + +### 3. Batch ingest, not real-time streaming + +The device batches captured notifications locally and flushes to the backend periodically (default: every 5 minutes) or on app foreground. This is simpler, more battery-friendly, and sufficient for end-of-day rollup. + +**Rejected:** WebSocket / SSE push from device to backend. Adds complexity, battery drain, and connectivity edge cases for a feature that doesn't need sub-minute latency. + +### 4. Notification data is stored per-user, accessed via MCP tools + +Notifications land in a dedicated `notification_events` table, and the Claude agent reads them through MCP tools (`notifications_list`, `notifications_search`, etc.) registered in the standard `platforms.All()` registry. This means: + +- The agent discovers notification tools the same way it discovers LinkedIn or Reddit tools — via `tools/list` on the MCP server. +- No special orchestration, no scheduled cron, no separate "rollup service." The user asks "What happened today?" in the chat, and the agent calls the tools. +- The system prompt teaches the agent how to structure a rollup from notification data. + +**Rejected:** Dedicated rollup endpoint that runs Claude server-side on a schedule and sends a push notification. Cleaner UX for the "6pm daily summary" use case, but adds a new execution path outside the managed agent system. Can be added later as a thin cron that creates a session and sends a prompt — the MCP tools are the same either way. + +### 5. 
No credentials needed — internal platform binding + +Unlike every other platform in `platforms.All()`, notification capture doesn't need user-supplied credentials. The data is pushed by the user's own device, already authenticated via their JWT. The platform binding uses `nullValidator` (like `xviral` or `codegen`) and the `NewClient` constructor receives the database pool + user ID, not a credential blob. + +### 6. App allowlist is stored on-device, not on the server + +The list of apps the user chooses to monitor (e.g. "Gmail, WhatsApp, Messages, Zillow") is stored locally on the device (AsyncStorage / SecureStore). The backend receives notifications only for allowed apps — filtering happens at the source. This keeps the privacy posture simple: the server never sees notifications from apps the user didn't explicitly opt in. + +**Rejected:** Server-side allowlist. Adds a round-trip on every notification and a settings sync problem. The device is the right place for this gate. + +--- + +## Domain Model + +### New table + +```sql +-- +goose Up + +CREATE TABLE notification_events ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + app_package TEXT NOT NULL, -- e.g. "com.google.android.gm" + app_label TEXT NOT NULL, -- human-readable, e.g. "Gmail" + title TEXT, -- notification title (sender name, email subject, etc.) + content TEXT, -- notification body (message preview) + category TEXT, -- Android notification category if available (msg, email, call, etc.) 
+ captured_at TIMESTAMPTZ NOT NULL, -- device-side timestamp + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +SELECT create_hypertable('notification_events', 'captured_at'); + +CREATE INDEX idx_notif_user_time ON notification_events (user_id, captured_at DESC); +CREATE INDEX idx_notif_user_app ON notification_events (user_id, app_package, captured_at DESC); +CREATE INDEX idx_notif_content ON notification_events USING gin (to_tsvector('english', coalesce(title, '') || ' ' || coalesce(content, ''))); + +-- +goose Down +DROP TABLE IF EXISTS notification_events; +``` + +**Why TimescaleDB hypertable:** Every query is time-ranged ("give me today's notifications," "what happened in the last 4 hours"). Hypertable chunking gives efficient range scans and automatic chunk-based retention (e.g. drop data older than 90 days) without manual partition management. + +**Why GIN full-text index:** The `notifications_search` MCP tool needs to find notifications by keyword ("find messages from Sarah about the Sunset listing"). Postgres `to_tsvector` + `ts_query` is sufficient for V1; no external search engine needed. 
### Go types (`backend/internal/notifications/model.go`) + +```go +package notifications + +import "time" + +type Event struct { + ID int64 `json:"id"` + UserID string `json:"user_id"` + AppPackage string `json:"app_package"` + AppLabel string `json:"app_label"` + Title string `json:"title,omitempty"` + Content string `json:"content,omitempty"` + Category string `json:"category,omitempty"` + CapturedAt time.Time `json:"captured_at"` + CreatedAt time.Time `json:"created_at"` +} + +type BatchInput struct { + Events []EventInput `json:"events"` +} + +type EventInput struct { + AppPackage string `json:"app_package"` + AppLabel string `json:"app_label"` + Title string `json:"title,omitempty"` + Content string `json:"content,omitempty"` + Category string `json:"category,omitempty"` + CapturedAt time.Time `json:"captured_at"` +} +``` + +--- + +## API Surface + +### Notification ingest + +| Method | Path | Auth | Description | +|---|---|---|---| +| `POST` | `/api/notifications/batch` | Bearer JWT | Receive a batch of notification events from the device. Body: `BatchInput`. Deduplicates on `(user_id, app_package, captured_at, title)` using `ON CONFLICT DO NOTHING`. Returns `{ "accepted": <count> }`. | + +Rate limit: **60 requests/minute per user** (at 5-minute flush intervals, this is 12x headroom). Configurable via `NOTIFICATIONS_INGEST_RATE_LIMIT`. + +### Notification query (REST, for the mobile app's own UI if desired) + +| Method | Path | Auth | Description | +|---|---|---|---| +| `GET` | `/api/notifications` | Bearer JWT | List recent notifications for the authenticated user. Query params: `since` (RFC3339), `until` (RFC3339), `app` (filter by app_package), `q` (full-text search), `limit` (default 50, max 200). | +| `GET` | `/api/notifications/apps` | Bearer JWT | List distinct apps that have sent notifications, with count per app. Used by the settings screen to show "these apps are being captured." 
| + +These REST endpoints are optional — the agent uses MCP tools, not REST — but they're useful for a future mobile "notification log" screen and for debugging. + +--- + +## MCP Tools + +Notification tools are registered in the MCP registry alongside platform tools. The agent discovers them via `tools/list` and calls them like any other tool. Tool names are prefixed `notifications_` following the existing `{platform}_{action}` convention. + +### Tool definitions + +| Tool name | Input | Output | Description | +|---|---|---|---| +| `notifications_list` | `since?: string` (RFC3339), `until?: string`, `app_package?: string`, `limit?: int` (default 50) | `PageOf[Event]` | List notifications in reverse chronological order, optionally filtered by time range and app. Primary tool for building rollups. | +| `notifications_search` | `query: string`, `since?: string`, `until?: string`, `app_package?: string`, `limit?: int` | `PageOf[Event]` | Full-text search across notification title + content. "Find all messages from Sarah about the Sunset listing." | +| `notifications_threads` | `since?: string`, `until?: string`, `app_package?: string`, `group_by?: string` ("contact" \| "app", default "contact") | `[]Thread` | Group notifications into conversation-like threads by extracting contact names from titles and clustering by app + contact + time proximity. Returns threads with message count, last message time, and preview. | +| `notifications_apps` | `since?: string`, `until?: string` | `[]AppSummary` | List all apps that sent notifications in the time range, with count and latest notification time per app. Meta-tool for the agent to understand what data is available. | +| `notifications_pending_actions` | `since?: string`, `until?: string` | `[]ActionItem` | Heuristic extraction of likely action items: unanswered messages (notification with no outbound reply within N hours), questions detected in content, time-sensitive keywords ("deadline," "offer expires," "showing at"). 
Returns ranked items with source notification reference. | + +### Thread and ActionItem types + +```go +type Thread struct { + Contact string `json:"contact"` + AppLabel string `json:"app_label"` + AppPackage string `json:"app_package"` + MessageCount int `json:"message_count"` + FirstAt string `json:"first_at"` + LastAt string `json:"last_at"` + Preview string `json:"preview"` +} + +type AppSummary struct { + AppPackage string `json:"app_package"` + AppLabel string `json:"app_label"` + Count int `json:"count"` + LastAt string `json:"last_at"` +} + +type ActionItem struct { + Priority string `json:"priority"` // "high", "medium", "low" + Summary string `json:"summary"` + Contact string `json:"contact,omitempty"` + AppLabel string `json:"app_label"` + CapturedAt string `json:"captured_at"` + Reason string `json:"reason"` // "unanswered", "question", "time_sensitive", "follow_up" +} +``` + +### Provider pattern + +The notification MCP tools live in `backend/internal/notifications/mcp/` (not a separate `*-go` repo, since there's no external API client to wrap). The `Provider` implements `mcptool.Provider` with `Platform() == "notifications"`. + +```go +// backend/internal/notifications/mcp/mcp.go +package notificationsmcp + +import "github.com/teslashibe/mcptool" + +type Provider struct{} + +func (Provider) Platform() string { return "notifications" } + +func (Provider) Tools() []mcptool.Tool { + return append(append(append(append( + listTools, + searchTools...), + threadTools...), + appTools...), + actionTools..., + ) +} +``` + +### Client shape + +Unlike platform bindings where `NewClient` builds an API client from credentials, the notification "client" is a thin wrapper around the store that's scoped to the current user. User ID is resolved from the MCP request context (JWT in path), same as other platforms. 
```go +// backend/internal/notifications/mcp/client.go +type Client struct { + Store *notifications.Store + UserID string +} +``` + +The `NewClient` in `platforms.go` constructs this from the request context: + +```go +func Notifications(pool *pgxpool.Pool) Plugin { + return Plugin{ + Binding: mcp.PlatformBinding{ + Provider: notificationsmcp.Provider{}, + NewClient: func(ctx context.Context, _ json.RawMessage) (any, error) { + userID := mcp.UserIDFromContext(ctx) + return &notificationsmcp.Client{ + Store: notifications.NewStore(pool), + UserID: userID, + }, nil + }, + }, + Validator: nullValidator{platform: "notifications"}, + } +} +``` + +**Note:** This requires threading `userID` through to `NewClient` via context. Currently `Server.CallTool` already has `userID` from the JWT resolution. Verify that `NewClient` receives a context carrying the user ID — if not, the context propagation from `transport.go → server.go → NewClient` needs a small plumbing addition. Check `mcp.Server.CallTool` to confirm `userID` is available on the context passed to `NewClient`. 
+ +--- + +## Android Notification Capture Module + +### Expo Module structure + +``` +mobile/modules/notification-capture/ +├── expo-module.config.json +├── src/ +│ ├── index.ts # re-export +│ └── NotificationCaptureModule.ts # JS bridge: start/stop, getApps, setAllowlist, flush +└── android/ + ├── build.gradle.kts # Expo Module Gradle config + └── src/main/ + ├── AndroidManifest.xml # NotificationListenerService declaration + └── java/com/teslashibe/notificationcapture/ + ├── NotificationCaptureModule.kt # Expo Module definition — exposes JS API + ├── NotificationCaptureService.kt # NotificationListenerService impl — captures notifications + └── NotificationStore.kt # Local SQLite buffer for batching before upload +``` + +### expo-module.config.json + +```json +{ + "platforms": ["android"], + "android": { + "modules": ["com.teslashibe.notificationcapture.NotificationCaptureModule"] + } +} +``` + +### Expo app config plugin addition + +`mobile/app.config.ts` — add the local module to `plugins`: + +```ts +plugins: [ + "expo-router", + "expo-web-browser", + "expo-secure-store", + "./modules/notification-capture", +], +``` + +### Android NotificationListenerService + +The core Kotlin class: + +```kotlin +// NotificationCaptureService.kt +class NotificationCaptureService : NotificationListenerService() { + + override fun onNotificationPosted(sbn: StatusBarNotification) { + val pkg = sbn.packageName + if (!isAllowlisted(pkg)) return + if (sbn.isOngoing) return // skip persistent/ongoing notifications + + val extras = sbn.notification.extras + val event = NotificationEvent( + appPackage = pkg, + appLabel = getAppLabel(pkg), + title = extras.getCharSequence(Notification.EXTRA_TITLE)?.toString(), + content = extras.getCharSequence(Notification.EXTRA_TEXT)?.toString(), + category = sbn.notification.category, + capturedAt = Instant.ofEpochMilli(sbn.postTime).toString(), + ) + + NotificationStore.insert(event) + } +} +``` + +Key behaviors: + +- **Allowlist filtering** happens 
immediately in `onNotificationPosted` — non-allowlisted apps are discarded before storage.
- **Ongoing notifications** (music players, navigation, etc.) are skipped.
- **Deduplication:** StatusBarNotification has a `key` field. The local store deduplicates on `(key, postTime)` to handle notification updates without creating duplicate events.
- **Local buffer:** `NotificationStore` is a simple SQLite table on device that accumulates events between flushes.

### JS bridge API

```ts
// NotificationCaptureModule.ts
import { NativeModule, requireNativeModule } from 'expo-modules-core';

interface NotificationEvent {
  app_package: string;
  app_label: string;
  title: string | null;
  content: string | null;
  category: string | null;
  captured_at: string; // ISO8601
}

interface NotificationCaptureModule extends NativeModule {
  isServiceEnabled(): boolean;
  openNotificationAccessSettings(): void;
  getInstalledApps(): Promise<Array<{ package: string; label: string }>>;
  setAllowlist(packages: string[]): void;
  getAllowlist(): string[];
  getPendingCount(): number;
  flush(): Promise<NotificationEvent[]>; // returns events and clears local buffer
  setEnabled(enabled: boolean): void;
}

export default requireNativeModule('NotificationCapture');
```

### Flush and upload flow

The mobile app runs a flush loop:

```ts
// services/notifications.ts
const FLUSH_INTERVAL_MS = 5 * 60 * 1000; // 5 minutes

async function flushNotifications(): Promise<void> {
  const events = await NotificationCaptureModule.flush();
  if (events.length === 0) return;
  await api.post('/api/notifications/batch', { events });
}
```

This runs:
- On a `setInterval` when the app is in the foreground.
- On `AppState` change to "active" (when the app is foregrounded).
- Before the user opens the chat screen (so the agent has the latest data).
+ +**No background task scheduler in V1.** Expo's managed workflow doesn't support arbitrary background tasks without `expo-task-manager` + `expo-background-fetch`, which are coarse-grained (minimum 15-minute intervals, OS-controlled). For V1, foreground-only flush is sufficient — the user opens the app to ask for their rollup, which triggers a flush. Background upload is a V2 enhancement. + +--- + +## Mobile UI + +### New screen: `app/(app)/capture.tsx` + +A settings screen for notification capture configuration. Accessible from the Settings tab. + +**Layout:** + +1. **Status banner** — shows whether Notification Access is granted. If not, a prominent button: "Enable Notification Access" → calls `NotificationCaptureModule.openNotificationAccessSettings()` which opens the Android system settings page. + +2. **Master toggle** — "Capture notifications" on/off. Calls `setEnabled(boolean)`. + +3. **App selector** — List of installed apps (from `getInstalledApps()`), each with a toggle. Defaults to all OFF; user opts in per app. Common communication apps are surfaced at the top with suggested labels: + - Messages (com.google.android.apps.messaging) + - WhatsApp (com.whatsapp) + - Gmail (com.google.android.gm) + - Outlook (com.microsoft.office.outlook) + - Phone (com.google.android.dialer) + - Zillow (com.zillow.android.zillowmap) + + The rest appear alphabetically below. User's selections are persisted to AsyncStorage. + +4. **Stats footer** — "X notifications captured today · Last sync: Y minutes ago" + +### Updated screen: `app/(app)/settings.tsx` + +Add a row: "Notification Capture" with a chevron → navigates to `/(app)/capture`. 
+

### Updated layout: `app/(app)/_layout.tsx`

Add `capture` as a hidden route (like `chat` and `platforms` — navigable but not a visible tab):

```ts
<Tabs.Screen name="capture" options={{ href: null }} />
```

---

## Agent System Prompt Addition

The provisioner's system prompt (`provision.go` `defaultSystemPrompt`) is extended to teach the agent about notification tools:

```
You also have access to the user's captured device notifications via tools prefixed
with notifications_. These contain communication activity from the user's phone — texts,
emails, WhatsApp messages, app notifications, missed calls, etc.

When the user asks for a "rollup," "summary," "what happened today," or "what do I need
to do," use these tools to build a structured response:

1. Call notifications_apps to understand which channels had activity.
2. Call notifications_threads to group activity by contact and conversation.
3. Call notifications_pending_actions to surface items that may need follow-up.

Structure your rollup as:
- **What happened** — organised by contact or conversation, most important first.
- **What needs attention** — action items ranked by urgency, with the source message quoted.

Keep summaries concise. Quote specific message content when it adds clarity (e.g. "Sarah
asked: 'Can you send the comps for 742 Evergreen?'"). Flag time-sensitive items prominently.
+``` + +--- + +## Backend Implementation Plan + +### New packages + +``` +backend/internal/ +├── notifications/ +│ ├── model.go # Event, BatchInput, EventInput, Thread, AppSummary, ActionItem +│ ├── store.go # pgx queries: InsertBatch, List, Search, GroupThreads, ListApps +│ ├── service.go # Business logic: dedup, full-text query building, action extraction +│ ├── handler.go # Fiber: POST /api/notifications/batch, GET /api/notifications, GET .../apps +│ └── mcp/ +│ ├── mcp.go # Provider implementation +│ ├── client.go # Client (store wrapper scoped to user) +│ ├── list.go # notifications_list tool +│ ├── search.go # notifications_search tool +│ ├── threads.go # notifications_threads tool +│ ├── apps.go # notifications_apps tool +│ └── actions.go # notifications_pending_actions tool +``` + +### Store API + +```go +type Store struct { + pool *pgxpool.Pool +} + +func NewStore(pool *pgxpool.Pool) *Store + +func (s *Store) InsertBatch(ctx context.Context, userID string, events []EventInput) (int, error) +func (s *Store) List(ctx context.Context, userID string, opts ListOpts) ([]Event, error) +func (s *Store) Search(ctx context.Context, userID, query string, opts ListOpts) ([]Event, error) +func (s *Store) GroupThreads(ctx context.Context, userID string, opts ThreadOpts) ([]Thread, error) +func (s *Store) ListApps(ctx context.Context, userID string, since, until time.Time) ([]AppSummary, error) +func (s *Store) PendingActions(ctx context.Context, userID string, opts ActionOpts) ([]ActionItem, error) + +type ListOpts struct { + Since *time.Time + Until *time.Time + AppPackage string + Limit int +} + +type ThreadOpts struct { + Since *time.Time + Until *time.Time + AppPackage string + GroupBy string // "contact" | "app" +} + +type ActionOpts struct { + Since *time.Time + Until *time.Time +} +``` + +### Thread grouping logic (`store.go`) + +Threads are grouped by extracting the contact name from the notification `title` field (most messaging apps put the sender name in the 
title) and clustering by `(app_package, title, time_bucket)`. The SQL: + +```sql +SELECT + app_package, + app_label, + title AS contact, + count(*) AS message_count, + min(captured_at) AS first_at, + max(captured_at) AS last_at, + (array_agg(content ORDER BY captured_at DESC))[1] AS preview +FROM notification_events +WHERE user_id = $1 + AND captured_at >= $2 + AND captured_at <= $3 +GROUP BY app_package, app_label, title +ORDER BY max(captured_at) DESC; +``` + +This is a rough heuristic — "title" in most messaging apps is the contact name, but in email it's the subject line. The agent's natural language processing smooths over these inconsistencies when producing the rollup. + +### Action item extraction (`store.go` / `service.go`) + +V1 action extraction is heuristic, not LLM-based (keeps it fast and free of extra API calls): + +1. **Unanswered messages:** Notifications from messaging apps where no outbound notification from the same app followed within a configurable window (default 2 hours). Heuristic: if the user's own outbound message would trigger a "message sent" notification, its absence implies no reply. +2. **Questions:** Content containing `?` from known communication apps. +3. **Time-sensitive keywords:** Content matching patterns like `deadline`, `expires`, `by tomorrow`, `showing at`, `offer`, `closing`, `inspection`, `ASAP`, `urgent`. +4. **Missed calls:** Notifications from the phone dialer app with category `call` or title matching "Missed call." + +Priority ranking: missed calls and time-sensitive keywords → unanswered questions → other unanswered messages. 
+

---

## Platform Registration

### `platforms.go` addition

```go
func Notifications(pool *pgxpool.Pool) Plugin {
	return Plugin{
		Binding: mcp.PlatformBinding{
			Provider: notificationsmcp.Provider{},
			NewClient: func(ctx context.Context, _ json.RawMessage) (any, error) {
				userID := mcp.UserIDFromContext(ctx)
				if userID == "" {
					return nil, errors.New("notifications: user ID not available in context")
				}
				return &notificationsmcp.Client{
					Store:  notifications.NewStore(pool),
					UserID: userID,
				}, nil
			},
		},
		Validator: nullValidator{platform: "notifications"},
	}
}
```

### `All()` — unchanged

**No breaking change to `All()`.** `Notifications(pool)` is **not** added to `All()` — doing so would force a `*pgxpool.Pool` parameter onto its signature. Instead, it is conditionally appended in `cmd/server/main.go` only when `NOTIFICATIONS_ENABLED=true`:

```go
plugins := platforms.All()
if cfg.NotificationsEnabled {
	plugins = append(plugins, platforms.Notifications(pool))
}
```

This keeps `All()` zero-argument (no signature change), keeps the notification dependency out of forks that don't need it, and follows the same pattern as the `TEAMS_ENABLED` gate on team routes. Forks that never set the flag never touch this code path.

### Context propagation for `userID`

`mcp.Server.CallTool` currently receives `userID` as a parameter and uses it for credential lookup. The `NewClient` function receives `ctx` and `raw` (credential JSON). To make `userID` available in `NewClient`'s context, add it to the context in `Server.CallTool` before calling `NewClient`:

```go
// In server.go CallTool, before NewClient:
ctx = withUserID(ctx, userID)
```

This is a one-line addition. Define `withUserID` / `UserIDFromContext` as context key helpers in the `mcp` package.
+ +--- + +## Configuration + +New env vars (additive to `backend/.env.example`): + +```bash +# Notification Capture +NOTIFICATIONS_ENABLED=true # gates /api/notifications/* routes and MCP tools +NOTIFICATIONS_INGEST_RATE_LIMIT=60 # max batch uploads per minute per user +NOTIFICATIONS_RETENTION_DAYS=90 # TimescaleDB chunk retention (0 = keep forever) +NOTIFICATIONS_ACTION_REPLY_WINDOW_HOURS=2 # how long before an unreplied message is flagged +NOTIFICATIONS_DEFAULT_PAGE_SIZE=50 # default limit for list/search queries +NOTIFICATIONS_MAX_PAGE_SIZE=200 # hard cap on limit +``` + +When `NOTIFICATIONS_ENABLED=false` (the default): +- `/api/notifications/*` routes are not mounted (not 404 — the routes simply don't exist). +- Notification MCP tools are not registered — the agent never sees them in `tools/list`. +- The mobile capture settings screen shows "Notification capture is not available for this deployment." +- The `Notifications(pool)` plugin is not appended to the platform list. + +**Migration note:** The `00003_notification_events.sql` migration ships in the template and runs on all forks (Goose runs all migrations in order). The table is created but remains empty and adds zero overhead for forks that don't enable the feature. This is the same pattern as `teams` and `team_invites` tables which exist in every fork regardless of `TEAMS_ENABLED`. If a fork wants to explicitly skip it, they can delete the migration file before first deploy — but there's no cost to leaving it. + +`backend/internal/config/config.go` adds these as typed fields on `Config`. `NOTIFICATIONS_ENABLED` defaults to `false` in `Load()` so existing forks are unaffected. 
+

---

## Mobile Implementation Plan

### New service module: `services/notifications.ts`

```ts
export interface NotificationEvent {
  app_package: string;
  app_label: string;
  title: string | null;
  content: string | null;
  category: string | null;
  captured_at: string;
}

export interface AppSummary {
  app_package: string;
  app_label: string;
  count: number;
  last_at: string;
}

// Ingest
export async function uploadBatch(events: NotificationEvent[]): Promise<{ accepted: number }>;

// Query (optional — agent uses MCP tools, but useful for a future log screen)
export async function listNotifications(opts?: {
  since?: string;
  until?: string;
  app?: string;
  q?: string;
  limit?: number;
}): Promise<NotificationEvent[]>;

export async function listCapturedApps(): Promise<AppSummary[]>;
```

### Flush manager: `services/notificationSync.ts`

```ts
// Manages the flush interval and app lifecycle hooks.
// Started by the capture settings screen or on app boot if capture is enabled.

export function startSync(): void; // begins setInterval + AppState listener
export function stopSync(): void; // clears interval
export function flushNow(): Promise<void>; // immediate flush (called before opening chat)
```

### New provider: `providers/NotificationCaptureProvider.tsx`

Wraps the native module state and sync manager. Sits inside `AuthSessionProvider` (needs JWT for uploads). Exposes:

```ts
type NotificationCaptureContextValue = {
  isAvailable: boolean; // Android only
  isEnabled: boolean; // master toggle
  hasPermission: boolean; // Notification Access granted
  allowlist: string[]; // app packages being monitored
  pendingCount: number; // events in local buffer
  lastSyncAt: Date | null;
  setEnabled: (enabled: boolean) => void;
  setAllowlist: (packages: string[]) => void;
  openPermissionSettings: () => void;
  flushNow: () => Promise<void>;
};
```

On iOS / web, `isAvailable` is `false` and all other fields are no-ops.
+ +--- + +## Privacy and Security + +### Data in transit +All notification data is sent over HTTPS to the backend, authenticated with the user's JWT. Same transport security as every other API call. + +### Data at rest +Notification content is stored in plaintext in Postgres (not encrypted at rest beyond disk-level encryption). This is consistent with how agent session messages are stored today. If a deployment requires field-level encryption, the same `CREDENTIALS_ENCRYPTION_KEY` + `crypto.go` pattern from the credentials package can be applied — but this is out of scope for V1. + +### Data retention +`NOTIFICATIONS_RETENTION_DAYS` controls automatic cleanup via TimescaleDB's `drop_chunks()` policy: + +```sql +SELECT add_retention_policy('notification_events', INTERVAL '90 days'); +``` + +This is set up in the migration or as a post-migration step. + +### User control +- The user explicitly grants Notification Access in Android system settings. +- The user explicitly selects which apps to monitor. +- The user can disable capture at any time via the in-app toggle. +- The user can revoke Notification Access in system settings, which immediately stops all capture. + +### Pilot deployment +V1 is a sideloaded APK for a single pilot user. No Play Store review, no broad distribution. The privacy posture is a direct trust relationship with a known user. + +--- + +## Implementation Order + +Suggested commit/PR sequence — each item independently testable: + +1. **Schema + migration** (`00003_notification_events.sql`) + Goose smoke test. +2. **`internal/notifications` package** — model, store (`InsertBatch`, `List`, `Search`, `GroupThreads`, `ListApps`, `PendingActions`), service. Unit tests for dedup, full-text search, thread grouping, action extraction heuristics. +3. **`internal/notifications/handler.go`** — `POST /api/notifications/batch`, `GET /api/notifications`, `GET /api/notifications/apps`. Wiring in `cmd/server/main.go` under the auth middleware. +4. 
**Context propagation** — Add `UserIDFromContext` / `withUserID` to `internal/mcp` package. Update `Server.CallTool` to inject user ID into context before `NewClient`.
5. **`internal/notifications/mcp/`** — Provider, Client, all five tool definitions (`list`, `search`, `threads`, `apps`, `pending_actions`). Follow `mcptool.Define[*Client, Input]` pattern.
6. **Platform registration** — Add the `Notifications(pool)` constructor to `platforms.go`; conditionally append it at the call site in `main.go` when `NOTIFICATIONS_ENABLED=true` (`All()` and its signature are unchanged — see Decision Point 7).
7. **System prompt update** — Add notification-aware instructions to `provision.go` `defaultSystemPrompt`.
8. **Expo Module: native Kotlin** — `NotificationCaptureService.kt`, `NotificationCaptureModule.kt`, `NotificationStore.kt`, `AndroidManifest.xml`, `build.gradle.kts`.
9. **Expo Module: JS bridge** — `NotificationCaptureModule.ts`, `index.ts`, `expo-module.config.json`.
10. **Mobile services** — `services/notifications.ts`, `services/notificationSync.ts`.
11. **Mobile provider** — `providers/NotificationCaptureProvider.tsx`, wire into `app/_layout.tsx`.
12. **Mobile UI** — `app/(app)/capture.tsx` settings screen, settings row in `app/(app)/settings.tsx`.
13. **App config** — Update `app.config.ts` plugins array.
14. **Integration test** — End-to-end: mock notification → local store → flush → backend ingest → MCP tool query → verify rollup data.
15. **README update** — New "Notification Capture" section.

Estimated effort: **5–7 working days** (backend 2–3, native module 2, mobile UI/integration 1–2).

---

## Testing Plan

### Backend

- **Store unit tests:**
  - `InsertBatch` deduplication: same `(user_id, app_package, captured_at, title)` inserted twice → only one row.
  - `List` with time range filters.
  - `Search` full-text: "Sarah Sunset listing" matches a notification with those words in title/content.
  - `GroupThreads` clusters correctly by contact.
  - `PendingActions` extracts questions, missed calls, time-sensitive keywords.
+ +- **Handler tests:** + - Batch upload with valid JWT → 200 + accepted count. + - Batch upload without auth → 401. + - Batch upload with empty events array → 200, accepted: 0. + - Rate limiting: exceed 60 requests/minute → 429. + +- **MCP tool tests:** + - Register notification tools in a test registry → `tools/list` includes them. + - `tools/call` `notifications_list` → returns stored events. + - `tools/call` `notifications_search` with query → returns matching events. + +### Mobile + +- `npm run typecheck` clean across new modules and screens. +- Manual smoke test on Android (physical device or emulator): + - Install APK → grant Notification Access → select apps → send a test SMS → verify notification captured → open chat → ask "what happened?" → agent produces rollup. + - Disable capture → send SMS → verify no new notifications captured. + - Revoke Notification Access in system settings → verify app shows "permission not granted" banner. + +### Integration + +- Full loop: Android emulator with notification injection (`adb shell service call notification`) → flush → backend query → MCP tool response verification. + +--- + +## Out of Scope (V1) + +- iOS notification capture (no equivalent API without app extension + restricted background modes). +- macOS companion app. +- Full SMS body capture (requires `READ_SMS` permission + additional privacy considerations). +- Email IMAP integration (notification previews are sufficient for V1 rollup quality). +- Background sync via `expo-background-fetch` (foreground-only flush for V1). +- Scheduled daily rollup push notification (the user asks the agent; there's no autonomous trigger). +- Write-back actions ("reply to Sarah for me"). +- Play Store submission and policy compliance review. +- Field-level encryption of notification content at rest. +- Multi-device notification deduplication (V1 assumes one device per user). +- Web/desktop notification capture (browser Notification API is too restricted). 
+ +--- + +## Decision Points (confirm or override) + +1. **Expo Module vs bare native app.** Plan uses a local Expo Module inside the existing app. *Alternative: separate native Android app with its own auth.* Confirm. +2. **NotificationListenerService only (no AccessibilityService, no READ_SMS).** Notification previews are sufficient for rollup. *Alternative: add READ_SMS for full message bodies.* Confirm. +3. **Batch ingest with foreground-only flush.** No background sync in V1. *Alternative: add `expo-background-fetch` for 15-minute background flushes.* Confirm. +4. **App allowlist stored on-device.** Server never sees notifications from non-allowlisted apps. *Alternative: server-side allowlist with sync.* Confirm. +5. **MCP tools for agent access (no dedicated rollup endpoint/cron).** The user triggers rollups by asking the agent. *Alternative: scheduled cron that auto-generates a daily rollup and sends a push notification.* Confirm. +6. **Heuristic action extraction (no LLM calls in the tool).** `pending_actions` uses keyword/pattern matching, not Claude. The agent's own reasoning handles nuance. *Alternative: call Claude inside the tool for higher-quality extraction.* Confirm. +7. **Conditional registration outside `All()`.** `Notifications(pool)` is appended in `main.go` only when `NOTIFICATIONS_ENABLED=true`, not added to `All()`. *Alternative: add to `All()` unconditionally and let `nullValidator` handle the rest.* Confirm. +8. **90-day retention default.** Configurable via `NOTIFICATIONS_RETENTION_DAYS`. *Alternative: no auto-retention (keep everything).* Confirm. +9. **No credential requirement.** Notification tools use `nullValidator` — no entry in the Platform Connections UI. *Alternative: show a "Device" entry in connections with an on/off toggle.* Confirm. +10. **Single device per user.** No dedup logic for the same notification arriving from multiple devices. 
*Alternative: dedup on `(user_id, app_package, captured_at, title, content)` which handles multi-device naturally.* Confirm. + +--- + +## Open Questions (need answers before commit 1) + +- **Notification content truncation.** Android truncates notification text in `EXTRA_TEXT` (typically 1-2 lines for messaging apps). For longer messages, `EXTRA_BIG_TEXT` or `EXTRA_TEXT_LINES` may contain more. Should the service attempt to read expanded notification styles (`BigTextStyle`, `InboxStyle`, `MessagingStyle`)? This is more code but significantly better content quality for email and group chats. +- **Outbound message detection.** The `pending_actions` tool flags "unanswered" messages, but detecting outbound replies via notifications is unreliable (not all apps generate "message sent" notifications). Should we accept this limitation in V1, or add `READ_SMS` specifically for SMS outbound detection? +- **Team scoping.** Should notification data be team-scoped (like sessions) or user-scoped (always private)? Recommendation: user-scoped only — notification data is inherently personal and should not be visible to team admins. +- **Pilot user onboarding.** Should we build a one-time setup wizard (grant permission → select apps → confirm) or just the settings screen? The wizard is better UX for a non-technical user but more UI work. diff --git a/.gitignore b/.gitignore index 558bc85..5bd15de 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,9 @@ mobile/.expo-shared/ mobile/dist/ mobile/web-build/ mobile/.env.local +# Native projects are generated by `expo prebuild` and not checked in. 
+mobile/android/ +mobile/ios/ # git worktrees .worktrees/ diff --git a/backend/.env.example b/backend/.env.example index 4d491f0..bf75410 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -38,3 +38,17 @@ TEAMS_ENABLED=true TEAMS_DEFAULT_MAX_SEATS=25 TEAMS_INVITE_TTL_HOURS=168 TEAMS_INVITE_FROM_NAME=Agent App + +# Notifications — Android NotificationListenerService capture pipeline. +# When NOTIFICATIONS_ENABLED is false (default) the /api/notifications/* +# routes are not mounted, the notifications_* MCP tools are not registered +# on the per-user agent, and the mobile capture screen renders an +# "unavailable" state. The 00003_notification_events migration ships in +# every fork; the table simply stays empty. Enable this for clients who +# need the communication-rollup feature (e.g. real-estate-agent fork). +NOTIFICATIONS_ENABLED=false +NOTIFICATIONS_INGEST_RATE_LIMIT=60 +NOTIFICATIONS_INGEST_RATE_WINDOW_SECONDS=60 +NOTIFICATIONS_DEFAULT_PAGE_SIZE=50 +NOTIFICATIONS_MAX_PAGE_SIZE=200 +NOTIFICATIONS_ACTION_REPLY_WINDOW_HOURS=2 diff --git a/backend/cmd/server/main.go b/backend/cmd/server/main.go index c28ce50..dc6e6ed 100644 --- a/backend/cmd/server/main.go +++ b/backend/cmd/server/main.go @@ -30,6 +30,7 @@ import ( "github.com/teslashibe/agent-setup/backend/internal/invites" mcppkg "github.com/teslashibe/agent-setup/backend/internal/mcp" "github.com/teslashibe/agent-setup/backend/internal/mcp/platforms" + "github.com/teslashibe/agent-setup/backend/internal/notifications" "github.com/teslashibe/agent-setup/backend/internal/teams" ) @@ -156,7 +157,37 @@ func main() { }, })) - if err := mountMCP(app, api, authMW, cfg, pool, magicSvc, agentSvc); err != nil { + // Notification capture: opt-in by env (defaults off). When enabled, + // mount the ingest + query routes under the existing auth middleware + // and prepare the per-user service for the MCP plugin below. 
+ var notifSvc *notifications.Service + if cfg.NotificationsEnabled { + notifSvc = notifications.NewService(notifications.NewStore(pool), notifications.ServiceConfig{ + DefaultPageSize: cfg.NotificationsDefaultPageSize, + MaxPageSize: cfg.NotificationsMaxPageSize, + ReplyWindowHrs: cfg.NotificationsReplyWindowHrs, + }) + ingestLimiter := limiter.New(limiter.Config{ + Max: cfg.NotificationsIngestRateLimit, + Expiration: cfg.NotificationsIngestRateWindow, + KeyGenerator: func(c *fiber.Ctx) string { + if id, ok := c.Locals("user_id").(string); ok && id != "" { + return "notif-ingest:" + id + } + return "notif-ingest:" + c.IP() + }, + LimitReached: func(c *fiber.Ctx) error { + return c.Status(fiber.StatusTooManyRequests).JSON( + fiber.Map{"error": "notification ingest rate limit exceeded — please slow down"}) + }, + }) + notifications.NewHandler(notifSvc).Mount(api, ingestLimiter) + log.Printf("notifications: enabled (rate=%d/%s, default_page=%d, max_page=%d)", + cfg.NotificationsIngestRateLimit, cfg.NotificationsIngestRateWindow, + cfg.NotificationsDefaultPageSize, cfg.NotificationsMaxPageSize) + } + + if err := mountMCP(app, api, authMW, cfg, pool, magicSvc, agentSvc, notifSvc); err != nil { log.Fatalf("mcp: %v", err) } @@ -211,6 +242,11 @@ func newInviteEmailSender(cfg config.Config) invites.EmailSender { // CREDENTIALS_ENCRYPTION_KEY is configured. When the key is missing the // helper logs a warning and returns nil so local-dev workflows that don't // need MCP can still come up. +// +// notifSvc is non-nil only when cfg.NotificationsEnabled. When non-nil the +// internal "notifications" platform is appended to the plugin list so the +// per-user agent gains the 5 notifications_* tools and a system prompt +// addendum that teaches it how to use them for daily rollups. 
func mountMCP( app *fiber.App, api fiber.Router, @@ -219,6 +255,7 @@ func mountMCP( pool *pgxpool.Pool, magicSvc *magiclink.Service, agentSvc *agent.Service, + notifSvc *notifications.Service, ) error { if cfg.CredentialsEncryptionKey == "" { log.Printf("mcp: CREDENTIALS_ENCRYPTION_KEY not set — MCP routes and per-user provisioner disabled") @@ -230,6 +267,9 @@ func mountMCP( } plugins := platforms.All() + if cfg.NotificationsEnabled && notifSvc != nil { + plugins = append(plugins, platforms.Notifications(notifSvc)) + } validators := make([]credentials.Validator, 0, len(plugins)) bindings := make([]mcppkg.PlatformBinding, 0, len(plugins)) for _, pl := range plugins { @@ -263,7 +303,11 @@ func mountMCP( if err != nil { return fmt.Errorf("mcp endpoint factory: %w", err) } - provisioner, err := agent.NewProvisioner(cfg, agentSvc.Client(), pool, endpointFn, agent.ProvisionerOptions{}) + provOpts := agent.ProvisionerOptions{} + if cfg.NotificationsEnabled { + provOpts.SystemPrompt = agent.NotificationsSystemPrompt() + } + provisioner, err := agent.NewProvisioner(cfg, agentSvc.Client(), pool, endpointFn, provOpts) if err != nil { return fmt.Errorf("agent provisioner: %w", err) } diff --git a/backend/internal/agent/provision.go b/backend/internal/agent/provision.go index 3e0416f..7c39a87 100644 --- a/backend/internal/agent/provision.go +++ b/backend/internal/agent/provision.go @@ -91,6 +91,34 @@ When a tool requires platform credentials that the user has not yet connected, t Prefer fine-grained tool calls and small page sizes. Always summarise tool output before presenting it to the user.` +// notificationsSystemPromptAddendum is appended to the default prompt when +// cfg.NotificationsEnabled. Keeps the rollup behaviour latent in the +// template so forks that turn the feature off don't waste tokens teaching +// the agent about tools it can't see. 
+const notificationsSystemPromptAddendum = ` + +You also have access to the user's captured device notifications via tools prefixed with notifications_. These contain communication activity from the user's phone — texts, emails, WhatsApp messages, app notifications, missed calls, etc. + +When the user asks for a "rollup", "summary", "what happened today", or "what do I need to do", use these tools to build a structured response: + +1. Call notifications_apps to understand which channels had activity in the time range. +2. Call notifications_threads to group activity by contact and conversation. +3. Call notifications_pending_actions to surface items that may need follow-up. +4. Call notifications_search to dig into specific contacts or topics the user mentions. + +Structure your rollup as: +- "What happened" — organised by contact or conversation, most important first. +- "What needs attention" — action items ranked by urgency, with the source message quoted. + +Keep summaries concise. Quote specific message content when it adds clarity (e.g. "Sarah asked: 'Can you send the comps for 742 Evergreen?'"). Flag time-sensitive items prominently. Default the time range to "today so far" when the user does not specify one.` + +// NotificationsSystemPrompt returns the default system prompt extended with +// the notification rollup instructions. Used by main.go to opt-in the +// addendum based on cfg.NotificationsEnabled. +func NotificationsSystemPrompt() string { + return defaultSystemPrompt + notificationsSystemPromptAddendum +} + // EnsureForUser returns the cached UserAgent for userID, provisioning a new // pair if missing. Concurrent calls for the same user are serialised so we // only ever create one Agent per user. 
diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 6d9b1a6..9df725d 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -69,6 +69,32 @@ type Config struct { // MCPMaxResponseBytes caps the total compact-JSON byte size of any // tool response. Set to 0 to disable. Default 32 KiB. MCPMaxResponseBytes int + + // NotificationsEnabled gates the Android notification capture feature. + // When true, the backend mounts /api/notifications/* routes, registers + // the notifications_* MCP tools (5 of them) on the per-user agent, and + // extends the system prompt with rollup instructions. The mobile app's + // capture screen and provider also light up. + // + // Defaults to false so existing forks pick up the new migration without + // any runtime behaviour change. Set to true (and run the standard + // migrations) to enable. + NotificationsEnabled bool + + // NotificationsIngestRateLimit caps POST /api/notifications/batch + // uploads per user per window. At the default 5-minute device flush + // cadence the configured 60/minute gives 12x headroom. + NotificationsIngestRateLimit int + NotificationsIngestRateWindow time.Duration + + // NotificationsDefaultPageSize is applied when MCP/REST callers omit a + // limit. NotificationsMaxPageSize is the hard cap. + NotificationsDefaultPageSize int + NotificationsMaxPageSize int + + // NotificationsReplyWindowHrs is the unanswered-message threshold used + // by the notifications_pending_actions classifier. 
+ NotificationsReplyWindowHrs int } func Load() Config { @@ -105,6 +131,13 @@ func Load() Config { MCPMaxItemsPerPage: getEnvInt("MCP_MAX_ITEMS_PER_PAGE", 50), MCPMaxStringLen: getEnvInt("MCP_MAX_STRING_LEN", 800), MCPMaxResponseBytes: getEnvInt("MCP_MAX_RESPONSE_BYTES", 32*1024), + + NotificationsEnabled: getEnvBool("NOTIFICATIONS_ENABLED", false), + NotificationsIngestRateLimit: getEnvInt("NOTIFICATIONS_INGEST_RATE_LIMIT", 60), + NotificationsIngestRateWindow: time.Duration(getEnvInt("NOTIFICATIONS_INGEST_RATE_WINDOW_SECONDS", 60)) * time.Second, + NotificationsDefaultPageSize: getEnvInt("NOTIFICATIONS_DEFAULT_PAGE_SIZE", 50), + NotificationsMaxPageSize: getEnvInt("NOTIFICATIONS_MAX_PAGE_SIZE", 200), + NotificationsReplyWindowHrs: getEnvInt("NOTIFICATIONS_ACTION_REPLY_WINDOW_HOURS", 2), } } diff --git a/backend/internal/db/migrations/00003_notification_events.sql b/backend/internal/db/migrations/00003_notification_events.sql new file mode 100644 index 0000000..00d4a53 --- /dev/null +++ b/backend/internal/db/migrations/00003_notification_events.sql @@ -0,0 +1,73 @@ +-- +goose Up +-- +goose StatementBegin + +-- notification_events stores per-user notifications captured by the mobile +-- app's NotificationListenerService. The Claude agent reads these via the +-- notifications_* MCP tools to produce daily communication rollups across +-- texts, WhatsApp, email, Zillow, etc. without needing per-platform API +-- integrations. +-- +-- Schema is opt-in: the table is created on every fork, but stays empty +-- (and adds zero overhead) when NOTIFICATIONS_ENABLED=false. Same posture as +-- the teams tables which exist regardless of TEAMS_ENABLED. +-- +-- Stored as a TimescaleDB hypertable (chunked on captured_at) because every +-- access pattern is a time range scan ("today's notifications", "the last +-- 4 hours"). 
Hypertable chunking gives efficient range scans and lets us +-- attach a retention policy (drop chunks older than N days) without manual +-- partition management. +CREATE TABLE notification_events ( + id BIGINT GENERATED ALWAYS AS IDENTITY, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + app_package TEXT NOT NULL, + app_label TEXT NOT NULL DEFAULT '', + title TEXT NOT NULL DEFAULT '', + content TEXT NOT NULL DEFAULT '', + category TEXT NOT NULL DEFAULT '', + captured_at TIMESTAMPTZ NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + -- Composite PK includes captured_at because TimescaleDB requires the + -- partition column to be part of any unique constraint on a hypertable. + PRIMARY KEY (id, captured_at) +); + +-- Convert to hypertable. chunk_time_interval defaults to 7 days for +-- notification volume scale (1 user × ~1k notifications/day = 7k rows/chunk). +SELECT create_hypertable( + 'notification_events', + 'captured_at', + chunk_time_interval => INTERVAL '7 days', + if_not_exists => TRUE +); + +-- Dedupe index: a single (user, app, captured_at, title) tuple identifies a +-- notification. Used by InsertBatch's ON CONFLICT DO NOTHING. Must include +-- captured_at because the hypertable requires it on every unique constraint. +CREATE UNIQUE INDEX uq_notif_event_dedup + ON notification_events (user_id, app_package, captured_at, title); + +-- Hot path: list / paginate notifications by time for a user. +CREATE INDEX idx_notif_user_time + ON notification_events (user_id, captured_at DESC); + +-- Filter-by-app pattern (notifications_list with app_package, ListApps). +CREATE INDEX idx_notif_user_app + ON notification_events (user_id, app_package, captured_at DESC); + +-- Full-text search across title + content for the notifications_search tool. +-- Postgres' built-in to_tsvector + GIN is sufficient for V1; no external +-- search engine required. 
+CREATE INDEX idx_notif_content_fts + ON notification_events + USING gin (to_tsvector('english', coalesce(title, '') || ' ' || coalesce(content, ''))); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_notif_content_fts; +DROP INDEX IF EXISTS idx_notif_user_app; +DROP INDEX IF EXISTS idx_notif_user_time; +DROP INDEX IF EXISTS uq_notif_event_dedup; +DROP TABLE IF EXISTS notification_events; +-- +goose StatementEnd diff --git a/backend/internal/mcp/context.go b/backend/internal/mcp/context.go new file mode 100644 index 0000000..4e63e34 --- /dev/null +++ b/backend/internal/mcp/context.go @@ -0,0 +1,42 @@ +package mcp + +import "context" + +// userIDKey is the unexported context key the MCP server uses to thread the +// authenticated user ID into per-request client constructors. Plug-in +// platforms that need user-scoped state (e.g. internal/notifications) pull +// it back out via UserIDFromContext. +type userIDKey struct{} + +// withUserID stores userID on ctx using the package-private key. Called by +// Server.CallTool just before the per-request NewClient factory runs. +func withUserID(ctx context.Context, userID string) context.Context { + if userID == "" { + return ctx + } + return context.WithValue(ctx, userIDKey{}, userID) +} + +// UserIDFromContext returns the authenticated user ID stored on ctx by the +// MCP server, or "" when missing. Safe to call from any PlatformBinding.NewClient +// callback that needs to scope per-request state to the calling user. +// +// Bindings that require a user ID should treat "" as a configuration error +// (the server only fails to set it for unauthenticated transports, which +// agent-setup does not currently expose). +func UserIDFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + v, _ := ctx.Value(userIDKey{}).(string) + return v +} + +// WithUserIDForTest is a test-only escape hatch that lets external test +// packages (e.g. 
internal/mcp/platforms) build a context that +// UserIDFromContext will read back. Production code MUST NOT call this — +// the server stamps the user id via the unexported withUserID helper as +// part of CallTool. +func WithUserIDForTest(ctx context.Context, userID string) context.Context { + return withUserID(ctx, userID) +} diff --git a/backend/internal/mcp/context_test.go b/backend/internal/mcp/context_test.go new file mode 100644 index 0000000..13c3632 --- /dev/null +++ b/backend/internal/mcp/context_test.go @@ -0,0 +1,39 @@ +package mcp + +import ( + "context" + "testing" +) + +// TestUserIDFromContext_RoundTrip verifies the canonical plumbing path: +// withUserID stores a user id and UserIDFromContext reads it back. This +// is the tight loop every credential-less platform binding (e.g. +// notifications) depends on, so the test is deliberately small and stable. +func TestUserIDFromContext_RoundTrip(t *testing.T) { + ctx := withUserID(context.Background(), "user_abc") + if got := UserIDFromContext(ctx); got != "user_abc" { + t.Errorf("UserIDFromContext returned %q; want %q", got, "user_abc") + } +} + +// TestUserIDFromContext_MissingReturnsEmpty pins the contract that callers +// can safely call UserIDFromContext on any context (including nil) and +// get back a zero value — never a panic. +func TestUserIDFromContext_MissingReturnsEmpty(t *testing.T) { + if got := UserIDFromContext(context.Background()); got != "" { + t.Errorf("UserIDFromContext on bare context returned %q; want empty", got) + } + if got := UserIDFromContext(nil); got != "" { //nolint:staticcheck // intentional nil + t.Errorf("UserIDFromContext(nil) returned %q; want empty", got) + } +} + +// TestWithUserID_EmptyIsNoop guarantees we don't pollute ctx with empty +// user-id values that would later confuse the "did the server set a user" +// check inside platform bindings. 
+func TestWithUserID_EmptyIsNoop(t *testing.T) { + ctx := withUserID(context.Background(), "") + if got := UserIDFromContext(ctx); got != "" { + t.Errorf("withUserID(\"\") leaked %q into context; want empty", got) + } +} diff --git a/backend/internal/mcp/platforms/notifications_test.go b/backend/internal/mcp/platforms/notifications_test.go new file mode 100644 index 0000000..9d46841 --- /dev/null +++ b/backend/internal/mcp/platforms/notifications_test.go @@ -0,0 +1,122 @@ +package platforms + +import ( + "context" + "encoding/json" + "testing" + + "github.com/teslashibe/agent-setup/backend/internal/mcp" + "github.com/teslashibe/agent-setup/backend/internal/notifications" + notificationsmcp "github.com/teslashibe/agent-setup/backend/internal/notifications/mcp" +) + +// TestNotificationsPluginShape pins the wire-up of the opt-in +// notifications platform. Even though it isn't in All() (it's appended +// conditionally in cmd/server/main.go) the construction must obey the +// same invariants as the other plugins, otherwise mcp.NewRegistry will +// reject it at server boot. 
+func TestNotificationsPluginShape(t *testing.T) { + svc := notifications.NewService(nil, notifications.ServiceConfig{}) + plugin := Notifications(svc) + + if plugin.Binding.Provider == nil { + t.Fatal("Notifications plugin has nil Provider") + } + if got := plugin.Binding.Provider.Platform(); got != "notifications" { + t.Errorf("Provider.Platform() = %q; want notifications", got) + } + if !plugin.Binding.NoCredentials { + t.Error("Notifications binding must set NoCredentials=true (data is pushed by the device)") + } + if plugin.Binding.NewClient == nil { + t.Fatal("NewClient is required") + } + if plugin.Validator == nil { + t.Fatal("Validator is required even for credential-less platforms") + } + if got := plugin.Validator.Platform(); got != "notifications" { + t.Errorf("Validator.Platform() = %q; want notifications", got) + } +} + +// TestNotificationsNewClientRequiresUserID demonstrates the safety check +// that prevents a tool from running without an authenticated caller. +// Without mcp.UserIDFromContext set, NewClient must error. +func TestNotificationsNewClientRequiresUserID(t *testing.T) { + svc := notifications.NewService(nil, notifications.ServiceConfig{}) + plugin := Notifications(svc) + + _, err := plugin.Binding.NewClient(context.Background(), json.RawMessage(`null`)) + if err == nil { + t.Fatal("NewClient must error when ctx has no userID") + } +} + +// TestNotificationsNewClientPropagatesUser verifies the happy path: when +// the MCP server stamps a userID on the context, NewClient returns a +// notificationsmcp.Client wired to that user. 
+func TestNotificationsNewClientPropagatesUser(t *testing.T) { + svc := notifications.NewService(nil, notifications.ServiceConfig{}) + plugin := Notifications(svc) + + ctx := mcpUserCtx("user_xyz") + c, err := plugin.Binding.NewClient(ctx, nil) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + client, ok := c.(*notificationsmcp.Client) + if !ok { + t.Fatalf("NewClient returned %T; want *notificationsmcp.Client", c) + } + if client.UserID != "user_xyz" { + t.Errorf("client.UserID = %q; want user_xyz", client.UserID) + } + if client.Svc != svc { + t.Errorf("client.Svc not the one we passed in") + } +} + +// TestNotificationsRegistryComposes proves the plugin can be added to a +// registry alongside All() — i.e. there are no duplicate platform names +// or tool names. This mirrors the production wiring in cmd/server/main.go. +func TestNotificationsRegistryComposes(t *testing.T) { + svc := notifications.NewService(nil, notifications.ServiceConfig{}) + plugins := append(All(), Notifications(svc)) + bindings := make([]mcp.PlatformBinding, 0, len(plugins)) + for _, p := range plugins { + bindings = append(bindings, p.Binding) + } + registry, err := mcp.NewRegistry(bindings...) + if err != nil { + t.Fatalf("NewRegistry: %v", err) + } + if got := len(registry.Platforms()); got != len(plugins) { + t.Errorf("registry has %d platforms; want %d", got, len(plugins)) + } + // Confirm the 5 notification tools made it through. 
+ want := map[string]bool{ + "notifications_list": false, + "notifications_search": false, + "notifications_threads": false, + "notifications_apps": false, + "notifications_pending_actions": false, + } + for _, tool := range registry.Tools() { + if _, expected := want[tool.Name]; expected { + want[tool.Name] = true + } + } + for name, seen := range want { + if !seen { + t.Errorf("registry missing tool %q after appending Notifications plugin", name) + } + } +} + +// mcpUserCtx is a tiny helper mirroring the production server's call to +// mcp.withUserID. We use the package-internal helper through the exported +// UserIDFromContext path: build a context that satisfies +// mcp.UserIDFromContext == userID. +func mcpUserCtx(userID string) context.Context { + return mcp.WithUserIDForTest(context.Background(), userID) +} diff --git a/backend/internal/mcp/platforms/platforms.go b/backend/internal/mcp/platforms/platforms.go index fe01619..bb8bef3 100644 --- a/backend/internal/mcp/platforms/platforms.go +++ b/backend/internal/mcp/platforms/platforms.go @@ -50,6 +50,8 @@ import ( "github.com/teslashibe/agent-setup/backend/internal/credentials" "github.com/teslashibe/agent-setup/backend/internal/mcp" + "github.com/teslashibe/agent-setup/backend/internal/notifications" + notificationsmcp "github.com/teslashibe/agent-setup/backend/internal/notifications/mcp" ) // Plugin pairs a single platform's MCP binding with its credential validator. @@ -518,6 +520,36 @@ func Codegen() Plugin { } } +// Notifications binds the internal notifications platform: a per-user view +// of device-captured notifications used by the daily-rollup tools. +// +// Unlike every other plugin in this file, Notifications is *not* in +// All() — agent-setup's main.go appends it conditionally on +// cfg.NotificationsEnabled so forks that don't ship the Android capture +// feature pay zero overhead. 
The binding sets NoCredentials=true because +// data is pushed by the user's own authenticated device, not pulled from +// an external service that needs cookies/tokens. +// +// The per-request user ID lives on ctx via mcp.UserIDFromContext (set by +// the MCP server before calling NewClient). +func Notifications(svc *notifications.Service) Plugin { + const platform = "notifications" + return Plugin{ + Binding: mcp.PlatformBinding{ + Provider: notificationsmcp.Provider{}, + NoCredentials: true, + NewClient: func(ctx context.Context, _ json.RawMessage) (any, error) { + userID := mcp.UserIDFromContext(ctx) + if userID == "" { + return nil, errors.New("notifications: missing authenticated user id on MCP request context") + } + return &notificationsmcp.Client{Svc: svc, UserID: userID}, nil + }, + }, + Validator: nullValidator{platform: platform}, + } +} + // simpleValidator is a generic credential-shape validator. It does not test // the credential against the upstream service (that happens lazily at // tool-call time); it only enforces structural completeness. diff --git a/backend/internal/mcp/registry.go b/backend/internal/mcp/registry.go index f1fc645..31e9f69 100644 --- a/backend/internal/mcp/registry.go +++ b/backend/internal/mcp/registry.go @@ -42,10 +42,16 @@ import ( // - ValidateCredential is optional; when present, the credential service // calls it before persisting the credential so users get fast feedback // on bad cookie blobs. +// - NoCredentials, when true, instructs Server.CallTool to skip the +// credential lookup entirely and pass nil to NewClient. Use this for +// bindings whose state lives on the server itself (e.g. the internal +// notifications platform whose data is pushed directly by the device). +// The authenticated user ID is still available via mcp.UserIDFromContext. 
type PlatformBinding struct { Provider mcptool.Provider NewClient func(ctx context.Context, credential json.RawMessage) (any, error) ValidateCredential func(credential json.RawMessage) error + NoCredentials bool } // Platform returns the platform identifier for this binding (delegates to diff --git a/backend/internal/mcp/server.go b/backend/internal/mcp/server.go index 4cdf298..0f56a3c 100644 --- a/backend/internal/mcp/server.go +++ b/backend/internal/mcp/server.go @@ -92,6 +92,10 @@ func (s *Server) CallTool(ctx context.Context, userID, name string, input json.R // resolveClient looks up the per-request client for (user, platform), using // the per-request client cache to avoid creating multiple clients in the same // request. +// +// Bindings flagged with NoCredentials skip the credentials lookup and are +// invoked with a nil credential blob; the authenticated user ID is always +// available to the NewClient callback via mcp.UserIDFromContext(ctx). func (s *Server) resolveClient(ctx context.Context, userID string, binding PlatformBinding) (any, error) { if binding.NewClient == nil { return nil, &mcptool.Error{ @@ -103,20 +107,27 @@ func (s *Server) resolveClient(ctx context.Context, userID string, binding Platf if c, ok := s.cache.get(ctx, key); ok { return c, nil } - credBlob, err := s.creds.Decrypted(ctx, userID, binding.Platform()) - if err != nil { - if errors.Is(err, credentials.ErrNotFound) { + ctx = withUserID(ctx, userID) + + var credBlob json.RawMessage + if !binding.NoCredentials { + blob, err := s.creds.Decrypted(ctx, userID, binding.Platform()) + if err != nil { + if errors.Is(err, credentials.ErrNotFound) { + return nil, &mcptool.Error{ + Code: "credential_missing", + Message: fmt.Sprintf("no %s credential connected for this user — connect it in Settings", binding.Platform()), + Data: map[string]any{"platform": binding.Platform()}, + } + } return nil, &mcptool.Error{ - Code: "credential_missing", - Message: fmt.Sprintf("no %s credential connected 
for this user — connect it in Settings", binding.Platform()), - Data: map[string]any{"platform": binding.Platform()}, + Code: "credential_unreadable", + Message: fmt.Sprintf("could not decrypt %s credential: %v", binding.Platform(), err), } } - return nil, &mcptool.Error{ - Code: "credential_unreadable", - Message: fmt.Sprintf("could not decrypt %s credential: %v", binding.Platform(), err), - } + credBlob = blob } + c, err := binding.NewClient(ctx, credBlob) if err != nil { return nil, &mcptool.Error{ diff --git a/backend/internal/notifications/handler.go b/backend/internal/notifications/handler.go new file mode 100644 index 0000000..9bd97db --- /dev/null +++ b/backend/internal/notifications/handler.go @@ -0,0 +1,144 @@ +package notifications + +import ( + "net/http" + "strconv" + "strings" + "time" + + "github.com/gofiber/fiber/v2" + + "github.com/teslashibe/agent-setup/backend/internal/apperrors" +) + +// Handler exposes the REST surface for notification ingest and (optional) +// query. Callers mount it on an authenticated Fiber group. +type Handler struct { + svc *Service +} + +// NewHandler wires a Handler around a Service. +func NewHandler(svc *Service) *Handler { return &Handler{svc: svc} } + +// Mount installs the notification routes on the given (already-authenticated) +// Fiber router. ingestLimiter, when non-nil, is applied to the high-volume +// POST /batch route only — read endpoints are unbounded because the agent +// itself drives them at a low pace. 
+// +// api := app.Group("/api", authMW.RequireAuth()) +// notifications.NewHandler(svc).Mount(api, batchLimiter) +// +// Routes: +// +// POST /api/notifications/batch - device flush ingest +// GET /api/notifications - paginated list (debug + future log UI) +// GET /api/notifications/apps - per-app summary (mobile settings stat) +func (h *Handler) Mount(api fiber.Router, ingestLimiter fiber.Handler) { + g := api.Group("/notifications") + if ingestLimiter != nil { + g.Post("/batch", ingestLimiter, h.ingestBatch) + } else { + g.Post("/batch", h.ingestBatch) + } + g.Get("/", h.listEvents) + g.Get("/apps", h.listApps) +} + +func (h *Handler) ingestBatch(c *fiber.Ctx) error { + userID := apperrors.UserID(c) + if userID == "" { + return apperrors.ErrUnauthorized + } + var in BatchInput + if err := c.BodyParser(&in); err != nil { + return apperrors.New(http.StatusBadRequest, "invalid request body") + } + res, err := h.svc.IngestBatch(c.UserContext(), userID, in) + if err != nil { + return apperrors.New(http.StatusInternalServerError, "ingest failed: "+err.Error()) + } + return c.JSON(res) +} + +func (h *Handler) listEvents(c *fiber.Ctx) error { + userID := apperrors.UserID(c) + if userID == "" { + return apperrors.ErrUnauthorized + } + opts := ListOpts{ + AppPackage: strings.TrimSpace(c.Query("app")), + Limit: atoiOrZero(c.Query("limit")), + } + if since := strings.TrimSpace(c.Query("since")); since != "" { + t, err := time.Parse(time.RFC3339, since) + if err != nil { + return apperrors.New(http.StatusBadRequest, "invalid 'since' (want RFC3339)") + } + opts.Since = &t + } + if until := strings.TrimSpace(c.Query("until")); until != "" { + t, err := time.Parse(time.RFC3339, until) + if err != nil { + return apperrors.New(http.StatusBadRequest, "invalid 'until' (want RFC3339)") + } + opts.Until = &t + } + q := strings.TrimSpace(c.Query("q")) + var ( + events []Event + err error + ) + if q == "" { + events, err = h.svc.List(c.UserContext(), userID, opts) + } else { + events, 
err = h.svc.Search(c.UserContext(), userID, q, opts) + } + if err != nil { + return apperrors.New(http.StatusInternalServerError, "list failed: "+err.Error()) + } + if events == nil { + events = []Event{} + } + return c.JSON(fiber.Map{"events": events, "count": len(events)}) +} + +func (h *Handler) listApps(c *fiber.Ctx) error { + userID := apperrors.UserID(c) + if userID == "" { + return apperrors.ErrUnauthorized + } + var since, until *time.Time + if s := strings.TrimSpace(c.Query("since")); s != "" { + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return apperrors.New(http.StatusBadRequest, "invalid 'since' (want RFC3339)") + } + since = &t + } + if u := strings.TrimSpace(c.Query("until")); u != "" { + t, err := time.Parse(time.RFC3339, u) + if err != nil { + return apperrors.New(http.StatusBadRequest, "invalid 'until' (want RFC3339)") + } + until = &t + } + apps, err := h.svc.ListApps(c.UserContext(), userID, since, until) + if err != nil { + return apperrors.New(http.StatusInternalServerError, "list apps failed: "+err.Error()) + } + if apps == nil { + apps = []AppSummary{} + } + return c.JSON(fiber.Map{"apps": apps, "count": len(apps)}) +} + +func atoiOrZero(raw string) int { + if raw == "" { + return 0 + } + n, err := strconv.Atoi(raw) + if err != nil { + return 0 + } + return n +} diff --git a/backend/internal/notifications/mcp/actions.go b/backend/internal/notifications/mcp/actions.go new file mode 100644 index 0000000..ace598f --- /dev/null +++ b/backend/internal/notifications/mcp/actions.go @@ -0,0 +1,52 @@ +package notificationsmcp + +import ( + "context" + "time" + + "github.com/teslashibe/mcptool" + + "github.com/teslashibe/agent-setup/backend/internal/notifications" +) + +// ActionsInput is the typed input for notifications_pending_actions. 
+type ActionsInput struct { + Since string `json:"since,omitempty" jsonschema:"description=RFC3339 lower bound on captured_at"` + Until string `json:"until,omitempty" jsonschema:"description=RFC3339 upper bound on captured_at"` + ReplyWindowHrs int `json:"reply_window_hours,omitempty" jsonschema:"description=hours after which an unanswered message becomes 'follow_up',minimum=1,maximum=72,default=2"` + Limit int `json:"limit,omitempty" jsonschema:"description=cap on returned action items,minimum=1,maximum=200,default=50"` +} + +func runActions(ctx context.Context, c *Client, in ActionsInput) (any, error) { + if err := c.requireUser(); err != nil { + return nil, err + } + opts := notifications.ActionOpts{ + ReplyWindowHrs: in.ReplyWindowHrs, + Limit: in.Limit, + } + if in.Since != "" { + t, err := time.Parse(time.RFC3339, in.Since) + if err != nil { + return nil, &mcptool.Error{Code: "invalid_input", Message: "invalid 'since' (want RFC3339): " + err.Error()} + } + opts.Since = &t + } + if in.Until != "" { + t, err := time.Parse(time.RFC3339, in.Until) + if err != nil { + return nil, &mcptool.Error{Code: "invalid_input", Message: "invalid 'until' (want RFC3339): " + err.Error()} + } + opts.Until = &t + } + return c.Svc.PendingActions(ctx, c.UserID, opts) +} + +var actionTools = []mcptool.Tool{ + mcptool.Define[*Client, ActionsInput]( + "notifications_pending_actions", + "Heuristic candidate to-dos extracted from notifications: missed calls, time-sensitive keywords, questions", + "PendingActions", + runActions, + ), +} diff --git a/backend/internal/notifications/mcp/apps.go b/backend/internal/notifications/mcp/apps.go new file mode 100644 index 0000000..0ac390b --- /dev/null +++ b/backend/internal/notifications/mcp/apps.go @@ -0,0 +1,45 @@ +package notificationsmcp + +import ( + "context" + "time" + + "github.com/teslashibe/mcptool" +) + +// AppsInput is the typed input for notifications_apps. 
+type AppsInput struct { + Since string `json:"since,omitempty" jsonschema:"description=RFC3339 lower bound on captured_at"` + Until string `json:"until,omitempty" jsonschema:"description=RFC3339 upper bound on captured_at"` +} + +func runApps(ctx context.Context, c *Client, in AppsInput) (any, error) { + if err := c.requireUser(); err != nil { + return nil, err + } + var since, until *time.Time + if in.Since != "" { + t, err := time.Parse(time.RFC3339, in.Since) + if err != nil { + return nil, &mcptool.Error{Code: "invalid_input", Message: "invalid 'since' (want RFC3339): " + err.Error()} + } + since = &t + } + if in.Until != "" { + t, err := time.Parse(time.RFC3339, in.Until) + if err != nil { + return nil, &mcptool.Error{Code: "invalid_input", Message: "invalid 'until' (want RFC3339): " + err.Error()} + } + until = &t + } + return c.Svc.ListApps(ctx, c.UserID, since, until) +} + +var appTools = []mcptool.Tool{ + mcptool.Define[*Client, AppsInput]( + "notifications_apps", + "Summarise which apps sent notifications in the time range (count + last seen). Use to scope the rollup", + "ListApps", + runApps, + ), +} diff --git a/backend/internal/notifications/mcp/client.go b/backend/internal/notifications/mcp/client.go new file mode 100644 index 0000000..1bf16bc --- /dev/null +++ b/backend/internal/notifications/mcp/client.go @@ -0,0 +1,33 @@ +package notificationsmcp + +import ( + "errors" + + "github.com/teslashibe/agent-setup/backend/internal/notifications" +) + +// Client is the per-request handle every notifications_* tool receives via +// mcptool.Define[*Client, …]. The MCP server constructs one per (user, +// request) by reading the authenticated user ID off the request context +// (mcp.UserIDFromContext) — see platforms.Notifications in platforms.go. +// +// The Client intentionally holds *only* the things the tool handlers need: +// a service handle and the user ID. Cross-request state lives on the +// service itself. 
+type Client struct { + Svc *notifications.Service + UserID string +} + +// ErrMissingUserID is returned when the platform binding is wired without a +// user ID. Should never happen in production — the MCP server always sets +// it from the JWT — but the check turns a class of latent bugs into a +// loud error. +var ErrMissingUserID = errors.New("notifications: missing authenticated user ID on context") + +func (c *Client) requireUser() error { + if c == nil || c.UserID == "" { + return ErrMissingUserID + } + return nil +} diff --git a/backend/internal/notifications/mcp/list.go b/backend/internal/notifications/mcp/list.go new file mode 100644 index 0000000..aca59b6 --- /dev/null +++ b/backend/internal/notifications/mcp/list.go @@ -0,0 +1,59 @@ +package notificationsmcp + +import ( + "context" + "time" + + "github.com/teslashibe/mcptool" + + "github.com/teslashibe/agent-setup/backend/internal/notifications" +) + +// ListInput is the typed input for notifications_list. +type ListInput struct { + Since string `json:"since,omitempty" jsonschema:"description=RFC3339 lower bound on captured_at (e.g. 2026-04-22T00:00:00Z)"` + Until string `json:"until,omitempty" jsonschema:"description=RFC3339 upper bound on captured_at"` + AppPackage string `json:"app_package,omitempty" jsonschema:"description=Restrict to a single app package id (e.g. com.whatsapp)"` + Limit int `json:"limit,omitempty" jsonschema:"description=cap on returned events,minimum=1,maximum=200,default=50"` +} + +func runList(ctx context.Context, c *Client, in ListInput) (any, error) { + if err := c.requireUser(); err != nil { + return nil, err + } + opts, err := buildListOpts(in.Since, in.Until, in.AppPackage, in.Limit) + if err != nil { + return nil, err + } + return c.Svc.List(ctx, c.UserID, opts) +} + +// buildListOpts is shared between list and search so time-range parsing +// stays in one place. 
+func buildListOpts(since, until, app string, limit int) (notifications.ListOpts, error) { + opts := notifications.ListOpts{AppPackage: app, Limit: limit} + if since != "" { + t, err := time.Parse(time.RFC3339, since) + if err != nil { + return opts, &mcptool.Error{Code: "invalid_input", Message: "invalid 'since' (want RFC3339): " + err.Error()} + } + opts.Since = &t + } + if until != "" { + t, err := time.Parse(time.RFC3339, until) + if err != nil { + return opts, &mcptool.Error{Code: "invalid_input", Message: "invalid 'until' (want RFC3339): " + err.Error()} + } + opts.Until = &t + } + return opts, nil +} + +var listTools = []mcptool.Tool{ + mcptool.Define[*Client, ListInput]( + "notifications_list", + "List the user's captured notifications in reverse chronological order; primary tool for daily rollups", + "List", + runList, + ), +} diff --git a/backend/internal/notifications/mcp/mcp.go b/backend/internal/notifications/mcp/mcp.go new file mode 100644 index 0000000..e5f9046 --- /dev/null +++ b/backend/internal/notifications/mcp/mcp.go @@ -0,0 +1,31 @@ +// Package notificationsmcp exposes the per-user notification corpus to +// the Claude agent as a set of MCP tools. Unlike the other platform-bound +// providers in agent-setup (which wrap external scrapers), this provider +// queries the local notification_events hypertable directly through a +// per-request notifications.Service. +// +// Registration is gated on cfg.NotificationsEnabled in cmd/server/main.go; +// when off, the provider is never appended to the platform list and the +// agent never sees these tools in tools/list. +package notificationsmcp + +import "github.com/teslashibe/mcptool" + +// Provider implements mcptool.Provider for the internal notifications +// platform. Zero value is ready to use. +type Provider struct{} + +// Platform returns "notifications". Tool names are prefixed accordingly. 
+func (Provider) Platform() string { return "notifications" } + +// Tools returns every notifications_* MCP tool exposed by this provider. +// Order here is purely cosmetic; the registry sorts by name. +func (Provider) Tools() []mcptool.Tool { + out := make([]mcptool.Tool, 0, 5) + out = append(out, listTools...) + out = append(out, searchTools...) + out = append(out, threadTools...) + out = append(out, appTools...) + out = append(out, actionTools...) + return out +} diff --git a/backend/internal/notifications/mcp/mcp_test.go b/backend/internal/notifications/mcp/mcp_test.go new file mode 100644 index 0000000..20b1954 --- /dev/null +++ b/backend/internal/notifications/mcp/mcp_test.go @@ -0,0 +1,177 @@ +package notificationsmcp + +import ( + "context" + "sort" + "strings" + "testing" + + "github.com/teslashibe/mcptool" + + "github.com/teslashibe/agent-setup/backend/internal/notifications" +) + +// TestProviderToolNames pins the exact set of notifications_* tools +// exposed to Claude. Adding or removing a tool must come with a deliberate +// edit here so the system prompt + provisioning stay in sync. +func TestProviderToolNames(t *testing.T) { + p := Provider{} + got := toolNames(p.Tools()) + want := []string{ + "notifications_apps", + "notifications_list", + "notifications_pending_actions", + "notifications_search", + "notifications_threads", + } + sort.Strings(got) + if len(got) != len(want) { + t.Fatalf("Provider tool count: got %d %v; want %d %v", len(got), got, len(want), want) + } + for i := range got { + if got[i] != want[i] { + t.Errorf("tool[%d]: got %q; want %q", i, got[i], want[i]) + } + } +} + +// TestProviderTools_HavePlatformPrefix enforces the agent-setup convention +// that every tool name starts with its provider platform string. This +// keeps tool listings predictable for the agent. 
+func TestProviderTools_HavePlatformPrefix(t *testing.T) { + p := Provider{} + platform := p.Platform() + if platform == "" { + t.Fatal("Provider.Platform() returned empty string") + } + for _, tool := range p.Tools() { + if !strings.HasPrefix(tool.Name, platform+"_") { + t.Errorf("tool %q must be prefixed with %q_", tool.Name, platform) + } + if tool.Description == "" { + t.Errorf("tool %q has empty description", tool.Name) + } + if tool.InputSchema == nil { + t.Errorf("tool %q has nil InputSchema", tool.Name) + } + if tool.Invoke == nil { + t.Errorf("tool %q has nil Invoke", tool.Name) + } + } +} + +// TestProviderTools_PassValidation runs each tool through mcptool's +// validator so any malformed input schema or missing field is caught at +// build time rather than at runtime when Claude calls the tool. +func TestProviderTools_PassValidation(t *testing.T) { + p := Provider{} + if err := mcptool.ValidateTools(p.Tools()); err != nil { + t.Fatalf("Provider tools failed mcptool validation: %v", err) + } +} + +// TestRequireUser verifies the safety check that prevents accidental +// cross-user data leaks: every tool must call requireUser() before +// touching the service. We test this on the Client directly. +func TestRequireUser(t *testing.T) { + cases := []struct { + name string + client *Client + wantErr bool + }{ + {name: "nil_client", client: nil, wantErr: true}, + {name: "empty_user_id", client: &Client{UserID: ""}, wantErr: true}, + {name: "valid_user_id", client: &Client{UserID: "user_123"}, wantErr: false}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := tc.client.requireUser() + if tc.wantErr && err == nil { + t.Errorf("requireUser() should error for %s", tc.name) + } + if !tc.wantErr && err != nil { + t.Errorf("requireUser() should succeed for %s; got %v", tc.name, err) + } + }) + } +} + +// TestBuildListOpts_Validation pins the RFC3339 contract for since/until +// inputs. 
Bad timestamps are returned as a typed mcptool.Error with code +// "invalid_input" so the agent surfaces a helpful message rather than a +// generic 500. +func TestBuildListOpts_Validation(t *testing.T) { + t.Run("valid_inputs", func(t *testing.T) { + opts, err := buildListOpts("2026-04-22T00:00:00Z", "2026-04-22T23:59:59Z", "com.whatsapp", 50) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if opts.Since == nil || opts.Until == nil { + t.Fatal("expected since+until populated") + } + if opts.AppPackage != "com.whatsapp" { + t.Errorf("AppPackage: got %q; want com.whatsapp", opts.AppPackage) + } + if opts.Limit != 50 { + t.Errorf("Limit: got %d; want 50", opts.Limit) + } + }) + t.Run("bad_since", func(t *testing.T) { + _, err := buildListOpts("not a date", "", "", 0) + if err == nil { + t.Fatal("expected error for bad since") + } + me, ok := err.(*mcptool.Error) + if !ok { + t.Fatalf("expected *mcptool.Error; got %T", err) + } + if me.Code != "invalid_input" { + t.Errorf("error code: got %q; want invalid_input", me.Code) + } + }) + t.Run("bad_until", func(t *testing.T) { + _, err := buildListOpts("", "still not a date", "", 0) + if err == nil { + t.Fatal("expected error for bad until") + } + me, ok := err.(*mcptool.Error) + if !ok { + t.Fatalf("expected *mcptool.Error; got %T", err) + } + if me.Code != "invalid_input" { + t.Errorf("error code: got %q; want invalid_input", me.Code) + } + }) +} + +// TestNoUserShortcircuit confirms that calling any tool with a Client that +// has no UserID returns ErrMissingUserID without touching the service. +// This is our defence-in-depth against missing context propagation. 
+func TestNoUserShortcircuit(t *testing.T) { + emptyClient := &Client{Svc: notifications.NewService(nil, notifications.ServiceConfig{}), UserID: ""} + ctx := context.Background() + + if _, err := runList(ctx, emptyClient, ListInput{}); err == nil { + t.Error("runList should error on empty UserID") + } + if _, err := runSearch(ctx, emptyClient, SearchInput{Query: "anything"}); err == nil { + t.Error("runSearch should error on empty UserID") + } + if _, err := runThreads(ctx, emptyClient, ThreadsInput{}); err == nil { + t.Error("runThreads should error on empty UserID") + } + if _, err := runApps(ctx, emptyClient, AppsInput{}); err == nil { + t.Error("runApps should error on empty UserID") + } + if _, err := runActions(ctx, emptyClient, ActionsInput{}); err == nil { + t.Error("runActions should error on empty UserID") + } +} + +func toolNames(ts []mcptool.Tool) []string { + out := make([]string, 0, len(ts)) + for _, t := range ts { + out = append(out, t.Name) + } + return out +} diff --git a/backend/internal/notifications/mcp/search.go b/backend/internal/notifications/mcp/search.go new file mode 100644 index 0000000..2d549df --- /dev/null +++ b/backend/internal/notifications/mcp/search.go @@ -0,0 +1,36 @@ +package notificationsmcp + +import ( + "context" + + "github.com/teslashibe/mcptool" +) + +// SearchInput is the typed input for notifications_search. 
+type SearchInput struct { + Query string `json:"query" jsonschema:"description=Full-text query against title + content (Postgres plainto_tsquery),required"` + Since string `json:"since,omitempty" jsonschema:"description=RFC3339 lower bound on captured_at"` + Until string `json:"until,omitempty" jsonschema:"description=RFC3339 upper bound on captured_at"` + AppPackage string `json:"app_package,omitempty" jsonschema:"description=Restrict to a single app package id"` + Limit int `json:"limit,omitempty" jsonschema:"description=cap on returned events,minimum=1,maximum=200,default=50"` +} + +func runSearch(ctx context.Context, c *Client, in SearchInput) (any, error) { + if err := c.requireUser(); err != nil { + return nil, err + } + opts, err := buildListOpts(in.Since, in.Until, in.AppPackage, in.Limit) + if err != nil { + return nil, err + } + return c.Svc.Search(ctx, c.UserID, in.Query, opts) +} + +var searchTools = []mcptool.Tool{ + mcptool.Define[*Client, SearchInput]( + "notifications_search", + "Full-text search across notification titles and bodies (e.g. 'Sarah Sunset listing')", + "Search", + runSearch, + ), +} diff --git a/backend/internal/notifications/mcp/threads.go b/backend/internal/notifications/mcp/threads.go new file mode 100644 index 0000000..4787a6c --- /dev/null +++ b/backend/internal/notifications/mcp/threads.go @@ -0,0 +1,54 @@ +package notificationsmcp + +import ( + "context" + "time" + + "github.com/teslashibe/mcptool" + + "github.com/teslashibe/agent-setup/backend/internal/notifications" +) + +// ThreadsInput is the typed input for notifications_threads. 
+type ThreadsInput struct { + Since string `json:"since,omitempty" jsonschema:"description=RFC3339 lower bound on captured_at"` + Until string `json:"until,omitempty" jsonschema:"description=RFC3339 upper bound on captured_at"` + AppPackage string `json:"app_package,omitempty" jsonschema:"description=Restrict to a single app package id"` + GroupBy string `json:"group_by,omitempty" jsonschema:"description=Cluster key,enum=contact,enum=app,default=contact"` + Limit int `json:"limit,omitempty" jsonschema:"description=cap on returned threads,minimum=1,maximum=200,default=50"` +} + +func runThreads(ctx context.Context, c *Client, in ThreadsInput) (any, error) { + if err := c.requireUser(); err != nil { + return nil, err + } + opts := notifications.ThreadOpts{ + AppPackage: in.AppPackage, + GroupBy: in.GroupBy, + Limit: in.Limit, + } + if in.Since != "" { + t, err := time.Parse(time.RFC3339, in.Since) + if err != nil { + return nil, &mcptool.Error{Code: "invalid_input", Message: "invalid 'since' (want RFC3339): " + err.Error()} + } + opts.Since = &t + } + if in.Until != "" { + t, err := time.Parse(time.RFC3339, in.Until) + if err != nil { + return nil, &mcptool.Error{Code: "invalid_input", Message: "invalid 'until' (want RFC3339): " + err.Error()} + } + opts.Until = &t + } + return c.Svc.GroupThreads(ctx, c.UserID, opts) +} + +var threadTools = []mcptool.Tool{ + mcptool.Define[*Client, ThreadsInput]( + "notifications_threads", + "Group notifications into conversation-like threads by app + contact (or by app) for the rollup view", + "GroupThreads", + runThreads, + ), +} diff --git a/backend/internal/notifications/model.go b/backend/internal/notifications/model.go new file mode 100644 index 0000000..4528d4e --- /dev/null +++ b/backend/internal/notifications/model.go @@ -0,0 +1,116 @@ +// Package notifications stores and queries device-captured notification +// events ingested from the Expo NotificationListenerService module on +// Android. 
The Claude agent reads this data through the MCP tools defined +// in subpackage notifications/mcp to produce daily communication rollups +// across SMS, WhatsApp, email, Zillow, etc. +// +// The package is opt-in at runtime: callers gate route mounting and MCP +// registration on cfg.NotificationsEnabled. The migration ships in every +// fork; the table simply stays empty when the feature is off. +package notifications + +import "time" + +// Event is a single notification observation, stored in the +// notification_events hypertable. +type Event struct { + ID int64 `json:"id"` + UserID string `json:"user_id"` + AppPackage string `json:"app_package"` + AppLabel string `json:"app_label"` + Title string `json:"title,omitempty"` + Content string `json:"content,omitempty"` + Category string `json:"category,omitempty"` + CapturedAt time.Time `json:"captured_at"` + CreatedAt time.Time `json:"created_at"` +} + +// EventInput is the per-event payload accepted by POST /api/notifications/batch. +// It is the device's view: no server-assigned IDs, no user_id (taken from JWT), +// no created_at (assigned at insert time). +type EventInput struct { + AppPackage string `json:"app_package"` + AppLabel string `json:"app_label"` + Title string `json:"title,omitempty"` + Content string `json:"content,omitempty"` + Category string `json:"category,omitempty"` + CapturedAt time.Time `json:"captured_at"` +} + +// BatchInput wraps a slice of EventInput. The mobile app flushes its local +// SQLite buffer in batches (default every 5 minutes) so the server side +// keeps the round-trip count low. +type BatchInput struct { + Events []EventInput `json:"events"` +} + +// BatchResult is the response shape from POST /api/notifications/batch. We +// return the count of accepted (non-duplicate) rows so the mobile client +// can surface a "X notifications captured today" stat. 
+type BatchResult struct { + Accepted int `json:"accepted"` +} + +// Thread is a contact-or-app-grouped cluster of notifications used by the +// notifications_threads MCP tool to give the agent a conversation view. +type Thread struct { + Contact string `json:"contact"` + AppLabel string `json:"app_label"` + AppPackage string `json:"app_package"` + MessageCount int `json:"message_count"` + FirstAt time.Time `json:"first_at"` + LastAt time.Time `json:"last_at"` + Preview string `json:"preview,omitempty"` +} + +// AppSummary is the aggregate per-app view used by notifications_apps and +// the GET /api/notifications/apps REST endpoint that powers the mobile +// settings screen's "captured apps" count. +type AppSummary struct { + AppPackage string `json:"app_package"` + AppLabel string `json:"app_label"` + Count int `json:"count"` + LastAt time.Time `json:"last_at"` +} + +// ActionItem is a heuristic-extracted candidate task surfaced by the +// notifications_pending_actions MCP tool. The agent ranks and presents +// these in the "What needs attention" half of the rollup. +type ActionItem struct { + Priority string `json:"priority"` + Summary string `json:"summary"` + Contact string `json:"contact,omitempty"` + AppLabel string `json:"app_label"` + AppPackage string `json:"app_package"` + CapturedAt time.Time `json:"captured_at"` + Reason string `json:"reason"` + EventID int64 `json:"event_id"` +} + +// ListOpts is the shared time-range / app / pagination filter struct used +// across List and Search. +type ListOpts struct { + Since *time.Time + Until *time.Time + AppPackage string + Limit int +} + +// ThreadOpts narrows GroupThreads to a time range and optional app, with +// configurable grouping (currently "contact" or "app"). 
+type ThreadOpts struct { + Since *time.Time + Until *time.Time + AppPackage string + GroupBy string + Limit int +} + +// ActionOpts narrows PendingActions to a time range with a configurable +// reply-window threshold for the "unanswered" heuristic. +type ActionOpts struct { + Since *time.Time + Until *time.Time + ReplyWindowHrs int + Limit int +} diff --git a/backend/internal/notifications/service.go b/backend/internal/notifications/service.go new file mode 100644 index 0000000..c7e737d --- /dev/null +++ b/backend/internal/notifications/service.go @@ -0,0 +1,183 @@ +package notifications + +import ( + "context" + "regexp" + "sort" + "strings" + "time" +) + +// Service wraps Store with business logic that doesn't belong in raw SQL: +// action-item ranking, payload normalisation, and limit clamping. +type Service struct { + store *Store + cfg ServiceConfig +} + +// ServiceConfig is the runtime knob set used by the service. Defaults are +// applied via WithDefaults so callers can pass a zero-value struct. +type ServiceConfig struct { + // DefaultPageSize bounds list/search results when the caller omits Limit. + DefaultPageSize int + // MaxPageSize is the hard cap on Limit regardless of caller request. + MaxPageSize int + // ReplyWindowHrs is the unanswered-message threshold for action items. + ReplyWindowHrs int +} + +// WithDefaults fills in safe defaults for any zero field. +func (c ServiceConfig) WithDefaults() ServiceConfig { + if c.DefaultPageSize <= 0 { + c.DefaultPageSize = 50 + } + if c.MaxPageSize <= 0 { + c.MaxPageSize = 200 + } + if c.ReplyWindowHrs <= 0 { + c.ReplyWindowHrs = 2 + } + return c +} + +// NewService constructs a Service. +func NewService(store *Store, cfg ServiceConfig) *Service { + return &Service{store: store, cfg: cfg.WithDefaults()} +} + +// IngestBatch validates, normalises, and persists a batch of EventInput +// rows. Empty input is accepted (returns 0). 
Inputs with empty +// app_package are skipped silently — the device occasionally fires +// "system" notifications without one. +func (s *Service) IngestBatch(ctx context.Context, userID string, in BatchInput) (BatchResult, error) { + clean := make([]EventInput, 0, len(in.Events)) + for _, ev := range in.Events { + if strings.TrimSpace(ev.AppPackage) == "" { + continue + } + clean = append(clean, ev) + } + n, err := s.store.InsertBatch(ctx, userID, clean) + if err != nil { + return BatchResult{}, err + } + return BatchResult{Accepted: n}, nil +} + +// List clamps limit and forwards to the store. +func (s *Service) List(ctx context.Context, userID string, opts ListOpts) ([]Event, error) { + opts.Limit = s.clampLimit(opts.Limit) + return s.store.List(ctx, userID, opts) +} + +// Search clamps limit and forwards to the store. +func (s *Service) Search(ctx context.Context, userID, query string, opts ListOpts) ([]Event, error) { + opts.Limit = s.clampLimit(opts.Limit) + return s.store.Search(ctx, userID, query, opts) +} + +// GroupThreads clamps limit and forwards to the store. +func (s *Service) GroupThreads(ctx context.Context, userID string, opts ThreadOpts) ([]Thread, error) { + opts.Limit = s.clampLimit(opts.Limit) + return s.store.GroupThreads(ctx, userID, opts) +} + +// ListApps forwards to the store; no clamping (app cardinality is naturally +// small). +func (s *Service) ListApps(ctx context.Context, userID string, since, until *time.Time) ([]AppSummary, error) { + return s.store.ListApps(ctx, userID, since, until) +} + +// PendingActions queries candidate rows and ranks them. The store does the +// SQL filter; the service applies prioritisation + dedup. 
+func (s *Service) PendingActions(ctx context.Context, userID string, opts ActionOpts) ([]ActionItem, error) { + if opts.ReplyWindowHrs <= 0 { + opts.ReplyWindowHrs = s.cfg.ReplyWindowHrs + } + opts.Limit = s.clampLimit(opts.Limit) + rows, err := s.store.PendingActions(ctx, userID, opts) + if err != nil { + return nil, err + } + out := make([]ActionItem, 0, len(rows)) + for _, e := range rows { + out = append(out, classify(e)) + } + sort.SliceStable(out, func(i, j int) bool { + return priorityRank(out[i].Priority) < priorityRank(out[j].Priority) + }) + return out, nil +} + +func (s *Service) clampLimit(limit int) int { + if limit <= 0 { + return s.cfg.DefaultPageSize + } + if limit > s.cfg.MaxPageSize { + return s.cfg.MaxPageSize + } + return limit +} + +// urgencyRE matches the time-sensitive keyword set used by classify and the +// store's WHERE clause. Kept in one place so the two can't drift. +var urgencyRE = regexp.MustCompile(`(?i)\b(deadline|expires|by tomorrow|showing at|offer|closing|inspection|asap|urgent|tonight|today)\b`) + +// missedCallRE matches phone-dialer notifications. +var missedCallRE = regexp.MustCompile(`(?i)\bmissed\b`) + +// classify turns an Event into a ranked ActionItem. The agent re-ranks +// these with its own reasoning, but having a coarse priority + reason in +// the data lets the agent skim quickly. 
+func classify(e Event) ActionItem { + item := ActionItem{ + Priority: "low", + Summary: summarise(e), + Contact: e.Title, + AppLabel: e.AppLabel, + AppPackage: e.AppPackage, + CapturedAt: e.CapturedAt, + EventID: e.ID, + } + switch { + case e.Category == "call" || missedCallRE.MatchString(e.Title): + item.Priority = "high" + item.Reason = "missed_call" + case urgencyRE.MatchString(e.Content) || urgencyRE.MatchString(e.Title): + item.Priority = "high" + item.Reason = "time_sensitive" + case strings.Contains(e.Content, "?"): + item.Priority = "medium" + item.Reason = "question" + default: + item.Priority = "low" + item.Reason = "follow_up" + } + return item +} + +func summarise(e Event) string { + t := strings.TrimSpace(e.Title) + c := strings.TrimSpace(e.Content) + switch { + case t != "" && c != "": + return t + ": " + c + case t != "": + return t + case c != "": + return c + default: + return e.AppLabel + } +} + +func priorityRank(p string) int { + switch p { + case "high": + return 0 + case "medium": + return 1 + default: + return 2 + } +} diff --git a/backend/internal/notifications/service_test.go b/backend/internal/notifications/service_test.go new file mode 100644 index 0000000..c8063a3 --- /dev/null +++ b/backend/internal/notifications/service_test.go @@ -0,0 +1,139 @@ +package notifications + +import ( + "testing" + "time" +) + +// TestClassifyPriority pins the action-item heuristic so the agent's +// "what needs attention" half of the rollup stays consistent. If we tweak +// the regex set we want the test to fail loudly. 
+func TestClassifyPriority(t *testing.T) { + now := time.Now() + cases := []struct { + name string + ev Event + priority string + reason string + }{ + { + name: "missed_call_via_category", + ev: Event{Category: "call", Title: "Phone", Content: "+1 415 555 0102", CapturedAt: now}, + priority: "high", + reason: "missed_call", + }, + { + name: "missed_call_via_title", + ev: Event{Category: "msg", Title: "Missed call from Sarah", Content: "", CapturedAt: now}, + priority: "high", + reason: "missed_call", + }, + { + name: "time_sensitive_keyword", + ev: Event{Title: "Sarah", Content: "Showing at 3pm tomorrow — confirm please", CapturedAt: now}, + priority: "high", + reason: "time_sensitive", + }, + { + name: "question_mark_message", + ev: Event{Title: "Mom", Content: "Are you free Sunday?", CapturedAt: now}, + priority: "medium", + reason: "question", + }, + { + name: "default_low_priority", + ev: Event{Title: "News", Content: "BART weekend service update", CapturedAt: now}, + priority: "low", + reason: "follow_up", + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := classify(tc.ev) + if got.Priority != tc.priority { + t.Errorf("priority: got %q, want %q (event=%+v)", got.Priority, tc.priority, tc.ev) + } + if got.Reason != tc.reason { + t.Errorf("reason: got %q, want %q (event=%+v)", got.Reason, tc.reason, tc.ev) + } + if got.Summary == "" { + t.Errorf("summary should never be empty for %+v", tc.ev) + } + }) + } +} + +// TestPriorityRankOrder enforces the "high before medium before low" +// ordering used by Service.PendingActions to sort the response. 
+func TestPriorityRankOrder(t *testing.T) { + if priorityRank("high") >= priorityRank("medium") { + t.Errorf("high should rank before medium") + } + if priorityRank("medium") >= priorityRank("low") { + t.Errorf("medium should rank before low") + } + if priorityRank("unknown") != priorityRank("low") { + t.Errorf("unknown priorities should rank as low (got %d, low=%d)", + priorityRank("unknown"), priorityRank("low")) + } +} + +// TestServiceConfigDefaults guarantees that a zero-value ServiceConfig +// produces sensible runtime knobs. Anything that downgrades these limits +// will overflow agent token budgets fast. +func TestServiceConfigDefaults(t *testing.T) { + cfg := ServiceConfig{}.WithDefaults() + if cfg.DefaultPageSize <= 0 { + t.Errorf("DefaultPageSize must default to >0; got %d", cfg.DefaultPageSize) + } + if cfg.MaxPageSize < cfg.DefaultPageSize { + t.Errorf("MaxPageSize (%d) must be >= DefaultPageSize (%d)", + cfg.MaxPageSize, cfg.DefaultPageSize) + } + if cfg.ReplyWindowHrs <= 0 { + t.Errorf("ReplyWindowHrs must default to >0; got %d", cfg.ReplyWindowHrs) + } +} + +// TestServiceClampLimit pins the "page caller asked for more than max ⇒ +// return max; asked for nothing ⇒ return default" contract used by every +// MCP query path. +func TestServiceClampLimit(t *testing.T) { + s := &Service{cfg: ServiceConfig{DefaultPageSize: 25, MaxPageSize: 100}} + cases := []struct { + in, want int + }{ + {0, 25}, + {-1, 25}, + {50, 50}, + {100, 100}, + {500, 100}, + } + for _, tc := range cases { + got := s.clampLimit(tc.in) + if got != tc.want { + t.Errorf("clampLimit(%d) = %d; want %d", tc.in, got, tc.want) + } + } +} + +// TestSummariseFallback verifies the fallback chain (title+content > +// title-only > content-only > app label) so we never emit an empty action +// summary even on garbage input. 
+func TestSummariseFallback(t *testing.T) { + cases := []struct { + ev Event + want string + }{ + {Event{Title: "T", Content: "C"}, "T: C"}, + {Event{Title: "T"}, "T"}, + {Event{Content: "C"}, "C"}, + {Event{AppLabel: "WhatsApp"}, "WhatsApp"}, + } + for _, tc := range cases { + got := summarise(tc.ev) + if got != tc.want { + t.Errorf("summarise(%+v) = %q; want %q", tc.ev, got, tc.want) + } + } +} diff --git a/backend/internal/notifications/store.go b/backend/internal/notifications/store.go new file mode 100644 index 0000000..f95ed32 --- /dev/null +++ b/backend/internal/notifications/store.go @@ -0,0 +1,299 @@ +package notifications + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +// Store is the pgx-backed persistence layer for notification_events. +type Store struct { + pool *pgxpool.Pool +} + +// NewStore wires a Store to a pgx pool. +func NewStore(pool *pgxpool.Pool) *Store { return &Store{pool: pool} } + +// InsertBatch inserts every event in the batch under userID, deduplicating +// on (user_id, app_package, captured_at, title) via ON CONFLICT DO NOTHING. +// Returns the number of rows actually inserted. +// +// The dedup key matches uq_notif_event_dedup in the migration. 
+func (s *Store) InsertBatch(ctx context.Context, userID string, events []EventInput) (int, error) { + if len(events) == 0 { + return 0, nil + } + const q = ` + INSERT INTO notification_events + (user_id, app_package, app_label, title, content, category, captured_at) + VALUES + ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT ON CONSTRAINT uq_notif_event_dedup DO NOTHING` + + tx, err := s.pool.Begin(ctx) + if err != nil { + return 0, fmt.Errorf("begin: %w", err) + } + defer func() { _ = tx.Rollback(ctx) }() + + var accepted int + for _, ev := range events { + ct := ev.CapturedAt + if ct.IsZero() { + ct = time.Now().UTC() + } + tag, err := tx.Exec(ctx, q, + userID, + strings.TrimSpace(ev.AppPackage), + strings.TrimSpace(ev.AppLabel), + strings.TrimSpace(ev.Title), + strings.TrimSpace(ev.Content), + strings.TrimSpace(ev.Category), + ct.UTC(), + ) + if err != nil { + return 0, fmt.Errorf("insert: %w", err) + } + accepted += int(tag.RowsAffected()) + } + if err := tx.Commit(ctx); err != nil { + return 0, fmt.Errorf("commit: %w", err) + } + return accepted, nil +} + +// List returns events for userID matching opts in reverse chronological +// order (newest first). Empty/nil filters are treated as "no constraint". 
+func (s *Store) List(ctx context.Context, userID string, opts ListOpts) ([]Event, error) { + args := []any{userID} + clauses := []string{"user_id = $1"} + if opts.Since != nil { + args = append(args, opts.Since.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at >= $%d", len(args))) + } + if opts.Until != nil { + args = append(args, opts.Until.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at <= $%d", len(args))) + } + if app := strings.TrimSpace(opts.AppPackage); app != "" { + args = append(args, app) + clauses = append(clauses, fmt.Sprintf("app_package = $%d", len(args))) + } + limit := opts.Limit + if limit <= 0 { + limit = 50 + } + args = append(args, limit) + q := `SELECT id, user_id, app_package, app_label, title, content, category, captured_at, created_at + FROM notification_events + WHERE ` + strings.Join(clauses, " AND ") + ` + ORDER BY captured_at DESC, id DESC + LIMIT $` + fmt.Sprintf("%d", len(args)) + return s.queryEvents(ctx, q, args...) +} + +// Search runs a full-text query (Postgres plainto_tsquery) against title + +// content, restricted by the same opts as List. Empty query string falls +// through to List. 
+func (s *Store) Search(ctx context.Context, userID, query string, opts ListOpts) ([]Event, error) { + q := strings.TrimSpace(query) + if q == "" { + return s.List(ctx, userID, opts) + } + args := []any{userID, q} + clauses := []string{ + "user_id = $1", + "to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,'')) @@ plainto_tsquery('english', $2)", + } + if opts.Since != nil { + args = append(args, opts.Since.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at >= $%d", len(args))) + } + if opts.Until != nil { + args = append(args, opts.Until.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at <= $%d", len(args))) + } + if app := strings.TrimSpace(opts.AppPackage); app != "" { + args = append(args, app) + clauses = append(clauses, fmt.Sprintf("app_package = $%d", len(args))) + } + limit := opts.Limit + if limit <= 0 { + limit = 50 + } + args = append(args, limit) + sql := `SELECT id, user_id, app_package, app_label, title, content, category, captured_at, created_at + FROM notification_events + WHERE ` + strings.Join(clauses, " AND ") + ` + ORDER BY captured_at DESC, id DESC + LIMIT $` + fmt.Sprintf("%d", len(args)) + return s.queryEvents(ctx, sql, args...) +} + +// GroupThreads clusters events into Thread rows. With GroupBy=="contact" +// (default) it groups by (app_package, title) which approximates "messages +// from this contact in this app". With GroupBy=="app" it collapses to one +// row per app — useful as a coarse landscape view. 
+func (s *Store) GroupThreads(ctx context.Context, userID string, opts ThreadOpts) ([]Thread, error) { + args := []any{userID} + clauses := []string{"user_id = $1"} + if opts.Since != nil { + args = append(args, opts.Since.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at >= $%d", len(args))) + } + if opts.Until != nil { + args = append(args, opts.Until.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at <= $%d", len(args))) + } + if app := strings.TrimSpace(opts.AppPackage); app != "" { + args = append(args, app) + clauses = append(clauses, fmt.Sprintf("app_package = $%d", len(args))) + } + limit := opts.Limit + if limit <= 0 { + limit = 50 + } + args = append(args, limit) + + groupBy := strings.ToLower(strings.TrimSpace(opts.GroupBy)) + if groupBy == "" { + groupBy = "contact" + } + + var sql string + switch groupBy { + case "app": + sql = `SELECT '' AS contact, app_label, app_package, count(*)::int, min(captured_at), max(captured_at), + (array_agg(content ORDER BY captured_at DESC))[1] AS preview + FROM notification_events + WHERE ` + strings.Join(clauses, " AND ") + ` + GROUP BY app_package, app_label + ORDER BY max(captured_at) DESC + LIMIT $` + fmt.Sprintf("%d", len(args)) + default: // "contact" + sql = `SELECT title AS contact, app_label, app_package, count(*)::int, min(captured_at), max(captured_at), + (array_agg(content ORDER BY captured_at DESC))[1] AS preview + FROM notification_events + WHERE ` + strings.Join(clauses, " AND ") + ` + GROUP BY app_package, app_label, title + ORDER BY max(captured_at) DESC + LIMIT $` + fmt.Sprintf("%d", len(args)) + } + + rows, err := s.pool.Query(ctx, sql, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var out []Thread + for rows.Next() { + var t Thread + var preview *string + if err := rows.Scan(&t.Contact, &t.AppLabel, &t.AppPackage, &t.MessageCount, &t.FirstAt, &t.LastAt, &preview); err != nil { + return nil, err + } + if preview != nil { + t.Preview = *preview + } + out = append(out, t) + } + return out, rows.Err() +} + +// ListApps returns the distinct apps that have sent notifications for the +// user, with row counts and last-seen timestamps. Powers both the +// notifications_apps MCP tool and the mobile settings screen's "captured +// apps" stat. +func (s *Store) ListApps(ctx context.Context, userID string, since, until *time.Time) ([]AppSummary, error) { + args := []any{userID} + clauses := []string{"user_id = $1"} + if since != nil { + args = append(args, since.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at >= $%d", len(args))) + } + if until != nil { + args = append(args, until.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at <= $%d", len(args))) + } + sql := `SELECT app_package, max(app_label), count(*)::int, max(captured_at) + FROM notification_events + WHERE ` + strings.Join(clauses, " AND ") + ` + GROUP BY app_package + ORDER BY max(captured_at) DESC` + rows, err := s.pool.Query(ctx, sql, args...) + if err != nil { + return nil, err + } + defer rows.Close() + var out []AppSummary + for rows.Next() { + var a AppSummary + if err := rows.Scan(&a.AppPackage, &a.AppLabel, &a.Count, &a.LastAt); err != nil { + return nil, err + } + out = append(out, a) + } + return out, rows.Err() +} + +// PendingActions returns events that match action-item heuristics +// (questions, time-sensitive keywords, missed calls). Ranking happens in +// the service layer so the SQL stays a simple WHERE-or filter. 
+func (s *Store) PendingActions(ctx context.Context, userID string, opts ActionOpts) ([]Event, error) { + args := []any{userID} + clauses := []string{"user_id = $1"} + if opts.Since != nil { + args = append(args, opts.Since.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at >= $%d", len(args))) + } + if opts.Until != nil { + args = append(args, opts.Until.UTC()) + clauses = append(clauses, fmt.Sprintf("captured_at <= $%d", len(args))) + } + // Heuristic OR: contains '?', or matches a urgency keyword, or category=call. + clauses = append(clauses, `( + content ILIKE '%?%' + OR content ~* '(deadline|expires|by tomorrow|showing at|offer|closing|inspection|ASAP|urgent|tonight|today)' + OR title ~* '(missed call|missed)' + OR category = 'call' + )`) + limit := opts.Limit + if limit <= 0 { + limit = 100 + } + args = append(args, limit) + sql := `SELECT id, user_id, app_package, app_label, title, content, category, captured_at, created_at + FROM notification_events + WHERE ` + strings.Join(clauses, " AND ") + ` + ORDER BY captured_at DESC + LIMIT $` + fmt.Sprintf("%d", len(args)) + return s.queryEvents(ctx, sql, args...) +} + +func (s *Store) queryEvents(ctx context.Context, sql string, args ...any) ([]Event, error) { + rows, err := s.pool.Query(ctx, sql, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + return scanEvents(rows) +} + +func scanEvents(rows pgx.Rows) ([]Event, error) { + var out []Event + for rows.Next() { + var e Event + if err := rows.Scan( + &e.ID, &e.UserID, &e.AppPackage, &e.AppLabel, + &e.Title, &e.Content, &e.Category, &e.CapturedAt, &e.CreatedAt, + ); err != nil { + return nil, err + } + out = append(out, e) + } + return out, rows.Err() +} diff --git a/mobile/app/(app)/_layout.tsx b/mobile/app/(app)/_layout.tsx index d009779..f607619 100644 --- a/mobile/app/(app)/_layout.tsx +++ b/mobile/app/(app)/_layout.tsx @@ -49,6 +49,10 @@ export default function AppLayout() { + {/* Capture screen is reached via Settings → Notification Capture; we + register it as a hidden tab so deep links work and expo-router + knows about the route. */} + ); } diff --git a/mobile/app/(app)/capture.tsx b/mobile/app/(app)/capture.tsx new file mode 100644 index 0000000..3801f06 --- /dev/null +++ b/mobile/app/(app)/capture.tsx @@ -0,0 +1,289 @@ +import { useCallback, useEffect, useMemo, useState } from "react"; +import { Alert, Pressable, ScrollView, Switch, View } from "react-native"; + +import { Button } from "@/components/ui/Button"; +import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/Card"; +import { Input } from "@/components/ui/Input"; +import { Text } from "@/components/ui/Text"; +import { useNotificationCapture } from "@/providers/NotificationCaptureProvider"; +import { listCapturedApps, type AppSummary } from "@/services/notifications"; + +/** + * Allowlist seed populated with the contact-heavy apps the pilot user + * actually relies on (real estate workflow). The list is intentionally + * editable below so other forks can tune it for their persona. 
+ */ +const SUGGESTED_APPS: { pkg: string; label: string }[] = [ + { pkg: "com.google.android.apps.messaging", label: "Messages (Google)" }, + { pkg: "com.android.mms", label: "Messages (AOSP)" }, + { pkg: "com.whatsapp", label: "WhatsApp" }, + { pkg: "com.google.android.gm", label: "Gmail" }, + { pkg: "com.microsoft.office.outlook", label: "Outlook" }, + { pkg: "com.zillow.android.zillowmap", label: "Zillow" }, + { pkg: "com.zillow.android.rentals", label: "Zillow Rentals" }, + { pkg: "com.realtor.android", label: "Realtor.com" }, + { pkg: "com.facebook.orca", label: "Messenger" }, + { pkg: "com.android.dialer", label: "Phone (AOSP)" }, + { pkg: "com.google.android.dialer", label: "Phone (Google)" }, +]; + +function formatLastSync(d: Date | null): string { + if (!d) return "never"; + const diffMs = Date.now() - d.getTime(); + const minutes = Math.round(diffMs / 60_000); + if (minutes < 1) return "just now"; + if (minutes < 60) return `${minutes}m ago`; + const hours = Math.round(minutes / 60); + if (hours < 24) return `${hours}h ago`; + return d.toLocaleString(); +} + +export default function NotificationCaptureScreen() { + const capture = useNotificationCapture(); + const [customPkg, setCustomPkg] = useState(""); + const [apps, setApps] = useState([]); + const [appsError, setAppsError] = useState(null); + const [loadingApps, setLoadingApps] = useState(false); + const [syncing, setSyncing] = useState(false); + + const allowlist = capture.allowlist; + const allowlistSet = useMemo(() => new Set(allowlist), [allowlist]); + + const refreshApps = useCallback(async () => { + if (!capture.isAvailable) return; + setLoadingApps(true); + setAppsError(null); + try { + const next = await listCapturedApps(); + setApps(next); + } catch (err) { + setAppsError(err instanceof Error ? 
err.message : "Failed to load apps"); + } finally { + setLoadingApps(false); + } + }, [capture.isAvailable]); + + useEffect(() => { + void refreshApps(); + }, [refreshApps]); + + const toggleApp = useCallback( + (pkg: string) => { + const next = allowlistSet.has(pkg) + ? allowlist.filter((p) => p !== pkg) + : [...allowlist, pkg]; + capture.setAllowlist(next); + }, + [allowlist, allowlistSet, capture], + ); + + const addCustomApp = useCallback(() => { + const pkg = customPkg.trim(); + if (!pkg) return; + if (allowlistSet.has(pkg)) { + setCustomPkg(""); + return; + } + capture.setAllowlist([...allowlist, pkg]); + setCustomPkg(""); + }, [allowlist, allowlistSet, capture, customPkg]); + + const handleEnable = useCallback( + (next: boolean) => { + if (next && !capture.hasPermission) { + Alert.alert( + "Permission required", + "Capture needs Notification Access. We'll open the system settings page so you can grant it for Agent App.", + [ + { text: "Cancel", style: "cancel" }, + { + text: "Open settings", + onPress: () => capture.openPermissionSettings(), + }, + ], + ); + return; + } + capture.setEnabled(next); + }, + [capture], + ); + + const handleFlush = useCallback(async () => { + setSyncing(true); + try { + const accepted = await capture.flushNow(); + Alert.alert("Sync complete", `${accepted} new notification${accepted === 1 ? "" : "s"} uploaded.`); + await refreshApps(); + } catch (err) { + Alert.alert("Sync failed", err instanceof Error ? err.message : "Unknown error"); + } finally { + setSyncing(false); + } + }, [capture, refreshApps]); + + if (!capture.isAvailable) { + return ( + + + + Notification Capture + Not available for this build. + + + + The notification capture pipeline is Android-only and must be enabled in the + deployment via NOTIFICATIONS_ENABLED. iOS does not expose a comparable system API + without an app extension. 
+ + + + + ); + } + + return ( + + + + + Capture status + + When enabled, the agent can produce a daily rollup across every monitored app. + + + + + + Enable capture + + Master switch. Disable to pause without revoking system permission. + + + + + + + Notification access + + {capture.hasPermission ? "Granted" : "Not granted — open settings to allow"} + + + + + + + Pending in local buffer + + {capture.pendingCount} + + Last sync: {formatLastSync(capture.lastSyncAt)} + + + + + + + + + Monitored apps + + Only notifications from these apps are captured and sent to the agent. + + + + {SUGGESTED_APPS.map((app) => { + const on = allowlistSet.has(app.pkg); + return ( + toggleApp(app.pkg)} + className="flex-row items-center justify-between rounded-lg border border-border p-3 active:opacity-80" + > + + {app.label} + + {app.pkg} + + + toggleApp(app.pkg)} /> + + ); + })} + + {allowlist + .filter((pkg) => !SUGGESTED_APPS.some((a) => a.pkg === pkg)) + .map((pkg) => ( + toggleApp(pkg)} + className="flex-row items-center justify-between rounded-lg border border-border p-3 active:opacity-80" + > + + {pkg} + + Custom + + + toggleApp(pkg)} /> + + ))} + + + + + + + + + + + Captured apps + What the backend has actually received recently. + + + {loadingApps ? ( + + Loading… + + ) : appsError ? ( + + {appsError} + + ) : apps.length === 0 ? ( + + Nothing captured yet. Enable capture, grant notification access, and trigger a + test notification. 
+ + ) : ( + apps.map((app) => ( + + + {app.app_label || app.app_package} + + Last: {new Date(app.last_at).toLocaleString()} + + + {app.count} + + )) + )} + + + + + ); +} diff --git a/mobile/app/(app)/settings.tsx b/mobile/app/(app)/settings.tsx index b37dd96..2c2934a 100644 --- a/mobile/app/(app)/settings.tsx +++ b/mobile/app/(app)/settings.tsx @@ -9,13 +9,15 @@ import { Select, type SelectOption } from "@/components/ui/Select"; import { Separator } from "@/components/ui/Separator"; import { Text } from "@/components/ui/Text"; import { useAuthSession } from "@/providers/AuthSessionProvider"; +import { useNotificationCapture } from "@/providers/NotificationCaptureProvider"; import { useTeams } from "@/providers/TeamsProvider"; -import { TEAMS_ENABLED } from "@/config"; +import { NOTIFICATIONS_CAPTURE_ENABLED, TEAMS_ENABLED } from "@/config"; export default function SettingsScreen() { const router = useRouter(); const { user, logout } = useAuthSession(); const { active, memberships, setActive } = useTeams(); + const capture = useNotificationCapture(); // Team-switcher options live next to the row so we can show role + a "·" // separator the same way the inline picker on the home screen would. @@ -112,6 +114,24 @@ export default function SettingsScreen() { + {NOTIFICATIONS_CAPTURE_ENABLED && capture.isAvailable ? ( + + + Notification Capture + + + + {capture.isEnabled + ? `Capturing notifications from ${capture.allowlist.length} app${capture.allowlist.length === 1 ? "" : "s"}. The agent uses these to produce your daily rollup.` + : "Disabled. 
Enable to let the agent summarise your texts, WhatsApp, email and more."} + + + + + ) : null} + About diff --git a/mobile/app/_layout.tsx b/mobile/app/_layout.tsx index 46761d8..1b85176 100644 --- a/mobile/app/_layout.tsx +++ b/mobile/app/_layout.tsx @@ -10,6 +10,7 @@ import { Inter_500Medium, Inter_700Bold } from "@expo-google-fonts/inter"; import { SpaceGrotesk_700Bold } from "@expo-google-fonts/space-grotesk"; import { AuthSessionProvider } from "@/providers/AuthSessionProvider"; +import { NotificationCaptureProvider } from "@/providers/NotificationCaptureProvider"; import { TeamsProvider } from "@/providers/TeamsProvider"; export default function RootLayout() { @@ -32,8 +33,10 @@ export default function RootLayout() { - - + + + + diff --git a/mobile/config.ts b/mobile/config.ts index 112147e..97acc00 100644 --- a/mobile/config.ts +++ b/mobile/config.ts @@ -16,3 +16,13 @@ export const API_URL = // Spec: .cursor/tickets/teams-scope.md §"Configuration": // `TEAMS_ENABLED = process.env.EXPO_PUBLIC_TEAMS_ENABLED !== "false"` export const TEAMS_ENABLED = process.env.EXPO_PUBLIC_TEAMS_ENABLED !== "false"; + +// NOTIFICATIONS_CAPTURE_ENABLED mirrors the server-side NOTIFICATIONS_ENABLED +// flag. Default OFF — the feature is opt-in per the notification-capture +// scope so forks of the template that don't ship the Android capture +// pipeline pay zero UI overhead. +// +// When false, the capture settings screen, provider, and any tab entries +// remain inert. Set EXPO_PUBLIC_NOTIFICATIONS_ENABLED="true" to opt in. 
+export const NOTIFICATIONS_CAPTURE_ENABLED = + process.env.EXPO_PUBLIC_NOTIFICATIONS_ENABLED === "true"; diff --git a/mobile/eas.json b/mobile/eas.json new file mode 100644 index 0000000..9213c11 --- /dev/null +++ b/mobile/eas.json @@ -0,0 +1,38 @@ +{ + "cli": { + "version": ">= 13.0.0", + "appVersionSource": "local" + }, + "build": { + "development": { + "developmentClient": true, + "distribution": "internal", + "android": { + "buildType": "apk", + "gradleCommand": ":app:assembleDebug" + } + }, + "preview": { + "distribution": "internal", + "channel": "preview", + "android": { + "buildType": "apk" + } + }, + "production": { + "channel": "production", + "android": { + "buildType": "app-bundle" + } + }, + "production-apk": { + "channel": "production", + "android": { + "buildType": "apk" + } + } + }, + "submit": { + "production": {} + } +} diff --git a/mobile/modules/notification-capture/android/build.gradle b/mobile/modules/notification-capture/android/build.gradle new file mode 100644 index 0000000..38a46d3 --- /dev/null +++ b/mobile/modules/notification-capture/android/build.gradle @@ -0,0 +1,43 @@ +apply plugin: 'com.android.library' +apply plugin: 'kotlin-android' + +group = 'expo.modules.notificationcapture' +version = '0.1.0' + +def expoModulesCorePlugin = new File(project(":expo-modules-core").projectDir.absolutePath, "ExpoModulesCorePlugin.gradle") +apply from: expoModulesCorePlugin +applyKotlinExpoModulesCorePlugin() +useCoreDependencies() +useExpoPublishing() + +android { + namespace "expo.modules.notificationcapture" + + def agpVersion = com.android.Version.ANDROID_GRADLE_PLUGIN_VERSION + if (agpVersion.tokenize('.')[0].toInteger() < 8) { + compileSdkVersion safeExtGet("compileSdkVersion", 35) + } else { + compileSdk safeExtGet("compileSdkVersion", 35) + } + + defaultConfig { + minSdkVersion safeExtGet("minSdkVersion", 24) + targetSdkVersion safeExtGet("targetSdkVersion", 35) + versionCode 1 + versionName "0.1.0" + } + + lintOptions { + abortOnError 
false + } + + publishing { + singleVariant("release") { + withSourcesJar() + } + } +} + +def safeExtGet(prop, fallback) { + rootProject.ext.has(prop) ? rootProject.ext.get(prop) : fallback +} diff --git a/mobile/modules/notification-capture/android/src/main/AndroidManifest.xml b/mobile/modules/notification-capture/android/src/main/AndroidManifest.xml new file mode 100644 index 0000000..f741b8b --- /dev/null +++ b/mobile/modules/notification-capture/android/src/main/AndroidManifest.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + diff --git a/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationCaptureModule.kt b/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationCaptureModule.kt new file mode 100644 index 0000000..3848c74 --- /dev/null +++ b/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationCaptureModule.kt @@ -0,0 +1,80 @@ +package expo.modules.notificationcapture + +import android.content.ComponentName +import android.content.Intent +import android.provider.Settings +import expo.modules.kotlin.modules.Module +import expo.modules.kotlin.modules.ModuleDefinition + +/** + * Expo module bridge between JS and the NotificationListenerService. + * + * Exposed surface: + * - hasPermission(): Boolean — is Notification Access granted? + * - openSettings(): Void — deep-link the user to the system settings page + * - setEnabled(enabled: Boolean): Void — master capture switch + * - isEnabled(): Boolean — read the master switch + * - setAllowlist(packages: [String]): Void — set monitored apps + * - getAllowlist(): [String] — read monitored apps + * - drainBuffer(): String — fetch and clear pending events (JSON array) + * - requeueEvents(json: String): Void — prepend events back to the + * buffer when an upload fails, so we never lose data on flaky + * networks. JSON shape must match what drainBuffer() returns. 
+ * - bufferSize(): Int — count pending events without draining + * - lastSyncAt(): Number — ms-since-epoch of the last flush, or 0 + * - markSynced(timestampMs: Number): Void — record a successful flush + * + * Everything is synchronous because the underlying NotificationStore + * uses SharedPreferences, which is fast and main-thread-safe at this + * volume. If we ever migrate to SQLite we'll switch to AsyncFunction. + */ +class NotificationCaptureModule : Module() { + + private val store: NotificationStore by lazy { + NotificationStore(appContext.reactContext!!.applicationContext) + } + + override fun definition() = ModuleDefinition { + Name("NotificationCapture") + + Function("hasPermission") { + val ctx = appContext.reactContext ?: return@Function false + val enabled = Settings.Secure.getString( + ctx.contentResolver, + "enabled_notification_listeners" + ) ?: return@Function false + val expected = ComponentName(ctx, NotificationCaptureService::class.java).flattenToString() + enabled.split(":").any { it == expected } + } + + Function("openSettings") { + val ctx = appContext.reactContext ?: return@Function + val intent = Intent(Settings.ACTION_NOTIFICATION_LISTENER_SETTINGS).apply { + addFlags(Intent.FLAG_ACTIVITY_NEW_TASK) + } + ctx.startActivity(intent) + } + + Function("isEnabled") { store.isEnabled() } + + Function("setEnabled") { enabled: Boolean -> store.setEnabled(enabled) } + + Function("getAllowlist") { store.getAllowlist().toList() } + + Function("setAllowlist") { packages: List -> + store.setAllowlist(packages.toSet()) + } + + Function("drainBuffer") { store.drainBuffer() } + + Function("requeueEvents") { eventsJson: String -> store.requeueEvents(eventsJson) } + + Function("bufferSize") { store.bufferSize() } + + Function("lastSyncAt") { store.getLastSyncAtMs().toDouble() } + + Function("markSynced") { timestampMs: Double -> + store.setLastSyncAtMs(timestampMs.toLong()) + } + } +} diff --git 
a/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationCaptureService.kt b/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationCaptureService.kt new file mode 100644 index 0000000..3a1b502 --- /dev/null +++ b/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationCaptureService.kt @@ -0,0 +1,98 @@ +package expo.modules.notificationcapture + +import android.app.Notification +import android.service.notification.NotificationListenerService +import android.service.notification.StatusBarNotification +import org.json.JSONObject + +/** + * NotificationListenerService implementation that captures cross-app + * notifications when the user has granted Notification Access in Android + * system settings. + * + * Lifecycle: bound by the OS at boot whenever the user has the access + * granted. We *do not* start it manually — the system manages it. + * + * Filtering happens in two places: + * 1. Master switch: NotificationStore.isEnabled() controls whether we + * record anything at all (so the user can pause without revoking + * Notification Access). + * 2. App allowlist: NotificationStore.getAllowlist() restricts capture + * to apps the user explicitly opted into. If the allowlist is empty + * we fall through to nothing being captured (safe default). + * + * Captured payload mirrors the backend's notifications.EventInput JSON + * shape so the JS layer can forward it without transformation. 
+ */ +class NotificationCaptureService : NotificationListenerService() { + + private val store: NotificationStore by lazy { NotificationStore(applicationContext) } + + override fun onNotificationPosted(sbn: StatusBarNotification) { + if (!store.isEnabled()) return + + val pkg = sbn.packageName ?: return + val allowlist = store.getAllowlist() + if (allowlist.isEmpty() || !allowlist.contains(pkg)) return + + val notification = sbn.notification ?: return + val extras = notification.extras + + val title = extras?.getCharSequence(Notification.EXTRA_TITLE)?.toString().orEmpty() + val text = extractText(extras) + val category = notification.category.orEmpty() + val appLabel = resolveAppLabel(pkg) + + val event = JSONObject().apply { + put("app_package", pkg) + put("app_label", appLabel) + put("title", title) + put("content", text) + put("category", category) + put("captured_at", android.text.format.DateFormat.format( + "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", + java.util.Calendar.getInstance(java.util.TimeZone.getTimeZone("UTC")).apply { + timeInMillis = sbn.postTime + } + ).toString()) + } + + store.bufferEvent(event) + } + + /** + * Extract human-readable text from the notification extras bundle. + * Tries EXTRA_BIG_TEXT first (full body for messaging apps) and falls + * back to EXTRA_TEXT (preview line). Joining EXTRA_TEXT_LINES handles + * inbox-style notifications (e.g. Gmail summaries). 
+ */ + private fun extractText(extras: android.os.Bundle?): String { + if (extras == null) return "" + val big = extras.getCharSequence(Notification.EXTRA_BIG_TEXT)?.toString() + if (!big.isNullOrEmpty()) return big + + val short = extras.getCharSequence(Notification.EXTRA_TEXT)?.toString() + if (!short.isNullOrEmpty()) return short + + val lines = extras.getCharSequenceArray(Notification.EXTRA_TEXT_LINES) + if (lines != null && lines.isNotEmpty()) { + return lines.joinToString(separator = "\n") { it.toString() } + } + return "" + } + + /** + * Translate a package name to a user-readable app label using the + * PackageManager. Falls back to the raw package on failure (better + * than a blank field in the rollup). + */ + private fun resolveAppLabel(pkg: String): String { + return try { + val pm = packageManager + val info = pm.getApplicationInfo(pkg, 0) + pm.getApplicationLabel(info).toString() + } catch (_: Exception) { + pkg + } + } +} diff --git a/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationStore.kt b/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationStore.kt new file mode 100644 index 0000000..215e638 --- /dev/null +++ b/mobile/modules/notification-capture/android/src/main/java/expo/modules/notificationcapture/NotificationStore.kt @@ -0,0 +1,133 @@ +package expo.modules.notificationcapture + +import android.content.Context +import android.content.SharedPreferences +import org.json.JSONArray +import org.json.JSONObject + +/** + * Tiny persistence layer for captured notifications and runtime preferences. + * + * Why SharedPreferences instead of Room/SQLite? The total volume is small + * (≤ ~5 minutes of notifications between flushes, ~50 events worst case), + * and SharedPreferences gives us atomic file writes and zero-cost startup. 
+ * If the buffer ever grows past a few hundred events we'll move to a + * proper SQLite cache, but for V1 this keeps the module dependency-free. + * + * Stored data: + * - "buffer" → JSON array of pending notification events + * - "allowlist" → JSON array of app package strings + * - "is_enabled" → boolean master switch + * - "last_sync_at_ms" → timestamp of the last successful flush + */ +class NotificationStore(context: Context) { + + private val prefs: SharedPreferences = context.applicationContext.getSharedPreferences( + PREFS_NAME, + Context.MODE_PRIVATE + ) + + fun isEnabled(): Boolean = prefs.getBoolean(KEY_ENABLED, false) + + fun setEnabled(enabled: Boolean) { + prefs.edit().putBoolean(KEY_ENABLED, enabled).apply() + } + + fun getAllowlist(): Set { + val raw = prefs.getString(KEY_ALLOWLIST, "[]") ?: "[]" + return try { + val arr = JSONArray(raw) + (0 until arr.length()).map { arr.getString(it) }.toSet() + } catch (_: Exception) { + emptySet() + } + } + + fun setAllowlist(packages: Collection) { + val arr = JSONArray() + packages.forEach { arr.put(it) } + prefs.edit().putString(KEY_ALLOWLIST, arr.toString()).apply() + } + + fun getLastSyncAtMs(): Long = prefs.getLong(KEY_LAST_SYNC, 0L) + + fun setLastSyncAtMs(ts: Long) { + prefs.edit().putLong(KEY_LAST_SYNC, ts).apply() + } + + /** + * Append a single notification event to the on-disk buffer. Called by + * NotificationCaptureService on every onNotificationPosted call. Bounded + * to MAX_BUFFER_SIZE entries; the oldest are dropped first so we don't + * grow without bound if the user revokes network access. + */ + @Synchronized + fun bufferEvent(event: JSONObject) { + val current = readBuffer() + current.put(event) + while (current.length() > MAX_BUFFER_SIZE) { + current.remove(0) + } + prefs.edit().putString(KEY_BUFFER, current.toString()).apply() + } + + /** + * Read and clear the in-memory buffer atomically. 
Returns the JSON + * array as a string so the JS side can pass it through unchanged to + * the API (no parse/re-stringify round-trip). + */ + @Synchronized + fun drainBuffer(): String { + val current = readBuffer() + prefs.edit().putString(KEY_BUFFER, "[]").apply() + return current.toString() + } + + @Synchronized + fun bufferSize(): Int = readBuffer().length() + + /** + * Prepend a batch of events back to the buffer. Used by the JS layer + * to recover from a failed upload without losing data. Older events + * are dropped first if the combined buffer would exceed MAX_BUFFER_SIZE + * — we always keep the freshest data because that's what the rollup + * cares about most. + */ + @Synchronized + fun requeueEvents(eventsJson: String) { + val incoming = try { + JSONArray(eventsJson) + } catch (_: Exception) { + return + } + if (incoming.length() == 0) return + + val current = readBuffer() + val combined = JSONArray() + for (i in 0 until incoming.length()) combined.put(incoming.get(i)) + for (i in 0 until current.length()) combined.put(current.get(i)) + + while (combined.length() > MAX_BUFFER_SIZE) { + combined.remove(0) + } + prefs.edit().putString(KEY_BUFFER, combined.toString()).apply() + } + + private fun readBuffer(): JSONArray { + val raw = prefs.getString(KEY_BUFFER, "[]") ?: "[]" + return try { + JSONArray(raw) + } catch (_: Exception) { + JSONArray() + } + } + + companion object { + private const val PREFS_NAME = "expo.modules.notificationcapture.prefs" + private const val KEY_ENABLED = "is_enabled" + private const val KEY_ALLOWLIST = "allowlist" + private const val KEY_BUFFER = "buffer" + private const val KEY_LAST_SYNC = "last_sync_at_ms" + private const val MAX_BUFFER_SIZE = 500 + } +} diff --git a/mobile/modules/notification-capture/expo-module.config.json b/mobile/modules/notification-capture/expo-module.config.json new file mode 100644 index 0000000..eab0671 --- /dev/null +++ b/mobile/modules/notification-capture/expo-module.config.json @@ -0,0 +1,8 @@ +{ 
+ "platforms": ["android"], + "android": { + "modules": [ + "expo.modules.notificationcapture.NotificationCaptureModule" + ] + } +} diff --git a/mobile/modules/notification-capture/package.json b/mobile/modules/notification-capture/package.json new file mode 100644 index 0000000..7de74c6 --- /dev/null +++ b/mobile/modules/notification-capture/package.json @@ -0,0 +1,10 @@ +{ + "name": "notification-capture", + "version": "0.1.0", + "description": "Local Expo module that exposes Android NotificationListenerService events to JS for the agent-setup template's optional notification capture feature.", + "main": "src/index.ts", + "types": "src/index.ts", + "private": true, + "keywords": ["expo", "android", "notifications"], + "license": "UNLICENSED" +} diff --git a/mobile/modules/notification-capture/src/index.ts b/mobile/modules/notification-capture/src/index.ts new file mode 100644 index 0000000..f011ee3 --- /dev/null +++ b/mobile/modules/notification-capture/src/index.ts @@ -0,0 +1,133 @@ +import { Platform } from "react-native"; +import { requireOptionalNativeModule } from "expo"; + +/** + * Type contract exposed by the native NotificationCaptureModule. Mirrors + * the Kotlin functions one-to-one. All methods are synchronous (the + * underlying SharedPreferences ops are O(1) at the volumes we care about). + * + * On non-Android platforms `nativeModule` is null and the helper functions + * below short-circuit to safe defaults so calling code can stay + * platform-agnostic. + */ +export interface NotificationCaptureNativeModule { + hasPermission(): boolean; + openSettings(): void; + isEnabled(): boolean; + setEnabled(enabled: boolean): void; + getAllowlist(): string[]; + setAllowlist(packages: string[]): void; + drainBuffer(): string; + requeueEvents(eventsJson: string): void; + bufferSize(): number; + lastSyncAt(): number; + markSynced(timestampMs: number): void; +} + +/** + * The shape of one notification event captured by the native side. 
Field + * names must match `notifications.EventInput` on the backend so the JS + * layer can forward the buffer verbatim. + */ +export interface CapturedNotification { + app_package: string; + app_label: string; + title: string; + content: string; + category: string; + captured_at: string; +} + +const nativeModule = requireOptionalNativeModule( + "NotificationCapture" +); + +export const isCaptureAvailable = Platform.OS === "android" && nativeModule != null; + +function unavailable(fallback: T): T { + if (__DEV__ && Platform.OS === "android" && !nativeModule) { + console.warn( + "[notification-capture] Native module is missing — was the project rebuilt after adding the module?" + ); + } + return fallback; +} + +export function hasPermission(): boolean { + return nativeModule ? nativeModule.hasPermission() : unavailable(false); +} + +export function openSettings(): void { + if (nativeModule) { + nativeModule.openSettings(); + } +} + +export function isEnabled(): boolean { + return nativeModule ? nativeModule.isEnabled() : unavailable(false); +} + +export function setEnabled(enabled: boolean): void { + if (nativeModule) { + nativeModule.setEnabled(enabled); + } +} + +export function getAllowlist(): string[] { + return nativeModule ? nativeModule.getAllowlist() : unavailable([]); +} + +export function setAllowlist(packages: string[]): void { + if (nativeModule) { + nativeModule.setAllowlist(packages); + } +} + +/** + * Drain the on-disk buffer atomically. Returns an array of captured + * events ready to upload. The native side clears the buffer in the same + * call so callers must persist (or upload) the result; data is lost if + * the result is dropped. + */ +export function drainBuffer(): CapturedNotification[] { + if (!nativeModule) { + return []; + } + const raw = nativeModule.drainBuffer(); + try { + const parsed = JSON.parse(raw) as CapturedNotification[]; + return Array.isArray(parsed) ? 
parsed : []; + } catch (err) { + if (__DEV__) { + console.warn("[notification-capture] drainBuffer parse error:", err); + } + return []; + } +} + +/** + * Prepend events back to the native buffer. Call this on upload failure + * with the events that were in the chunk that failed. The native side + * preserves freshest-first ordering and caps at the buffer max — older + * entries are dropped if the requeue would overflow. + */ +export function requeueEvents(events: CapturedNotification[]): void { + if (!nativeModule || events.length === 0) return; + nativeModule.requeueEvents(JSON.stringify(events)); +} + +export function bufferSize(): number { + return nativeModule ? nativeModule.bufferSize() : 0; +} + +export function lastSyncAt(): Date | null { + if (!nativeModule) return null; + const ms = nativeModule.lastSyncAt(); + return ms > 0 ? new Date(ms) : null; +} + +export function markSynced(when: Date = new Date()): void { + if (nativeModule) { + nativeModule.markSynced(when.getTime()); + } +} diff --git a/mobile/package.json b/mobile/package.json index d79f6c9..085054f 100644 --- a/mobile/package.json +++ b/mobile/package.json @@ -8,7 +8,10 @@ "ios": "expo run:ios", "android": "expo run:android", "web": "expo start --web --port 8081", - "typecheck": "tsc --noEmit" + "typecheck": "tsc --noEmit", + "build:apk:preview": "eas build --platform android --profile preview --non-interactive", + "build:apk:production": "eas build --platform android --profile production-apk --non-interactive", + "prebuild:android": "expo prebuild --platform android --clean" }, "dependencies": { "@expo-google-fonts/inter": "^0.4.1", diff --git a/mobile/providers/NotificationCaptureProvider.tsx b/mobile/providers/NotificationCaptureProvider.tsx new file mode 100644 index 0000000..808a06e --- /dev/null +++ b/mobile/providers/NotificationCaptureProvider.tsx @@ -0,0 +1,206 @@ +import { + createContext, + useCallback, + useContext, + useEffect, + useMemo, + useState, + type ReactNode, +} from 
"react"; +import { AppState, type AppStateStatus } from "react-native"; + +import { + bufferSize, + getAllowlist, + hasPermission, + isCaptureAvailable, + isEnabled as nativeIsEnabled, + lastSyncAt as nativeLastSyncAt, + openSettings as nativeOpenSettings, + setAllowlist as nativeSetAllowlist, + setEnabled as nativeSetEnabled, +} from "../modules/notification-capture/src"; + +import { useAuthSession } from "@/providers/AuthSessionProvider"; +import { flushNow, startSync, stopSync } from "@/services/notificationSync"; +import { NOTIFICATIONS_CAPTURE_ENABLED } from "@/config"; + +type NotificationCaptureContextValue = { + isAvailable: boolean; + isEnabled: boolean; + hasPermission: boolean; + allowlist: string[]; + pendingCount: number; + lastSyncAt: Date | null; + setEnabled: (enabled: boolean) => void; + setAllowlist: (packages: string[]) => void; + openPermissionSettings: () => void; + flushNow: () => Promise<number>; + refresh: () => void; +}; + +const NotificationCaptureContext = createContext<NotificationCaptureContextValue | null>( + null, +); + +const FALLBACK_VALUE: NotificationCaptureContextValue = { + isAvailable: false, + isEnabled: false, + hasPermission: false, + allowlist: [], + pendingCount: 0, + lastSyncAt: null, + setEnabled: () => undefined, + setAllowlist: () => undefined, + openPermissionSettings: () => undefined, + flushNow: async () => 0, + refresh: () => undefined, +}; + +/** + * NotificationCaptureProvider lifts the native module's session-shaped + * state into React. It runs only when: + * 1. The deployment opted into the feature (EXPO_PUBLIC_NOTIFICATIONS_ENABLED=true). + * 2. The native module is actually present (Android + a build that + * includes modules/notification-capture). + * 3. The user is authenticated (uploads need a JWT). + * + * When any of those checks fail the provider returns a frozen + * "unavailable" context so consumers can still render conditional UI + * without a separate availability hook.
 + * + * The provider is purely a React surface: persistence (allowlist, master + * switch) lives on the native side; transport (uploadBatch) lives in + * services/notificationSync.ts. The provider only orchestrates start/stop + * of the foreground flush loop based on the master switch + auth state. + */ +export function NotificationCaptureProvider({ children }: { children: ReactNode }) { + const { isAuthenticated } = useAuthSession(); + const [isEnabled, setIsEnabled] = useState(false); + const [permission, setPermission] = useState(false); + const [allowlist, setAllowlistState] = useState<string[]>([]); + const [pending, setPending] = useState(0); + const [lastSync, setLastSync] = useState<Date | null>(null); + + const featureOn = NOTIFICATIONS_CAPTURE_ENABLED && isCaptureAvailable; + + const refresh = useCallback(() => { + if (!featureOn) { + setIsEnabled(false); + setPermission(false); + setAllowlistState([]); + setPending(0); + setLastSync(null); + return; + } + setIsEnabled(nativeIsEnabled()); + setPermission(hasPermission()); + setAllowlistState(getAllowlist()); + setPending(bufferSize()); + setLastSync(nativeLastSyncAt()); + }, [featureOn]); + + useEffect(() => { + refresh(); + }, [refresh]); + + useEffect(() => { + if (!featureOn) return; + const sub = AppState.addEventListener("change", (state: AppStateStatus) => { + if (state === "active") { + refresh(); + } + }); + return () => sub.remove(); + }, [featureOn, refresh]); + + useEffect(() => { + if (!featureOn || !isAuthenticated) { + stopSync(); + return; + } + if (isEnabled) { + startSync(); + void flushNow().then((accepted) => { + if (accepted > 0) { + refresh(); + } + }); + } else { + stopSync(); + } + return () => stopSync(); + }, [featureOn, isAuthenticated, isEnabled, refresh]); + + const setEnabled = useCallback( + (enabled: boolean) => { + if (!featureOn) return; + nativeSetEnabled(enabled); + setIsEnabled(enabled); + }, + [featureOn], + ); + + const setAllowlist = useCallback( + (packages: string[]) => { + if
(!featureOn) return; + nativeSetAllowlist(packages); + setAllowlistState(packages); + }, + [featureOn], + ); + + const openPermissionSettings = useCallback(() => { + if (!featureOn) return; + nativeOpenSettings(); + }, [featureOn]); + + const flush = useCallback(async () => { + const accepted = await flushNow(); + refresh(); + return accepted; + }, [refresh]); + + const value = useMemo( + () => + featureOn + ? { + isAvailable: true, + isEnabled, + hasPermission: permission, + allowlist, + pendingCount: pending, + lastSyncAt: lastSync, + setEnabled, + setAllowlist, + openPermissionSettings, + flushNow: flush, + refresh, + } + : FALLBACK_VALUE, + [ + featureOn, + isEnabled, + permission, + allowlist, + pending, + lastSync, + setEnabled, + setAllowlist, + openPermissionSettings, + flush, + refresh, + ], + ); + + return ( + <NotificationCaptureContext.Provider value={value}> + {children} + </NotificationCaptureContext.Provider> + ); +} + +export function useNotificationCapture(): NotificationCaptureContextValue { + const ctx = useContext(NotificationCaptureContext); + return ctx ?? FALLBACK_VALUE; +} diff --git a/mobile/services/notificationSync.ts b/mobile/services/notificationSync.ts new file mode 100644 index 0000000..ee91974 --- /dev/null +++ b/mobile/services/notificationSync.ts @@ -0,0 +1,128 @@ +import { AppState, type AppStateStatus, type NativeEventSubscription } from "react-native"; + +import { + bufferSize, + drainBuffer, + isCaptureAvailable, + isEnabled, + markSynced, + requeueEvents, +} from "../modules/notification-capture/src"; + +import { uploadBatch } from "@/services/notifications"; + +/** + * Cadence at which the foreground sync loop runs. Five minutes balances + * "fresh enough that the rollup feels current" against "don't drain the + * battery" — the sync work is tiny but every wake costs. + */ +const FLUSH_INTERVAL_MS = 5 * 60 * 1000; + +/** + * Hard cap on events shipped per HTTP request. Above this we split into + * sequential POSTs.
Matches NOTIFICATIONS_MAX_PAGE_SIZE on the server so + * we never get rejected for over-sized batches. + */ +const MAX_EVENTS_PER_REQUEST = 200; + +let intervalHandle: ReturnType<typeof setInterval> | null = null; +let appStateSubscription: NativeEventSubscription | null = null; +let inflight = false; + +/** + * Singleton flush — guards against re-entrance from overlapping interval + * + app-foregrounded events. Returns the number of accepted events for + * the caller to surface in the UI ("X notifications synced"). + * + * Failure handling: drainBuffer() empties the native store atomically. + * If a chunk upload throws (network drop, auth expiry, server 500) we + * requeue every event that hasn't been accepted yet so the next flush + * picks them up. This is the property a real-estate agent driving + * between properties on flaky cell needs — no notification is ever + * silently lost because of a bad signal. + */ +export async function flushNow(): Promise<number> { + if (!isCaptureAvailable || !isEnabled() || inflight) { + return 0; + } + inflight = true; + try { + const events = drainBuffer(); + if (events.length === 0) { + return 0; + } + let accepted = 0; + for (let i = 0; i < events.length; i += MAX_EVENTS_PER_REQUEST) { + const chunk = events.slice(i, i + MAX_EVENTS_PER_REQUEST); + try { + const result = await uploadBatch(chunk); + accepted += result.accepted; + } catch (err) { + const remaining = events.slice(i); + requeueEvents(remaining); + if (__DEV__) { + console.warn( + `[notification-sync] upload failed at offset ${i}; requeued ${remaining.length} events`, + err, + ); + } + return accepted; + } + } + markSynced(new Date()); + return accepted; + } finally { + inflight = false; + } +} + +/** + * startSync wires the foreground flush loop. Idempotent — calling twice + * just resets the interval. The system suspends timers when the app + * backgrounds, so we also flush on AppState "active" transitions to + * cover the case of returning to foreground after an idle period.
+ */ +export function startSync(): void { + if (!isCaptureAvailable) return; + if (intervalHandle) { + clearInterval(intervalHandle); + } + intervalHandle = setInterval(() => { + void flushNow(); + }, FLUSH_INTERVAL_MS); + + if (appStateSubscription) { + appStateSubscription.remove(); + } + appStateSubscription = AppState.addEventListener( + "change", + (state: AppStateStatus) => { + if (state === "active") { + void flushNow(); + } + }, + ); +} + +/** + * stopSync tears down the timers and listeners. Safe to call from + * unmount paths or when the user disables capture in settings. + */ +export function stopSync(): void { + if (intervalHandle) { + clearInterval(intervalHandle); + intervalHandle = null; + } + if (appStateSubscription) { + appStateSubscription.remove(); + appStateSubscription = null; + } +} + +/** + * pendingCount is exported for the settings screen so the user can see + * "X events waiting to upload". Reads from the native buffer directly. + */ +export function pendingCount(): number { + return isCaptureAvailable ? bufferSize() : 0; +} diff --git a/mobile/services/notifications.ts b/mobile/services/notifications.ts new file mode 100644 index 0000000..5cf7234 --- /dev/null +++ b/mobile/services/notifications.ts @@ -0,0 +1,48 @@ +import { request } from "@/services/api"; + +import type { CapturedNotification } from "../modules/notification-capture/src"; + +export type { CapturedNotification }; + +export interface IngestResult { + accepted: number; +} + +export interface AppSummary { + app_package: string; + app_label: string; + count: number; + last_at: string; +} + +/** + * uploadBatch ships a freshly drained buffer to the backend. Returns the + * number of rows the server actually accepted (post-deduplication). + * + * The shape is intentionally minimal: the native side already produces + * payloads matching the backend's `notifications.EventInput` JSON schema, + * so this function is a thin POST and nothing more. 
 + */ +export async function uploadBatch(events: CapturedNotification[]): Promise<IngestResult> { + if (events.length === 0) { + return { accepted: 0 }; + } + return request<IngestResult>("/api/notifications/batch", { + method: "POST", + body: JSON.stringify({ events }), + skipTeamHeader: true, + }); +} + +/** + * listCapturedApps powers the settings screen's "X apps captured" stat. + * Calls the same handler the agent's notifications_apps MCP tool wraps so + * the two views can never diverge. + */ +export async function listCapturedApps(): Promise<AppSummary[]> { + const res = await request<{ apps: AppSummary[]; count: number }>( + "/api/notifications/apps", + { skipTeamHeader: true }, + ); + return res.apps ?? []; +}