From fe2bf829bc0daec3ad8161979c4319a90d0d1f0f Mon Sep 17 00:00:00 2001 From: hellno Date: Thu, 23 Apr 2026 19:10:52 +0200 Subject: [PATCH 01/18] db: signer hardening (RLS, authorization RPCs, grants) Adds three migrations for server-side signer security: - 1_rls_and_policies: search_path hardening, WITH CHECK tightening on accounts/accounts_to_channel/customers/profile, draft.created_by_user_id column + policy, auto-interaction ownership trigger, drop user INSERT on signing_audit_log, FORCE RLS on sensitive tables. - 2_rpcs: authorize_draft_publish / authorize_auto_interaction SECURITY DEFINER RPCs that enforce ownership invariants for cron paths. - 3_grants_and_audit: revoke decrypted view grants from anon/authenticated, add actor_user_id + source columns to signing_audit_log, revoke vault USAGE from authenticated, narrow pgsodium_keyiduser to service_role. Per-action JWT minting replaces cron service-role bypass (see signer commit). decrypted_account(uuid) EXECUTE deliberately retained for authenticated (RPC self-filters on auth.uid(); revoking would break the signer path). Phase 6 operator actions (KEK wrapping, service-role rotation, cron secret vault migration) are documented in .context/phase-6-operator-actions.md and left as inline comments at the bottom of migration 3. --- ...0001_signer_hardening_rls_and_policies.sql | 146 +++++++++++++++++ .../20260423000002_signer_hardening_rpcs.sql | 80 +++++++++ ...0003_signer_hardening_grants_and_audit.sql | 152 ++++++++++++++++++ 3 files changed, 378 insertions(+) create mode 100644 supabase/migrations/20260423000001_signer_hardening_rls_and_policies.sql create mode 100644 supabase/migrations/20260423000002_signer_hardening_rpcs.sql create mode 100644 supabase/migrations/20260423000003_signer_hardening_grants_and_audit.sql diff --git a/supabase/migrations/20260423000001_signer_hardening_rls_and_policies.sql b/supabase/migrations/20260423000001_signer_hardening_rls_and_policies.sql new file mode 100644 index 00000000..efc802a2 --- /dev/null +++ b/supabase/migrations/20260423000001_signer_hardening_rls_and_policies.sql @@ -0,0 +1,146 @@ +-- Signer-key hardening (Phase 0.3 + Phase 1): deterministic search_path, RLS tightening, +-- draft ownership column, auto-interaction ownership trigger, FORCE RLS on sensitive tables. +-- +-- Idempotent where reasonable. Policy names match what exists in the live schema +-- (see 20231201175719_schema_test.sql, 20240612125244_add_scheduled_casts.sql, +-- 20240722095848_add_customers.sql, 20250709155039_remote_schema.sql). 
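+--
+-- Illustrative post-migration check (ids, column list, and claim GUC are
+-- hypothetical; exact session setup varies by Supabase version). With the
+-- WITH CHECK tightening below, inserting a row for a user_id other than the
+-- caller's should fail:
+--
+--   BEGIN;
+--   SET LOCAL ROLE authenticated;
+--   SET LOCAL request.jwt.claims = '{"sub":"11111111-1111-1111-1111-111111111111"}';
+--   INSERT INTO public.accounts (user_id /* , ... */)
+--   VALUES ('22222222-2222-2222-2222-222222222222' /* , ... */);
+--   -- expected: ERROR: new row violates row-level security policy for table "accounts"
+--   ROLLBACK;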
+ +-- ============================================================================ +-- Phase 0.3: deterministic search_path on currently-flagged functions +-- ============================================================================ + +ALTER FUNCTION public.update_modified_column() SET search_path = 'public','pg_catalog'; +ALTER FUNCTION public.sync_email_to_profile() SET search_path = 'public','pg_catalog'; +ALTER FUNCTION public.is_account_of_user(uuid, uuid) SET search_path = 'public','pg_catalog'; +ALTER FUNCTION public.trigger_process_auto_interactions() SET search_path = 'public','pg_catalog'; +ALTER FUNCTION public.accounts_encrypt_secret_private_key() SET search_path = 'public','pg_catalog'; +ALTER FUNCTION public.accounts_encrypt_secret_farcaster_api_key() SET search_path = 'public','pg_catalog'; + +-- ============================================================================ +-- Phase 1.1: tighten WITH CHECK on user-scoped tables so inserts/updates must +-- target a row the caller actually owns. Without WITH CHECK, authenticated +-- users can INSERT rows with any user_id value. +-- ============================================================================ + +ALTER POLICY "Enable access for users based on user_id" + ON public.accounts + WITH CHECK (auth.uid() = user_id); + +ALTER POLICY "Enable access to rows for users" + ON public.accounts_to_channel + WITH CHECK (public.is_account_of_user(auth.uid(), account_id)); + +ALTER POLICY "Enable access for users based on user_id" + ON public.customers + WITH CHECK (auth.uid() = user_id); + +ALTER POLICY "Enable access for users based on user_id" + ON public.profile + WITH CHECK (auth.uid() = user_id); + +-- Note: public.list already has tightened WITH CHECK (auth.uid() = user_id) +-- from migration 20250625101126_remote_schema.sql. No change needed. +-- Note: public.channel INSERT policy is intentionally permissive. Channels are a +-- shared, protocol-level namespace (Farcaster channels belong to everyone, not +-- individual herocast users) so any authenticated user may insert channel +-- metadata rows. No user-owned secret material lives on `channel`, so a loose +-- WITH CHECK carries no confidentiality impact. Tagged `lint-allow-with-check-true` +-- so the CI linter permits it going forward. +-- lint-allow-with-check-true: channel is a shared protocol-namespace table + +-- ============================================================================ +-- Phase 1.2: draft.created_by_user_id — records the user who *scheduled* the draft, +-- independent of the account it publishes from. This is the ownership anchor used by +-- the authorize_draft_publish RPC to verify drafts were created by the account owner. +-- ============================================================================ + +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM information_schema.columns + WHERE table_schema = 'public' + AND table_name = 'draft' + AND column_name = 'created_by_user_id' + ) THEN + ALTER TABLE public.draft + ADD COLUMN created_by_user_id uuid DEFAULT auth.uid() + REFERENCES auth.users(id) ON DELETE CASCADE; + END IF; +END $$; + +-- Backfill: existing drafts are assumed to have been created by the account owner. 
+UPDATE public.draft d +SET created_by_user_id = a.user_id +FROM public.accounts a +WHERE d.account_id = a.id + AND d.created_by_user_id IS NULL; + +ALTER TABLE public.draft ALTER COLUMN created_by_user_id SET NOT NULL; + +-- ============================================================================ +-- Phase 1.3: tighten draft policy — in addition to requiring the draft's account +-- to belong to the caller, also require the draft's creator to be the caller. +-- ============================================================================ + +ALTER POLICY "Enable access to rows for users" + ON public.draft + WITH CHECK ( + public.is_account_of_user(auth.uid(), account_id) + AND created_by_user_id = auth.uid() + ); + +-- ============================================================================ +-- Phase 1.4: auto-interaction list ownership trigger +-- Enforces at DB level that contents->>'sourceAccountId' refers to an account +-- owned by the same user who owns the list. +-- ============================================================================ + +CREATE OR REPLACE FUNCTION public.validate_auto_interaction_list() +RETURNS TRIGGER +LANGUAGE plpgsql +SET search_path = 'public','pg_catalog' +AS $$ +DECLARE + src_account_id uuid; + src_user_id uuid; +BEGIN + IF NEW.type <> 'auto_interaction' THEN + RETURN NEW; + END IF; + + src_account_id := (NEW.contents->>'sourceAccountId')::uuid; + IF src_account_id IS NULL THEN + RAISE EXCEPTION 'auto_interaction list requires contents.sourceAccountId'; + END IF; + + SELECT user_id INTO src_user_id FROM public.accounts WHERE id = src_account_id; + IF src_user_id IS DISTINCT FROM NEW.user_id THEN + RAISE EXCEPTION 'sourceAccountId must belong to list owner'; + END IF; + + RETURN NEW; +END; +$$; + +DROP TRIGGER IF EXISTS enforce_auto_interaction_ownership ON public.list; +CREATE TRIGGER enforce_auto_interaction_ownership + BEFORE INSERT OR UPDATE ON public.list + FOR EACH ROW EXECUTE FUNCTION public.validate_auto_interaction_list(); + +-- ============================================================================ +-- Phase 1.5: remove user INSERT on signing_audit_log. The signer edge function +-- writes audit rows via the service-role client (narrow, legitimate use). +-- ============================================================================ + +DROP POLICY IF EXISTS "Users can insert own audit logs" ON public.signing_audit_log; + +-- ============================================================================ +-- Phase 1.6: FORCE RLS on sensitive tables so row-owning postgres roles +-- (e.g. extensions running as table owner) cannot bypass policies. +-- ============================================================================ + +ALTER TABLE public.accounts FORCE ROW LEVEL SECURITY; +ALTER TABLE public.draft FORCE ROW LEVEL SECURITY; +ALTER TABLE public.list FORCE ROW LEVEL SECURITY; +ALTER TABLE public.signing_audit_log FORCE ROW LEVEL SECURITY; +ALTER TABLE public.signing_idempotency FORCE ROW LEVEL SECURITY; diff --git a/supabase/migrations/20260423000002_signer_hardening_rpcs.sql b/supabase/migrations/20260423000002_signer_hardening_rpcs.sql new file mode 100644 index 00000000..965720b2 --- /dev/null +++ b/supabase/migrations/20260423000002_signer_hardening_rpcs.sql @@ -0,0 +1,80 @@ +-- Signer-key hardening (Phase 2a): authorization RPCs used by cron edge functions +-- as the trust boundary for signer service calls. 
+--
+-- Both RPCs are SECURITY DEFINER and called by the service-role client from
+-- publish-cast-from-db and process-auto-interactions. They verify ownership
+-- invariants, then return the validated (owner_user_id, account/source_account_id)
+-- tuple. Callers mint a short-TTL user JWT with the returned sub and present it
+-- to the signer service (no more service-role bypass).
+
+-- ============================================================================
+-- authorize_draft_publish: verifies the draft is 'scheduled' and that its
+-- created_by_user_id matches the account owner. Raises on any mismatch.
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION public.authorize_draft_publish(p_draft_id uuid)
+RETURNS TABLE(owner_user_id uuid, account_id uuid)
+LANGUAGE plpgsql
+SECURITY DEFINER
+SET search_path = 'public','pg_catalog'
+AS $$
+BEGIN
+  RETURN QUERY
+  SELECT a.user_id, d.account_id
+  FROM public.draft d
+  JOIN public.accounts a ON a.id = d.account_id
+  WHERE d.id = p_draft_id
+    AND d.status = 'scheduled'
+    AND d.created_by_user_id = a.user_id;
+
+  IF NOT FOUND THEN
+    RAISE EXCEPTION 'unauthorized draft publish' USING ERRCODE = 'P0001';
+  END IF;
+END;
+$$;
+
+ALTER FUNCTION public.authorize_draft_publish(uuid) OWNER TO postgres;
+REVOKE ALL ON FUNCTION public.authorize_draft_publish(uuid) FROM PUBLIC;
+GRANT EXECUTE ON FUNCTION public.authorize_draft_publish(uuid) TO service_role;
+
+-- ============================================================================
+-- authorize_auto_interaction: verifies the list is of type 'auto_interaction',
+-- has a sourceAccountId, and that the source account belongs to the list owner.
+-- Returns the owner's user_id and the validated source account id.
+-- ============================================================================
+
+CREATE OR REPLACE FUNCTION public.authorize_auto_interaction(p_list_id uuid)
+RETURNS TABLE(owner_user_id uuid, source_account_id uuid)
+LANGUAGE plpgsql
+SECURITY DEFINER
+SET search_path = 'public','pg_catalog'
+AS $$
+DECLARE
+  _list_user uuid;
+  _src uuid;
+  _src_user uuid;
+BEGIN
+  SELECT l.user_id, (l.contents->>'sourceAccountId')::uuid
+  INTO _list_user, _src
+  FROM public.list l
+  WHERE l.id = p_list_id
+    AND l.type = 'auto_interaction';
+
+  IF _list_user IS NULL OR _src IS NULL THEN
+    RAISE EXCEPTION 'list not found or sourceAccountId missing' USING ERRCODE = 'P0001';
+  END IF;
+
+  SELECT user_id INTO _src_user FROM public.accounts WHERE id = _src;
+  IF _src_user IS DISTINCT FROM _list_user THEN
+    RAISE EXCEPTION 'sourceAccountId ownership mismatch' USING ERRCODE = 'P0001';
+  END IF;
+
+  owner_user_id := _list_user;
+  source_account_id := _src;
+  RETURN NEXT;
+END;
+$$;
+
+ALTER FUNCTION public.authorize_auto_interaction(uuid) OWNER TO postgres;
+REVOKE ALL ON FUNCTION public.authorize_auto_interaction(uuid) FROM PUBLIC;
+GRANT EXECUTE ON FUNCTION public.authorize_auto_interaction(uuid) TO service_role;
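+
+-- Illustrative call pattern from the cron path (id hypothetical). Note that the
+-- RPCs raise P0001 on any mismatch rather than returning zero rows, so callers
+-- can treat "error" and "empty result" identically:
+--
+--   SELECT owner_user_id, account_id
+--   FROM public.authorize_draft_publish('00000000-0000-0000-0000-0000000000aa');
+--   -- ERROR:  unauthorized draft publish   (not 'scheduled' / wrong creator)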
diff --git a/supabase/migrations/20260423000003_signer_hardening_grants_and_audit.sql b/supabase/migrations/20260423000003_signer_hardening_grants_and_audit.sql
new file mode 100644
index 00000000..d20a5e44
--- /dev/null
+++ b/supabase/migrations/20260423000003_signer_hardening_grants_and_audit.sql
@@ -0,0 +1,152 @@
+-- Signer-key hardening (Phases 3, 4.1, 6.3): grant cleanup, audit log expansion,
+-- vault lockdown, and pgsodium decrypt narrowing.
+--
+-- All revokes use DO blocks where the target role may not be granted, so the
+-- migration is safe to re-run and survives variation between Supabase projects.
+
+-- ============================================================================
+-- Phase 3.1: revoke direct view grants from anon/authenticated. The signer now
+-- calls the decrypted_account RPC (next block); no other caller should read
+-- the decrypted views directly.
+-- ============================================================================
+
+REVOKE ALL ON public.decrypted_accounts FROM anon, authenticated;
+REVOKE ALL ON public.decrypted_dm_accounts FROM anon, authenticated;
+
+-- ============================================================================
+-- Phase 3.2: decrypted_account RPC EXECUTE is deliberately retained for
+-- `authenticated`.
+--
+-- The RPC is SECURITY DEFINER and self-filters on `auth.uid() = user_id`,
+-- which is the intended user-facing decryption boundary. Revoking EXECUTE
+-- from `authenticated` would break the signer's user-JWT client path
+-- (the signer calls the RPC via a Supabase client whose role is
+-- `authenticated`, carrying either a real user JWT or a cron-minted short-
+-- lived JWT). The view revokes above (Phase 3.1) are the surface we locked
+-- down because the view has no built-in filter; the RPC stays open to
+-- authenticated callers by design. Application-level defense-in-depth lives
+-- in `farcaster-signer/lib/accounts.ts`, which double-checks the returned
+-- row's `user_id` against the caller's JWT `sub`.
+--
+-- The GRANT below is redundant with service_role's default privileges but
+-- is harmless and makes the intent explicit.
+-- ============================================================================
+
+GRANT EXECUTE ON FUNCTION public.decrypted_account(uuid) TO service_role;
+
+-- ============================================================================
+-- Phase 3.3: audit log expansion. Adds actor_user_id (who initiated the action,
+-- may differ from signing_audit_log.user_id which is the account owner) and
+-- source (user action vs. cron job).
+-- ============================================================================
+
+DO $$
+BEGIN
+  IF NOT EXISTS (
+    SELECT 1 FROM information_schema.columns
+    WHERE table_schema = 'public'
+      AND table_name = 'signing_audit_log'
+      AND column_name = 'actor_user_id'
+  ) THEN
+    ALTER TABLE public.signing_audit_log
+      ADD COLUMN actor_user_id uuid REFERENCES auth.users(id) ON DELETE SET NULL;
+  END IF;
+
+  IF NOT EXISTS (
+    SELECT 1 FROM information_schema.columns
+    WHERE table_schema = 'public'
+      AND table_name = 'signing_audit_log'
+      AND column_name = 'source'
+  ) THEN
+    ALTER TABLE public.signing_audit_log
+      ADD COLUMN source text
+        CHECK (source IN ('user','cron:publish','cron:auto-interaction','system'))
+        DEFAULT 'user';
+  END IF;
+END $$;
+
+-- ============================================================================
+-- Phase 4.1: vault lockdown. Do this BEFORE any secret is placed in vault.secrets.
+-- The vault schema should only be readable by service_role / postgres.
+-- ============================================================================
+
+DO $$
+BEGIN
+  IF EXISTS (SELECT 1 FROM information_schema.schemata WHERE schema_name = 'vault') THEN
+    EXECUTE 'REVOKE USAGE ON SCHEMA vault FROM authenticated';
+    EXECUTE 'REVOKE ALL ON vault.secrets FROM authenticated';
+    EXECUTE 'REVOKE ALL ON vault.decrypted_secrets FROM authenticated';
+  END IF;
+END $$;
+
+-- ============================================================================
+-- Phase 6.3: narrow pgsodium decrypt capability to service_role only.
+-- Guarded because the role may not be granted to authenticated in every project.
+-- ============================================================================
+
+DO $$
+BEGIN
+  IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'pgsodium_keyiduser') THEN
+    BEGIN
+      EXECUTE 'REVOKE pgsodium_keyiduser FROM authenticated';
+    EXCEPTION WHEN OTHERS THEN
+      -- Not granted to authenticated in this project — safe to ignore.
+      NULL;
+    END;
+  END IF;
+END $$;
+
+-- ============================================================================
+-- OPERATOR-ACTION SQL (NOT executed by this migration)
+-- ============================================================================
+-- The steps below require Supabase-dashboard or manual operator actions and are
+-- documented here so the operator can apply them in a controlled way.
+-- See `.context/phase-6-operator-actions.md` for full context.
+--
+-- ----------------------------------------------------------------------------
+-- Phase 4.2: rotate service_role, migrate cron secrets to vault
+-- ----------------------------------------------------------------------------
+-- 1) In Supabase dashboard: Project Settings -> API -> Rotate service_role.
+--    Note the new JWT value.
+--
+-- 2) Store the rotated secret in vault (requires postgres/service_role connection):
+--
+--      INSERT INTO vault.secrets (name, secret)
+--      VALUES ('signer_cron_token', '<rotated service_role JWT>');
+--
+-- 3) For each cron job that currently embeds the old service_role JWT in its
+--    command (jobids observed: 3, 5, 6, 9, 11), rewrite the command to read
+--    the token from vault at call time. Example pattern:
+--
+--      UPDATE cron.job
+--      SET command = $cron$
+--        SELECT net.http_post(
+--          url := 'https://<project-ref>.supabase.co/functions/v1/publish-cast-from-db',
+--          headers := jsonb_build_object(
+--            'Authorization', 'Bearer ' || (SELECT decrypted_secret FROM vault.decrypted_secrets WHERE name = 'signer_cron_token'),
+--            'Content-Type', 'application/json'
+--          ),
+--          body := '{}'::jsonb
+--        );
+--      $cron$
+--      WHERE jobid = 3;
+--
+-- 4) Revoke the old JWT: Supabase dashboard -> rotate again if the previous
+--    value leaked, otherwise the earlier rotation already invalidates it.
+--
+-- ----------------------------------------------------------------------------
+-- Phase 6.1: wrap pgsodium KEK under platform root key
+-- ----------------------------------------------------------------------------
+-- The pgsodium key `dcd0dca7-c03a-40c5-b348-fefb87be2845` is currently stored in
+-- the database. Wrap it under the Supabase platform root key so an admin with
+-- DB-only access cannot exfiltrate the KEK.
+--
+-- Procedure is Supabase-specific; see:
+--   https://supabase.com/docs/guides/database/vault
+-- for the current instructions.
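+--
+-- Illustrative key inventory to run before Phases 6.1/6.2 (assumes pgsodium's
+-- standard `valid_key` view; confirm column names against your pgsodium version):
+--
+--      SELECT id, name, status, created
+--      FROM pgsodium.valid_key
+--      ORDER BY created;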
+-- +-- ---------------------------------------------------------------------------- +-- Phase 6.2 (housekeeping): retire the unnamed pgsodium key `95866e70…` +-- ---------------------------------------------------------------------------- +-- Audit all code and schema references first. If none remain, mark the key +-- inactive via the Supabase dashboard or pgsodium key-management SQL. From f6f265e8ef0e53351e8d3516d5e848c6fbc5b9f1 Mon Sep 17 00:00:00 2001 From: hellno Date: Thu, 23 Apr 2026 19:11:06 +0200 Subject: [PATCH 02/18] signer: remove service-role bypass, use per-action JWTs Replaces the signer service-role bypass (any holder of service_role could sign as any account) with a per-action JWT flow: - farcaster-signer/lib/auth.ts: delete the service_role branch. Signer now always validates via supabase.auth.getUser(). Extracts a validated `source` claim from the JWT for audit attribution. - farcaster-signer/lib/accounts.ts: userId is required; reads via decrypted_account(uuid) RPC; defense-in-depth check that account.user_id matches the JWT sub. - farcaster-signer/lib/audit.ts: constructs a service-role client internally for signing_audit_log INSERTs (the user-INSERT policy was dropped for log integrity). Writes actor_user_id + source. - publish-cast-from-db / process-auto-interactions: call the new authorize_draft_publish / authorize_auto_interaction RPCs before signing; mint a 60s-TTL HS256 JWT locally using SUPABASE_JWT_SECRET (sub = validated owner_user_id); pass it to the signer instead of the service_role key. - _shared/redact.ts: redactHeaders / redactSecrets helpers applied to error-log dumps so axios errors no longer leak Authorization / Neynar api_key headers into Sentry. Requires the three db: migrations to have applied first so the authorize_* RPCs exist and the signing_audit_log schema includes actor_user_id / source. Deploy atomically with migrations. --- supabase/functions/_shared/redact.ts | 36 ++++++ .../farcaster-signer/handlers/cast.ts | 14 +- .../farcaster-signer/handlers/follow.ts | 14 +- .../farcaster-signer/handlers/reaction.ts | 14 +- .../farcaster-signer/handlers/user-data.ts | 7 +- .../farcaster-signer/lib/accounts.ts | 40 +++--- .../functions/farcaster-signer/lib/audit.ts | 60 ++++++++- .../functions/farcaster-signer/lib/auth.ts | 43 +++++-- .../functions/farcaster-signer/lib/types.ts | 11 +- .../process-auto-interactions/index.ts | 120 +++++++++++++++--- .../functions/publish-cast-from-db/index.ts | 105 +++++++++++++-- 11 files changed, 396 insertions(+), 68 deletions(-) create mode 100644 supabase/functions/_shared/redact.ts diff --git a/supabase/functions/_shared/redact.ts b/supabase/functions/_shared/redact.ts new file mode 100644 index 00000000..f5819eab --- /dev/null +++ b/supabase/functions/_shared/redact.ts @@ -0,0 +1,36 @@ +/** + * Log-scrubbing helpers shared across edge functions. + * + * `redactHeaders`: returns a shallow copy of a header map with any key outside + * a small safelist replaced with `[REDACTED]`. Use when dumping `error.config.headers`, + * `error.response.headers`, or any raw HTTP header bag into logs. + * + * `redactSecrets`: regex-replaces tokens that look like Supabase JWTs (starting with + * `eyJ...`) or service-role keys (starting with `sb_...`) in an already-serialized + * string. Use on `JSON.stringify(error.config.data)` / `error.response.data` before + * logging so that accidental secret leakage is scrubbed. 
+ */ + +export function redactHeaders( + headers: Record | undefined | null, +): Record { + if (!headers) return {}; + const SAFE = new Set([ + 'content-type', + 'user-agent', + 'accept', + 'accept-encoding', + ]); + const out: Record = {}; + for (const [key, value] of Object.entries(headers)) { + out[key] = SAFE.has(key.toLowerCase()) ? value : '[REDACTED]'; + } + return out; +} + +export function redactSecrets(serialized: string): string { + if (!serialized) return serialized; + return serialized + .replace(/eyJ[A-Za-z0-9_\-.]{40,}/g, '[REDACTED_JWT]') + .replace(/sb_[a-z0-9_]{30,}/g, '[REDACTED_SB]'); +} diff --git a/supabase/functions/farcaster-signer/handlers/cast.ts b/supabase/functions/farcaster-signer/handlers/cast.ts index 43737371..d84b2281 100644 --- a/supabase/functions/farcaster-signer/handlers/cast.ts +++ b/supabase/functions/farcaster-signer/handlers/cast.ts @@ -71,7 +71,8 @@ function successResponse(data: { success: true; hash: string; fid: number }): Re * 9. Return success response */ export async function handlePostCast(req: Request, authResult: AuthResult): Promise { - const { userId: authUserId, supabaseClient } = authResult; + const { userId: authUserId, supabaseClient, source } = authResult; + const auditSource = source ?? 'user'; let accountId: string | undefined; let auditUserId: string | undefined = authUserId; @@ -184,6 +185,8 @@ export async function handlePostCast(req: Request, authResult: AuthResult): Prom supabaseClient, accountId, userId: auditUserId, + actorUserId: authUserId, + source: auditSource, action: 'cast', success: true, }); @@ -212,6 +215,8 @@ export async function handlePostCast(req: Request, authResult: AuthResult): Prom supabaseClient, accountId, userId: auditUserId, + actorUserId: authUserId, + source: auditSource, action: 'cast', success: false, errorCode, @@ -249,7 +254,8 @@ export async function handlePostCast(req: Request, authResult: AuthResult): Prom * 6. Return success response */ export async function handleDeleteCast(req: Request, authResult: AuthResult): Promise { - const { userId: authUserId, supabaseClient } = authResult; + const { userId: authUserId, supabaseClient, source } = authResult; + const auditSource = source ?? 'user'; let accountId: string | undefined; let auditUserId: string | undefined = authUserId; @@ -283,6 +289,8 @@ export async function handleDeleteCast(req: Request, authResult: AuthResult): Pr supabaseClient, accountId, userId: auditUserId, + actorUserId: authUserId, + source: auditSource, action: 'remove_cast', success: true, }); @@ -311,6 +319,8 @@ export async function handleDeleteCast(req: Request, authResult: AuthResult): Pr supabaseClient, accountId, userId: auditUserId, + actorUserId: authUserId, + source: auditSource, action: 'remove_cast', success: false, errorCode, diff --git a/supabase/functions/farcaster-signer/handlers/follow.ts b/supabase/functions/farcaster-signer/handlers/follow.ts index 85e33819..c0ce4dfa 100644 --- a/supabase/functions/farcaster-signer/handlers/follow.ts +++ b/supabase/functions/farcaster-signer/handlers/follow.ts @@ -14,7 +14,8 @@ import { FollowRequestSchema, validateRequest } from '../lib/validate.ts'; * Handle POST /follow - Follow a user */ export async function handlePostFollow(req: Request, authResult: AuthResult): Promise { - const { userId: authUserId, supabaseClient } = authResult; + const { userId: authUserId, supabaseClient, source } = authResult; + const auditSource = source ?? 
diff --git a/supabase/functions/farcaster-signer/handlers/cast.ts b/supabase/functions/farcaster-signer/handlers/cast.ts
index 43737371..d84b2281 100644
--- a/supabase/functions/farcaster-signer/handlers/cast.ts
+++ b/supabase/functions/farcaster-signer/handlers/cast.ts
@@ -71,7 +71,8 @@ function successResponse(data: { success: true; hash: string; fid: number }): Re
  * 9. Return success response
  */
 export async function handlePostCast(req: Request, authResult: AuthResult): Promise<Response> {
-  const { userId: authUserId, supabaseClient } = authResult;
+  const { userId: authUserId, supabaseClient, source } = authResult;
+  const auditSource = source ?? 'user';
 
   let accountId: string | undefined;
   let auditUserId: string | undefined = authUserId;
@@ -184,6 +185,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'cast',
       success: true,
     });
@@ -212,6 +215,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'cast',
       success: false,
       errorCode,
@@ -249,7 +254,8 @@
  * 6. Return success response
  */
 export async function handleDeleteCast(req: Request, authResult: AuthResult): Promise<Response> {
-  const { userId: authUserId, supabaseClient } = authResult;
+  const { userId: authUserId, supabaseClient, source } = authResult;
+  const auditSource = source ?? 'user';
 
   let accountId: string | undefined;
   let auditUserId: string | undefined = authUserId;
@@ -283,6 +289,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'remove_cast',
       success: true,
     });
@@ -311,6 +319,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'remove_cast',
       success: false,
       errorCode,
diff --git a/supabase/functions/farcaster-signer/handlers/follow.ts b/supabase/functions/farcaster-signer/handlers/follow.ts
index 85e33819..c0ce4dfa 100644
--- a/supabase/functions/farcaster-signer/handlers/follow.ts
+++ b/supabase/functions/farcaster-signer/handlers/follow.ts
@@ -14,7 +14,8 @@ import { FollowRequestSchema, validateRequest } from '../lib/validate.ts';
  * Handle POST /follow - Follow a user
  */
 export async function handlePostFollow(req: Request, authResult: AuthResult): Promise<Response> {
-  const { userId: authUserId, supabaseClient } = authResult;
+  const { userId: authUserId, supabaseClient, source } = authResult;
+  const auditSource = source ?? 'user';
 
   let accountId: string | undefined;
   let auditUserId: string | undefined = authUserId;
@@ -48,6 +49,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'follow',
       success: true,
     });
@@ -74,6 +77,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'follow',
       success: false,
       errorCode: error instanceof Error ? error.message : 'Unknown error',
@@ -88,7 +93,8 @@
  * Handle DELETE /follow - Unfollow a user
  */
 export async function handleDeleteFollow(req: Request, authResult: AuthResult): Promise<Response> {
-  const { userId: authUserId, supabaseClient } = authResult;
+  const { userId: authUserId, supabaseClient, source } = authResult;
+  const auditSource = source ?? 'user';
 
   let accountId: string | undefined;
   let auditUserId: string | undefined = authUserId;
@@ -122,6 +128,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'unfollow',
       success: true,
     });
@@ -148,6 +156,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'unfollow',
       success: false,
       errorCode: error instanceof Error ? error.message : 'Unknown error',
diff --git a/supabase/functions/farcaster-signer/handlers/reaction.ts b/supabase/functions/farcaster-signer/handlers/reaction.ts
index 5bf20d4d..13c67440 100644
--- a/supabase/functions/farcaster-signer/handlers/reaction.ts
+++ b/supabase/functions/farcaster-signer/handlers/reaction.ts
@@ -14,7 +14,8 @@ import { DeleteReactionRequestSchema, ReactionRequestSchema, validateRequest } f
  * Handle POST /reaction - Add a reaction (like or recast)
  */
 export async function handlePostReaction(req: Request, authResult: AuthResult): Promise<Response> {
-  const { userId: authUserId, supabaseClient } = authResult;
+  const { userId: authUserId, supabaseClient, source } = authResult;
+  const auditSource = source ?? 'user';
 
   let accountId: string | undefined;
   let auditUserId: string | undefined = authUserId;
@@ -50,6 +51,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: reactionType,
       success: true,
     });
@@ -76,6 +79,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'reaction',
       success: false,
       errorCode: error instanceof Error ? error.message : 'Unknown error',
@@ -90,7 +95,8 @@
  * Handle DELETE /reaction - Remove a reaction (like or recast)
  */
 export async function handleDeleteReaction(req: Request, authResult: AuthResult): Promise<Response> {
-  const { userId: authUserId, supabaseClient } = authResult;
+  const { userId: authUserId, supabaseClient, source } = authResult;
+  const auditSource = source ?? 'user';
 
   let accountId: string | undefined;
   let auditUserId: string | undefined = authUserId;
@@ -126,6 +132,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: `remove_${reactionType}`,
       success: true,
     });
@@ -152,6 +160,8 @@ export async function handleDeleteReaction(req: Request, authResult: AuthResult)
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'remove_reaction',
       success: false,
       errorCode: error instanceof Error ? error.message : 'Unknown error',
diff --git a/supabase/functions/farcaster-signer/handlers/user-data.ts b/supabase/functions/farcaster-signer/handlers/user-data.ts
index 95ad18ba..20e09e9b 100644
--- a/supabase/functions/farcaster-signer/handlers/user-data.ts
+++ b/supabase/functions/farcaster-signer/handlers/user-data.ts
@@ -14,7 +14,8 @@ import { UserDataRequestSchema, validateRequest } from '../lib/validate.ts';
  * Handle POST /user-data - Update user data in Farcaster
  */
 export async function handlePostUserData(req: Request, authResult: AuthResult): Promise<Response> {
-  const { userId: authUserId, supabaseClient } = authResult;
+  const { userId: authUserId, supabaseClient, source } = authResult;
+  const auditSource = source ?? 'user';
 
   let accountId: string | undefined;
   let auditUserId: string | undefined = authUserId;
@@ -40,6 +41,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'user_data',
       success: true,
     });
@@ -65,6 +68,8 @@
       supabaseClient,
       accountId,
       userId: auditUserId,
+      actorUserId: authUserId,
+      source: auditSource,
       action: 'user_data',
       success: false,
       errorCode: error instanceof Error ? error.message : 'Unknown error',
diff --git a/supabase/functions/farcaster-signer/lib/accounts.ts b/supabase/functions/farcaster-signer/lib/accounts.ts
index 2345ac05..8a41c57a 100644
--- a/supabase/functions/farcaster-signer/lib/accounts.ts
+++ b/supabase/functions/farcaster-signer/lib/accounts.ts
@@ -5,32 +5,34 @@ import type { SigningAccount } from './types.ts';
 /**
  * Retrieves an account for signing operations.
  *
- * Queries the decrypted_accounts view which automatically handles
- * private key decryption. RLS is enforced via the authenticated
- * Supabase client (user's JWT), ensuring users can only access
- * their own accounts.
+ * Invokes the `decrypted_account` SECURITY DEFINER RPC, which self-filters by
+ * `auth.uid()` and returns the decrypted private key only when the authenticated
+ * user owns the account. The per-caller JWT (either a real user JWT or a cron-
+ * minted short-lived JWT whose `sub` is the validated owner) is the trust
+ * boundary — no extra `.eq('user_id', userId)` filter is needed.
  *
  * @param supabaseClient - Authenticated Supabase client with user's JWT
  * @param accountId - UUID of the account to retrieve
- * @param userId - User ID for additional verification (optional for service-role calls)
+ * @param userId - Authenticated user ID. Used for defense-in-depth: after the RPC
+ *                 returns, we assert `account.user_id === userId` so that any
+ *                 future loosening of the RPC's `auth.uid()` filter still fails
+ *                 closed here at the application layer.
 * @returns SigningAccount containing fid, privateKey, and userId
 * @throws SignerServiceError if account not found or not active
 */
 export async function getAccountForSigning(
   supabaseClient: SupabaseClient,
   accountId: string,
-  userId?: string
+  userId: string
 ): Promise<SigningAccount> {
-  let query = supabaseClient
-    .from('decrypted_accounts')
-    .select('id, platform_account_id, decrypted_private_key, status, user_id')
-    .eq('id', accountId);
+  // The RPC is SECURITY DEFINER and self-filters on `auth.uid() = user_id`.
+  // That is the canonical trust boundary. We still re-check ownership below
+  // as defense-in-depth: if the RPC ever drifted (e.g. loosened filter), the
+  // caller's JWT `sub` must still match the row's `user_id`.
 
-  if (userId) {
-    query = query.eq('user_id', userId);
-  }
-
-  const { data: accounts, error } = await query;
+  const { data: accounts, error } = await supabaseClient.rpc('decrypted_account', {
+    account_id: accountId,
+  });
 
   if (error) {
     console.error('Error fetching account:', error);
@@ -65,6 +67,14 @@
     throw new SignerServiceError(ErrorCodes.ACCOUNT_NOT_FOUND, 'Account has no user', 404);
   }
 
+  // Defense-in-depth: the RPC should already self-filter by `auth.uid()`, but
+  // verify the returned row's `user_id` matches the caller's JWT `sub` so a
+  // regression in the RPC filter can't leak a key across tenants. Use the
+  // ACCOUNT_NOT_FOUND code to avoid leaking account existence.
+  if (account.user_id !== userId) {
+    throw new SignerServiceError(ErrorCodes.ACCOUNT_NOT_FOUND, 'Account not found', 404);
+  }
+
   return {
     fid: Number(account.platform_account_id),
     privateKey: account.decrypted_private_key,
diff --git a/supabase/functions/farcaster-signer/lib/audit.ts b/supabase/functions/farcaster-signer/lib/audit.ts
index 0fda50a9..396920f7 100644
--- a/supabase/functions/farcaster-signer/lib/audit.ts
+++ b/supabase/functions/farcaster-signer/lib/audit.ts
@@ -1,38 +1,84 @@
-import type { SupabaseClient } from '@supabase/supabase-js';
+import { createClient, type SupabaseClient } from '@supabase/supabase-js';
 
 /**
  * Audit logging for signing operations.
  * Best-effort logging - failures don't break signing operations.
+ *
+ * Writes run as service_role (not as the caller's user JWT) because
+ * `signing_audit_log` has `FORCE ROW LEVEL SECURITY` and no user-INSERT
+ * policy: the audit table is intentionally append-only from the signer.
+ * A dedicated privileged client is constructed here so handlers keep using
+ * the user-scoped client for everything else.
  */
 
 export interface AuditLogParams {
   supabaseClient: SupabaseClient;
   accountId: string;
+  /**
+   * The account owner's user id (matches `accounts.user_id`). Kept as-is for
+   * backward compatibility with existing audit rows.
+   */
   userId: string;
+  /**
+   * The actor who initiated the action (from the caller JWT `sub`). For user
+   * traffic this equals `userId`; for cron traffic it's the owner on whose
+   * behalf the cron is acting — the value is still meaningful for audit since
+   * the `source` column disambiguates.
+   */
+  actorUserId: string;
+  /**
+   * Request origin tag: 'user' | 'cron:publish' | 'cron:auto-interaction' | 'system'.
+   */
+  source: string;
   action: string;
   success: boolean;
   errorCode?: string;
 }
 
+let cachedPrivilegedClient: SupabaseClient | null = null;
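+
+// Example of the row shape this module writes (values illustrative): for cron
+// traffic, user_id and actor_user_id both carry the account owner's id and
+// `source` tells the paths apart:
+//   { account_id: '<uuid>', user_id: '<owner uuid>', actor_user_id: '<owner uuid>',
+//     source: 'cron:publish', action: 'cast', success: true, error_code: null }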
+ */ +function getPrivilegedClient(): SupabaseClient { + if (cachedPrivilegedClient) return cachedPrivilegedClient; + const url = Deno.env.get('SUPABASE_URL') || Deno.env.get('API_URL') || Deno.env.get('SUPABASE_API_URL'); + const serviceRoleKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY') || Deno.env.get('SERVICE_ROLE_KEY'); + if (!url || !serviceRoleKey) { + throw new Error('Missing Supabase URL or service_role key for audit writes'); + } + cachedPrivilegedClient = createClient(url, serviceRoleKey, { + auth: { persistSession: false, autoRefreshToken: false }, + }); + return cachedPrivilegedClient; +} + /** * Log a signing action to the audit log table. * This is a best-effort operation - it should never throw or break the signing flow. * * @param params - Audit log parameters - * @param params.supabaseClient - Supabase client instance + * @param params.supabaseClient - Supabase client instance (unused for the + * INSERT itself; kept in the signature so handlers don't need refactoring). * @param params.accountId - The account ID that was used for signing - * @param params.userId - The user ID who initiated the action + * @param params.userId - The account owner's user id + * @param params.actorUserId - The caller who initiated the action (JWT `sub`) + * @param params.source - Request origin tag (user/cron:*) * @param params.action - The action type (e.g., 'cast', 'like', 'recast', 'follow', 'unfollow', 'remove_cast') * @param params.success - Whether the operation succeeded * @param params.errorCode - Error code if the operation failed (optional) */ export async function logSigningAction(params: AuditLogParams): Promise { - const { supabaseClient, accountId, userId, action, success, errorCode } = params; + const { accountId, userId, actorUserId, source, action, success, errorCode } = params; try { - const { error } = await supabaseClient.from('signing_audit_log').insert({ + const client = getPrivilegedClient(); + const { error } = await client.from('signing_audit_log').insert({ account_id: accountId, user_id: userId, + actor_user_id: actorUserId, + source, action, success, error_code: errorCode ?? null, @@ -43,6 +89,8 @@ export async function logSigningAction(params: AuditLogParams): Promise { console.error('[audit] Failed to log signing action:', error.message, { accountId, userId, + actorUserId, + source, action, success, errorCode, @@ -53,6 +101,8 @@ export async function logSigningAction(params: AuditLogParams): Promise { console.error('[audit] Unexpected error logging signing action:', err, { accountId, userId, + actorUserId, + source, action, success, errorCode, diff --git a/supabase/functions/farcaster-signer/lib/auth.ts b/supabase/functions/farcaster-signer/lib/auth.ts index e2b0d615..8480912b 100644 --- a/supabase/functions/farcaster-signer/lib/auth.ts +++ b/supabase/functions/farcaster-signer/lib/auth.ts @@ -2,14 +2,38 @@ import { createClient } from '@supabase/supabase-js'; import { ErrorCodes, SignerServiceError } from './errors.ts'; import type { AuthResult } from './types.ts'; +const VALID_SOURCES = /^(user|cron:publish|cron:auto-interaction|system)$/; + +/** + * Best-effort decode of a JWT's payload segment. Returns null if the token is + * malformed — this helper is advisory only (already-validated tokens only) and + * should never throw. 
+ */ +function decodeJwtPayload(token: string): Record | null { + try { + const parts = token.split('.'); + if (parts.length < 2) return null; + // base64url -> base64 + const b64 = parts[1].replace(/-/g, '+').replace(/_/g, '/'); + const padded = b64 + '='.repeat((4 - (b64.length % 4)) % 4); + const json = atob(padded); + return JSON.parse(json); + } catch { + return null; + } +} + /** * Authenticates a request using the Authorization header. * * Creates a Supabase client with the user's JWT token (not service role) * to ensure RLS policies are enforced for all subsequent queries. * + * Cron callers mint a short-lived HS256 JWT with `sub = ` and + * present it here — this path validates it identically to a human-user JWT. + * * @param authHeader - The Authorization header value (e.g., "Bearer ") - * @returns AuthResult containing userId (if user-authenticated) and Supabase client + * @returns AuthResult containing userId and Supabase client * @throws SignerServiceError if authentication fails */ export async function authenticateRequest(authHeader: string | null): Promise { @@ -25,21 +49,11 @@ export async function authenticateRequest(authHeader: string | null): Promise { return Deno.env.get('SUPABASE_SERVICE_ROLE_KEY') || Deno.env.get('SERVICE_ROLE_KEY'); }; -async function callSignerService(path: string, body: Record): Promise<{ hash: string }> { +/** + * Mint a short-lived HS256 JWT that the signer edge function validates via + * `supabase.auth.getUser()`. Matches Supabase's expected user JWT shape so that + * downstream RLS evaluates `auth.uid() = sub`. `scope` is audit metadata + * (e.g. `{ account_id, list_id }`); `source` tags the cron origin. + * + * NOTE: Requires SUPABASE_JWT_SECRET (symmetric HS256). If the project is + * migrated to asymmetric (JWKs) JWT signing, this path must switch to signing + * with the project's private key and the signer must verify via JWKs. Operator: + * confirm via Dashboard -> Project Settings -> JWT Signing before deploy. 
+ */ +async function mintUserJwt( + sub: string, + scope: Record, + source: string +): Promise { + const secret = Deno.env.get('SUPABASE_JWT_SECRET'); + if (!secret) { + throw new Error('SUPABASE_JWT_SECRET missing'); + } + const key = await crypto.subtle.importKey( + 'raw', + new TextEncoder().encode(secret), + { name: 'HMAC', hash: 'SHA-256' }, + false, + ['sign'] + ); + return await create( + { alg: 'HS256', typ: 'JWT' }, + { + sub, + role: 'authenticated', + aud: 'authenticated', + source, + scope, + iat: getNumericDate(0), + exp: getNumericDate(60), + }, + key + ); +} + +async function callSignerService( + path: string, + body: Record, + userJwt: string +): Promise<{ hash: string }> { const supabaseUrl = getSupabaseUrl(); const serviceRoleKey = getServiceRoleKey(); @@ -37,7 +85,7 @@ async function callSignerService(path: string, body: Record): P method: 'POST', headers: { 'Content-Type': 'application/json', - Authorization: `Bearer ${serviceRoleKey}`, + Authorization: `Bearer ${userJwt}`, apikey: serviceRoleKey, }, body: JSON.stringify(body), @@ -53,6 +101,16 @@ async function callSignerService(path: string, body: Record): P if (!response.ok || !data || data.success === false) { const errorMessage = data?.error?.message || `Signer service failed (${response.status})`; const errorCode = data?.error?.code; + if (response && typeof (response as Response).headers !== 'undefined') { + const headerBag: Record = {}; + (response as Response).headers.forEach((v, k) => { + headerBag[k] = v; + }); + console.error('Signer service response headers:', JSON.stringify(redactHeaders(headerBag), null, 2)); + } + if (data) { + console.error('Signer service response data:', redactSecrets(JSON.stringify(data, null, 2))); + } throw new Error(errorCode ? `${errorCode}: ${errorMessage}` : errorMessage); } @@ -131,13 +189,37 @@ async function processAutoInteractionList(supabase: any, list: any) { return; } - // Note: We'll get the account details from decrypted_accounts view below + // Server-side authorization: SECURITY DEFINER RPC validates list.type='auto_interaction' + // and `accounts.user_id = list.user_id` for the sourceAccountId, returning the + // authoritative owner + source account id. Prefer this over content.sourceAccountId + // (which a compromised writer could lie about). + const { data: authRows, error: authErr } = await supabase.rpc('authorize_auto_interaction', { + p_list_id: list.id, + }); + + if (authErr || !authRows || authRows.length === 0) { + console.warn(`[list ${list.id}] authorization failed, skipping`, authErr); + return; + } + + const { owner_user_id, source_account_id } = authRows[0] as { + owner_user_id: string; + source_account_id: string; + }; + + // Single 60-second JWT covers every signer call for this list run. + const userJwt = await mintUserJwt( + owner_user_id, + { account_id: source_account_id, list_id: list.id }, + 'cron:auto-interaction' + ); - // Get decrypted account data directly from the view (like publish-cast-from-db does) + // Get decrypted account data directly from the view (like publish-cast-from-db does). + // Use source_account_id from the RPC — the validated id — instead of content.sourceAccountId. 
+  // Get decrypted account data directly from the view (like publish-cast-from-db does).
+  // Use source_account_id from the RPC — the validated id — instead of content.sourceAccountId.
   const { data: accounts, error: decryptError } = await supabase
     .from('decrypted_accounts')
     .select('id, platform_account_id')
-    .eq('id', content.sourceAccountId);
+    .eq('id', source_account_id);
 
   if (decryptError || !accounts || accounts.length === 0) {
     throw new Error(`Failed to get decrypted account: ${decryptError?.message || 'Account not found'}`);
@@ -147,7 +229,7 @@
 
   console.log(`[List ${list.id}] Decrypted account:`, {
     hasAccount: !!account,
-    accountId: content.sourceAccountId,
+    accountId: source_account_id,
     platformAccountId: account.platform_account_id,
   });
 
@@ -217,18 +299,18 @@
     isReply: !!cast.parent_hash,
   });
 
-  // Perform the actions
+  // Perform the actions — use validated source_account_id and the minted JWT.
   const actions = [];
   if (content.actionType === 'like' || content.actionType === 'both') {
     actions.push({
       type: 'like',
-      action: () => submitReaction('like', cast, content.sourceAccountId),
+      action: () => submitReaction('like', cast, source_account_id, userJwt),
     });
   }
   if (content.actionType === 'recast' || content.actionType === 'both') {
     actions.push({
       type: 'recast',
-      action: () => submitReaction('recast', cast, content.sourceAccountId),
+      action: () => submitReaction('recast', cast, source_account_id, userJwt),
     });
   }
 
@@ -513,19 +595,23 @@ async function fetchAllRecentCasts(
   return allCasts;
 }
 
-async function submitReaction(type: 'like' | 'recast', cast: any, accountId: string) {
+async function submitReaction(type: 'like' | 'recast', cast: any, accountId: string, userJwt: string) {
   if (!accountId) {
     throw new Error('Account ID is required to submit reactions');
   }
 
-  const response = await callSignerService('/reaction', {
-    account_id: accountId,
-    type,
-    target: {
-      fid: cast.author.fid,
-      hash: cast.hash,
+  const response = await callSignerService(
+    '/reaction',
+    {
+      account_id: accountId,
+      type,
+      target: {
+        fid: cast.author.fid,
+        hash: cast.hash,
+      },
     },
-  });
+    userJwt
+  );
 
   console.log(`Successfully submitted ${type} for cast ${cast.hash}. Hash: ${response.hash}`);
 }
diff --git a/supabase/functions/publish-cast-from-db/index.ts b/supabase/functions/publish-cast-from-db/index.ts
index 7c5a67e3..275e2a9a 100644
--- a/supabase/functions/publish-cast-from-db/index.ts
+++ b/supabase/functions/publish-cast-from-db/index.ts
@@ -2,6 +2,8 @@ import * as Sentry from 'https://deno.land/x/sentry/index.mjs';
 import { HubRestAPIClient } from 'npm:@standard-crypto/farcaster-js-hub-rest';
 import { createClient } from 'npm:@supabase/supabase-js@2';
 import axios from 'npm:axios';
+import { create, getNumericDate } from 'https://deno.land/x/djwt@v3.0.2/mod.ts';
+import { redactHeaders, redactSecrets } from '../_shared/redact.ts';
 
 Sentry.init({
   dsn: Deno.env.get('SENTRY_DSN'),
@@ -132,7 +134,53 @@ function buildSignerPayload(draftData: any) {
   return payload;
 }
 
-async function callSignerService(path: string, body: Record<string, unknown>): Promise<{ hash: string }> {
+/**
+ * Mint a short-lived HS256 JWT that the signer edge function will accept via
+ * `supabase.auth.getUser()`. Claim shape matches Supabase's expected user JWT
+ * so RLS evaluates `auth.uid() = sub`. `scope` is advisory metadata surfaced to
+ * audit logs (e.g. `{ account_id, draft_id }`); `source` tags the caller cron.
+ *
+ * NOTE: Requires SUPABASE_JWT_SECRET (symmetric HS256).
If the project is
+ * migrated to asymmetric (JWKs) JWT signing, this path must switch to signing
+ * with the project's private key and the signer must verify via JWKs. Operator:
+ * confirm via Dashboard -> Project Settings -> JWT Signing before deploy.
+ */
+async function mintUserJwt(
+  sub: string,
+  scope: Record<string, string>,
+  source: string
+): Promise<string> {
+  const secret = Deno.env.get('SUPABASE_JWT_SECRET');
+  if (!secret) {
+    throw new Error('SUPABASE_JWT_SECRET missing');
+  }
+  const key = await crypto.subtle.importKey(
+    'raw',
+    new TextEncoder().encode(secret),
+    { name: 'HMAC', hash: 'SHA-256' },
+    false,
+    ['sign']
+  );
+  return await create(
+    { alg: 'HS256', typ: 'JWT' },
+    {
+      sub,
+      role: 'authenticated',
+      aud: 'authenticated',
+      source,
+      scope,
+      iat: getNumericDate(0),
+      exp: getNumericDate(60),
+    },
+    key
+  );
+}
+
+async function callSignerService(
+  path: string,
+  body: Record<string, unknown>,
+  userJwt: string
+): Promise<{ hash: string }> {
   const supabaseUrl = getSupabaseUrl();
   const serviceRoleKey = getServiceRoleKey();
 
@@ -144,7 +192,7 @@
     method: 'POST',
     headers: {
       'Content-Type': 'application/json',
-      Authorization: `Bearer ${serviceRoleKey}`,
+      Authorization: `Bearer ${userJwt}`,
       apikey: serviceRoleKey,
     },
     body: JSON.stringify(body),
@@ -198,19 +246,25 @@
 async function submitViaSignerService({
   accountId,
   draftId,
   draftData,
+  userJwt,
 }: {
   accountId: string;
   draftId: string;
   draftData: any;
+  userJwt: string;
 }): Promise<string> {
   console.log('Submitting draft via signer service...');
 
   const payload = buildSignerPayload(draftData);
 
-  const response = await callSignerService('/cast', {
-    account_id: accountId,
-    idempotency_key: draftId,
-    ...payload,
-  });
+  const response = await callSignerService(
+    '/cast',
+    {
+      account_id: accountId,
+      idempotency_key: draftId,
+      ...payload,
+    },
+    userJwt
+  );
 
   console.log('Signer service returned hash:', response.hash);
   return response.hash;
@@ -253,6 +307,25 @@ const publishDraft = async (supabaseClient, draftId) => {
   return Sentry.withScope(async (scope) => {
     scope.setTag('draftId', draftId);
 
+    // Server-side authorization: SECURITY DEFINER RPC enforces the
+    // draft.created_by_user_id == accounts.user_id invariant and draft.status='scheduled'.
+    // If it errors or returns zero rows, the draft is not authorized to publish.
+ const { data: authRows, error: authError } = await supabaseClient.rpc('authorize_draft_publish', { + p_draft_id: draftId, + }); + + if (authError || !authRows || authRows.length === 0) { + console.error('draft publish not authorized', authError); + Sentry.captureException(authError || new Error(`draft ${draftId} publish not authorized`)); + await supabaseClient.from('draft').update({ status: 'failed' }).eq('id', draftId); + return; + } + + const { owner_user_id, account_id: authorizedAccountId } = authRows[0] as { + owner_user_id: string; + account_id: string; + }; + const { data: drafts, error: getDraftError } = await supabaseClient .from('draft') .select('*, encoded_message_bytes') @@ -308,7 +381,7 @@ const publishDraft = async (supabaseClient, draftId) => { console.log('submit draft to protocol - draftId:', draftId); console.log('account fid:', Number(account.platform_account_id)); - // Check if we have pre-encoded message bytes + // Check if we have pre-encoded message bytes (fast path — no signer call needed) if (draft.encoded_message_bytes && Array.isArray(draft.encoded_message_bytes)) { console.log('Found pre-encoded message bytes, using reliable submission...'); await submitPreEncodedMessage({ @@ -318,10 +391,18 @@ const publishDraft = async (supabaseClient, draftId) => { console.log('No pre-encoded bytes found, using fallback approach...'); console.log('draft data structure:', JSON.stringify(castBody, null, 2)); + // Mint a 60-second JWT bound to the validated owner + draft scope. + const userJwt = await mintUserJwt( + owner_user_id, + { account_id: authorizedAccountId, draft_id: draftId }, + 'cron:publish' + ); + await submitViaSignerService({ accountId: draft.account_id, draftId, draftData: castBody, + userJwt, }); } @@ -343,16 +424,16 @@ const publishDraft = async (supabaseClient, draftId) => { console.error('=== HTTP RESPONSE ERROR ==='); console.error('Status:', e.response.status); console.error('Status Text:', e.response.statusText); - console.error('Response Data:', JSON.stringify(e.response.data, null, 2)); - console.error('Response Headers:', JSON.stringify(e.response.headers, null, 2)); + console.error('Response Data:', redactSecrets(JSON.stringify(e.response.data, null, 2))); + console.error('Response Headers:', JSON.stringify(redactHeaders(e.response?.headers), null, 2)); } if (e.config) { console.error('=== REQUEST CONFIG ==='); console.error('URL:', e.config.url); console.error('Method:', e.config.method); - console.error('Headers:', JSON.stringify(e.config.headers, null, 2)); - console.error('Request Data:', JSON.stringify(e.config.data, null, 2)); + console.error('Headers:', JSON.stringify(redactHeaders(e.config?.headers), null, 2)); + console.error('Request Data:', redactSecrets(JSON.stringify(e.config.data, null, 2))); } if (e.request && !e.response) { From 5b2ce6a72ab401b8e02e799114789cc4e810221d Mon Sep 17 00:00:00 2001 From: hellno Date: Thu, 23 Apr 2026 19:11:19 +0200 Subject: [PATCH 03/18] mcp+oauth+sentry: scope enforcement and log hygiene - mcp-server: per-tool assertScope gating for every MCP tool. Tokens without explicit scope claims fall back to read-only DEFAULT_SCOPES (['read:accounts','read:casts']) so approving an MCP client no longer equals full session power. 401 response no longer echoes the bearer prefix. - oauth/decision: validates that requested scopes are a subset of what was shown on the consent page; defensively passes scopes through to approveAuthorization. - api/auth/siwe: stop logging full request.headers (session cookie leak). 
- sentry.server.config + sentry.edge.config: beforeSend scrubber that
  redacts Authorization/apikey/cookie headers, JWT-shaped (eyJ...) and
  sb_... tokens across event.extra, event.breadcrumbs,
  event.exception.values[*].stacktrace.frames[*].vars, and event.contexts.
  Wrapped in try/catch so a scrub failure never blocks Sentry.

  instrumentation-client.ts is deliberately untouched per scope (no
  client-side changes in this hardening pass).
---
 app/api/auth/siwe/route.ts                  |  2 +-
 app/api/oauth/decision/route.ts             | 85 +++++++++++++++--
 sentry.edge.config.ts                       | 91 +++++++++++++++++++++
 sentry.server.config.ts                     | 91 +++++++++++++++++++++
 supabase/functions/mcp-server/index.ts      | 71 ++++++++++------
 supabase/functions/mcp-server/lib/auth.ts   | 63 +++++++++++++-
 supabase/functions/mcp-server/lib/scopes.ts | 39 +++++++++
 supabase/functions/mcp-server/lib/types.ts  |  1 +
 8 files changed, 409 insertions(+), 34 deletions(-)
 create mode 100644 supabase/functions/mcp-server/lib/scopes.ts

diff --git a/app/api/auth/siwe/route.ts b/app/api/auth/siwe/route.ts
index 229fc85c..fbd193c5 100644
--- a/app/api/auth/siwe/route.ts
+++ b/app/api/auth/siwe/route.ts
@@ -3,7 +3,7 @@ import { type NextRequest, NextResponse } from 'next/server';
 export async function GET(request: NextRequest) {
   try {
     const { searchParams } = new URL(request.url);
-    console.log('SIWE API', searchParams, request.method, request.headers);
+    console.log('SIWE API', searchParams, request.method);
 
     return NextResponse.redirect('/');
   } catch (error) {
diff --git a/app/api/oauth/decision/route.ts b/app/api/oauth/decision/route.ts
index 6b0abd4b..5445fc9a 100644
--- a/app/api/oauth/decision/route.ts
+++ b/app/api/oauth/decision/route.ts
@@ -9,14 +9,45 @@ function buildLoginRedirect(authorizationId: string): string {
   return `/login?redirect=${encodeURIComponent(redirectPath)}`;
 }
 
-function getOAuthHelpers(supabase: ReturnType<typeof createClient>) {
+type OAuthHelpers = {
+  approve?: (id: string, opts?: { scopes?: string[] }) => Promise<any>;
+  deny?: (id: string) => Promise<any>;
+  getDetails?: (id: string) => Promise<any>;
+};
+
+function getOAuthHelpers(supabase: ReturnType<typeof createClient>): OAuthHelpers {
   const oauth = (supabase.auth as unknown as { oauth?: Record<string, unknown> }).oauth;
+  if (!oauth) return {};
   return {
-    approve: oauth && (oauth as { approveAuthorization?: (id: string) => Promise<any> }).approveAuthorization,
-    deny: oauth && (oauth as { denyAuthorization?: (id: string) => Promise<any> }).denyAuthorization,
+    approve: (oauth as { approveAuthorization?: OAuthHelpers['approve'] }).approveAuthorization,
+    deny: (oauth as { denyAuthorization?: OAuthHelpers['deny'] }).denyAuthorization,
+    getDetails: (oauth as { getAuthorizationDetails?: OAuthHelpers['getDetails'] }).getAuthorizationDetails,
   };
 }
 
+/**
+ * Parse approved scopes from the form submission. Supports both
+ * `scopes` (JSON array string) and multi-valued `scope` form fields.
+ * Returns `null` if the form did not specify any — meaning the caller should
+ * accept whatever scopes the stored authorization request carries.
+ */ +function parseApprovedScopes(formData: FormData): string[] | null { + const scopesField = formData.get('scopes'); + if (typeof scopesField === 'string' && scopesField.length > 0) { + try { + const parsed = JSON.parse(scopesField); + if (Array.isArray(parsed)) { + return parsed.filter((s): s is string => typeof s === 'string'); + } + } catch { + // fall through to space-separated parse + } + return scopesField.trim().split(/\s+/).filter(Boolean); + } + const multi = formData.getAll('scope').filter((v): v is string => typeof v === 'string' && v.length > 0); + return multi.length > 0 ? multi : null; +} + export async function POST(request: Request) { const formData = await request.formData(); const decision = formData.get('decision'); @@ -52,7 +83,7 @@ export async function POST(request: Request) { return NextResponse.redirect(buildLoginRedirect(authorizationId)); } - const { approve, deny } = getOAuthHelpers(supabase); + const { approve, deny, getDetails } = getOAuthHelpers(supabase); if (!approve || !deny) { return NextResponse.json( { error: 'Supabase OAuth helpers are unavailable. Upgrade @supabase/supabase-js.' }, @@ -60,8 +91,50 @@ export async function POST(request: Request) { ); } - const action = decision === 'approve' ? approve : deny; - const { data, error } = await action(authorizationId); + if (decision === 'deny') { + const { data, error } = await deny(authorizationId); + if (error) { + return NextResponse.json({ error: error.message || 'Failed to update authorization' }, { status: 400 }); + } + const redirectTo = data?.redirect_to || data?.redirect_url; + if (!redirectTo) { + return NextResponse.json({ error: 'Missing redirect URL' }, { status: 500 }); + } + return NextResponse.redirect(redirectTo, { status: 303 }); + } + + // APPROVE path — validate that any user-submitted scope set is a subset + // of the stored authorization request's scopes. A malicious client cannot + // widen permissions beyond what they initially requested, and a tampered + // consent form cannot grant scopes the user never saw described. + const approvedScopes = parseApprovedScopes(formData); + let effectiveScopes: string[] | undefined; + + if (approvedScopes && getDetails) { + const { data: details, error: detailsError } = await getDetails(authorizationId); + if (detailsError) { + return NextResponse.json({ error: detailsError.message || 'Failed to load authorization' }, { status: 400 }); + } + const allowed = Array.isArray(details?.scopes) ? (details.scopes as string[]) : []; + const outOfBand = approvedScopes.filter((s) => !allowed.includes(s)); + if (outOfBand.length > 0) { + return NextResponse.json( + { error: 'requested scopes exceed the authorization set', invalid: outOfBand }, + { status: 400 } + ); + } + effectiveScopes = approvedScopes; + } + + // TODO: Full scope-threading in the OAuth token payload depends on Supabase's + // approveAuthorization signature. Current @supabase/supabase-js typings do not + // document a scopes option, so we pass it opportunistically — the helper will + // use it when supported and ignore it otherwise. If the installed version + // doesn't honor it, the token receives the scopes from the stored + // authorization request, which already went through user consent. + const { data, error } = effectiveScopes + ? 
await approve(authorizationId, { scopes: effectiveScopes })
+    : await approve(authorizationId);
 
   if (error) {
     return NextResponse.json({ error: error.message || 'Failed to update authorization' }, { status: 400 });
diff --git a/sentry.edge.config.ts b/sentry.edge.config.ts
index 71b17f44..aafa0e40 100644
--- a/sentry.edge.config.ts
+++ b/sentry.edge.config.ts
@@ -5,6 +5,17 @@
 import * as Sentry from '@sentry/nextjs';
 
+// NOTE: the scrubber code is intentionally duplicated inline (not imported from a
+// shared module) — the edge runtime forbids Node-only deps and we don't want a
+// shared-helper import to accidentally pull one in.
+const REDACTED_HEADERS = new Set(['authorization', 'apikey', 'api-key', 'cookie', 'x-api-key']);
+const JWT_RE = /eyJ[A-Za-z0-9_\-.]{40,}/g;
+const SB_RE = /sb_[a-z0-9_]{30,}/g;
+
+function scrubSecrets(value: string): string {
+  return value.replace(JWT_RE, '[REDACTED_JWT]').replace(SB_RE, '[REDACTED_SB]');
+}
+
 Sentry.init({
   dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,
 
@@ -13,4 +24,84 @@ Sentry.init({
   // Setting this option to true will print useful information to the console while you're setting up Sentry.
   debug: false,
+
+  beforeSend(event) {
+    try {
+      if (event.request?.headers) {
+        for (const key of Object.keys(event.request.headers)) {
+          if (REDACTED_HEADERS.has(key.toLowerCase())) {
+            event.request.headers[key] = '[REDACTED]';
+          }
+        }
+      }
+
+      if (event.extra) {
+        for (const key of Object.keys(event.extra)) {
+          const v = event.extra[key];
+          if (typeof v === 'string') {
+            event.extra[key] = scrubSecrets(v);
+          }
+        }
+      }
+
+      if (event.breadcrumbs) {
+        for (const crumb of event.breadcrumbs) {
+          if (typeof crumb.message === 'string') {
+            crumb.message = scrubSecrets(crumb.message);
+          }
+          if (crumb.data) {
+            for (const key of Object.keys(crumb.data)) {
+              const v = crumb.data[key];
+              if (typeof v === 'string') {
+                crumb.data[key] = scrubSecrets(v);
+              }
+            }
+          }
+        }
+      }
+
+      // Scrub local variable captures from each stack frame — axios errors
+      // serialize `config.headers.Authorization` into `frame.vars` and that
+      // slips past the header/extra/breadcrumb passes above.
+      if (event.exception?.values) {
+        for (const ev of event.exception.values) {
+          const frames = ev.stacktrace?.frames;
+          if (!frames) continue;
+          for (const frame of frames) {
+            const vars = frame.vars as Record<string, unknown> | undefined;
+            if (!vars || typeof vars !== 'object') continue;
+            for (const key of Object.keys(vars)) {
+              const val = vars[key];
+              if (typeof val === 'string') {
+                vars[key] = scrubSecrets(val);
+              } else if (val && typeof val === 'object') {
+                try {
+                  vars[key] = JSON.parse(scrubSecrets(JSON.stringify(val)));
+                } catch {
+                  /* leave as-is if non-serializable */
+                }
+              }
+            }
+          }
+        }
+      }
+
+      // Scrub `contexts` — Sentry often stores request/response bodies here. 
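+      // (Only top-level string values of each context are scrubbed here;
+      // nested objects inside a context pass through unchanged.)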
+      if (event.contexts) {
+        for (const ctxName of Object.keys(event.contexts)) {
+          const ctx = event.contexts[ctxName] as Record<string, unknown> | undefined;
+          if (!ctx || typeof ctx !== 'object') continue;
+          for (const key of Object.keys(ctx)) {
+            const val = ctx[key];
+            if (typeof val === 'string') {
+              ctx[key] = scrubSecrets(val);
+            }
+          }
+        }
+      }
+    } catch {
+      /* never block the SDK */
+    }
+    return event;
+  },
 });
diff --git a/sentry.server.config.ts b/sentry.server.config.ts
index 66ddeed3..2df563a4 100644
--- a/sentry.server.config.ts
+++ b/sentry.server.config.ts
@@ -4,6 +4,14 @@
 import * as Sentry from '@sentry/nextjs';
 
+const REDACTED_HEADERS = new Set(['authorization', 'apikey', 'api-key', 'cookie', 'x-api-key']);
+const JWT_RE = /eyJ[A-Za-z0-9_\-.]{40,}/g;
+const SB_RE = /sb_[a-z0-9_]{30,}/g;
+
+function scrubSecrets(value: string): string {
+  return value.replace(JWT_RE, '[REDACTED_JWT]').replace(SB_RE, '[REDACTED_SB]');
+}
+
 Sentry.init({
   dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,
 
@@ -13,6 +21,89 @@ Sentry.init({
   // Setting this option to true will print useful information to the console while you're setting up Sentry.
   debug: false,
 
+  // Scrub bearer tokens, Supabase anon/service keys, and other credential-like
+  // strings out of every Sentry event before the SDK ships it. Wrapped in
+  // try/catch so a bug in scrubbing never blocks an otherwise-valid event.
+  beforeSend(event) {
+    try {
+      if (event.request?.headers) {
+        for (const key of Object.keys(event.request.headers)) {
+          if (REDACTED_HEADERS.has(key.toLowerCase())) {
+            event.request.headers[key] = '[REDACTED]';
+          }
+        }
+      }
+
+      if (event.extra) {
+        for (const key of Object.keys(event.extra)) {
+          const v = event.extra[key];
+          if (typeof v === 'string') {
+            event.extra[key] = scrubSecrets(v);
+          }
+        }
+      }
+
+      if (event.breadcrumbs) {
+        for (const crumb of event.breadcrumbs) {
+          if (typeof crumb.message === 'string') {
+            crumb.message = scrubSecrets(crumb.message);
+          }
+          if (crumb.data) {
+            for (const key of Object.keys(crumb.data)) {
+              const v = crumb.data[key];
+              if (typeof v === 'string') {
+                crumb.data[key] = scrubSecrets(v);
+              }
+            }
+          }
+        }
+      }
+
+      // Scrub local variable captures from each stack frame — axios errors
+      // serialize `config.headers.Authorization` into `frame.vars` and that
+      // slips past the header/extra/breadcrumb passes above.
+      if (event.exception?.values) {
+        for (const ev of event.exception.values) {
+          const frames = ev.stacktrace?.frames;
+          if (!frames) continue;
+          for (const frame of frames) {
+            const vars = frame.vars as Record<string, unknown> | undefined;
+            if (!vars || typeof vars !== 'object') continue;
+            for (const key of Object.keys(vars)) {
+              const val = vars[key];
+              if (typeof val === 'string') {
+                vars[key] = scrubSecrets(val);
+              } else if (val && typeof val === 'object') {
+                try {
+                  vars[key] = JSON.parse(scrubSecrets(JSON.stringify(val)));
+                } catch {
+                  /* leave as-is if non-serializable */
+                }
+              }
+            }
+          }
+        }
+      }
+
+      // Scrub `contexts` — Sentry often stores request/response bodies here. 
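+      // (Only top-level string values of each context are scrubbed here;
+      // nested objects inside a context pass through unchanged.)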
+      if (event.contexts) {
+        for (const ctxName of Object.keys(event.contexts)) {
+          const ctx = event.contexts[ctxName] as Record<string, unknown> | undefined;
+          if (!ctx || typeof ctx !== 'object') continue;
+          for (const key of Object.keys(ctx)) {
+            const val = ctx[key];
+            if (typeof val === 'string') {
+              ctx[key] = scrubSecrets(val);
+            }
+          }
+        }
+      }
+    } catch {
+      /* never block the SDK */
+    }
+    return event;
+  },
+
   // uncomment the line below to enable Spotlight (https://spotlightjs.com)
   // spotlight: process.env.NODE_ENV === 'development',
 });
diff --git a/supabase/functions/mcp-server/index.ts b/supabase/functions/mcp-server/index.ts
index eea6d918..8e4d5a16 100644
--- a/supabase/functions/mcp-server/index.ts
+++ b/supabase/functions/mcp-server/index.ts
@@ -16,6 +16,7 @@ import {
   jsonRpcResult,
   noContentResponse,
 } from './lib/errors.ts';
+import { assertScope, isScopeInsufficientError, type McpScope } from './lib/scopes.ts';
 import type { JsonRpcRequest, ToolDefinition, ToolHandler } from './lib/types.ts';
 import { addToListTool, addToListToolDefinition } from './tools/add-to-list.ts';
 import { createListTool, createListToolDefinition } from './tools/create-list.ts';
@@ -35,24 +36,35 @@ const SERVER_NAME = 'herocast-mcp';
 const SERVER_VERSION = '1.0.0';
 const MCP_SESSION_HEADER = 'Mcp-Session-Id';
 
-type Tool = { definition: ToolDefinition; handler: ToolHandler };
+type Tool = { definition: ToolDefinition; handler: ToolHandler; requiredScope: McpScope };
 
 const tools: Tool[] = [
-  { definition: postCastToolDefinition, handler: postCastTool },
-  { definition: listAccountsToolDefinition, handler: (auth) => listAccountsTool(auth.supabaseClient, auth.userId) },
-  { definition: getCastsToolDefinition, handler: getCastsTool },
-  { definition: getUserToolDefinition, handler: (_auth, args) => getUserTool(args) },
-  { definition: searchUsersToolDefinition, handler: (_auth, args) => searchUsersTool(args) },
-  { definition: listListsToolDefinition, handler: listListsTool },
-  { definition: getListToolDefinition, handler: getListTool },
-  { definition: createListToolDefinition, handler: createListTool },
-  { definition: updateListToolDefinition, handler: updateListTool },
-  { definition: deleteListToolDefinition, handler: deleteListTool },
-  { definition: addToListToolDefinition, handler: addToListTool },
-  { definition: removeFromListToolDefinition, handler: removeFromListTool },
+  { definition: postCastToolDefinition, handler: postCastTool, requiredScope: 'write:cast' },
+  {
+    definition: listAccountsToolDefinition,
+    handler: (auth) => listAccountsTool(auth.supabaseClient, auth.userId),
+    requiredScope: 'read:accounts',
+  },
+  { definition: getCastsToolDefinition, handler: getCastsTool, requiredScope: 'read:casts' },
+  { definition: getUserToolDefinition, handler: (_auth, args) => getUserTool(args), requiredScope: 'read:accounts' },
+  {
+    definition: searchUsersToolDefinition,
+    handler: (_auth, args) => searchUsersTool(args),
+    requiredScope: 'read:accounts',
+  },
+  { definition: listListsToolDefinition, handler: listListsTool, requiredScope: 'manage:lists' },
+  { definition: getListToolDefinition, handler: getListTool, requiredScope: 'manage:lists' },
+  { definition: createListToolDefinition, handler: createListTool, requiredScope: 'manage:lists' },
+  { definition: updateListToolDefinition, handler: updateListTool, requiredScope: 'manage:lists' },
+  { definition: deleteListToolDefinition, handler: deleteListTool, requiredScope: 'manage:lists' },
+  { definition: addToListToolDefinition, handler: addToListTool, 
requiredScope: 'manage:lists' }, + { definition: removeFromListToolDefinition, handler: removeFromListTool, requiredScope: 'manage:lists' }, ]; -const toolMap = new Map(tools.map((t) => [t.definition.name, t.handler])); +type ToolEntry = { handler: ToolHandler; requiredScope: McpScope }; +const toolMap = new Map( + tools.map((t) => [t.definition.name, { handler: t.handler, requiredScope: t.requiredScope }]) +); const toolDefinitions = tools.map((t) => t.definition); function getSessionId(req: Request): string { @@ -117,20 +129,16 @@ Deno.serve(async (req: Request) => { auth = await authenticateRequest(req.headers.get('Authorization')); } catch (error) { const message = error instanceof Error ? error.message : 'Unauthorized'; - const authHeader = req.headers.get('Authorization'); - // Include debug info in error response + const hasAuthHeader = !!req.headers.get('Authorization'); + // Do NOT echo any portion of the bearer token (length/prefix/slice) — + // that was historically used for debugging but leaks key material into + // client-facing responses and logs. Only a boolean presence flag is safe. return jsonRpcError( id ?? null, -32001, message, 401, - { - debug: { - hasAuthHeader: !!authHeader, - authHeaderLength: authHeader?.length ?? 0, - authHeaderPrefix: authHeader?.slice(0, 30) ?? 'none', - }, - }, + { debug: { hasAuthHeader } }, getAuthChallengeHeaders() ); } @@ -176,9 +184,20 @@ Deno.serve(async (req: Request) => { }); } - const handler = toolMap.get(name); - if (handler) { - const result = await handler(auth, toolArguments); + const entry = toolMap.get(name); + if (entry) { + try { + assertScope(auth.scopes, entry.requiredScope); + } catch (scopeErr) { + if (isScopeInsufficientError(scopeErr)) { + return jsonRpcError(id ?? null, -32603, scopeErr.message, 403, { + code: 'scope_insufficient', + required: scopeErr.required, + }); + } + throw scopeErr; + } + const result = await entry.handler(auth, toolArguments); return jsonRpcResult(id ?? null, result); } diff --git a/supabase/functions/mcp-server/lib/auth.ts b/supabase/functions/mcp-server/lib/auth.ts index da0eec53..909435b1 100644 --- a/supabase/functions/mcp-server/lib/auth.ts +++ b/supabase/functions/mcp-server/lib/auth.ts @@ -1,5 +1,61 @@ import { createClient } from '@supabase/supabase-js'; import type { AuthContext } from './types.ts'; +import { DEFAULT_SCOPES } from './scopes.ts'; + +/** + * base64url decode (JWT-safe: no padding, uses `-` and `_`). + * Returns the decoded UTF-8 string or `null` if the input is not valid. + */ +function decodeBase64Url(segment: string): string | null { + try { + // Convert base64url → base64 and pad to a multiple of 4. + const b64 = segment.replace(/-/g, '+').replace(/_/g, '/'); + const padded = b64 + '='.repeat((4 - (b64.length % 4)) % 4); + const binary = atob(padded); + const bytes = new Uint8Array(binary.length); + for (let i = 0; i < binary.length; i++) { + bytes[i] = binary.charCodeAt(i); + } + return new TextDecoder().decode(bytes); + } catch { + return null; + } +} + +/** + * Extract an array of OAuth scopes from a JWT. Supports: + * - `scopes: string[]` (array form) + * - `scopes: 'a b c'` (space-separated string) + * - `scope: string[]` or `scope: 'a b c'` (RFC 6749 standard claim) + * Returns the fallback if no scope/scopes claim is present. + * + * NOTE: This does NOT verify the JWT signature — that's already done by + * supabase.auth.getUser() upstream. We only read claims from a token we + * just verified. 
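+ *
+ * Example: a payload of { "scope": "read:accounts read:casts" } and one of
+ * { "scopes": ["read:accounts", "read:casts"] } both yield
+ * ['read:accounts', 'read:casts'].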
+ */
+function extractScopesFromJwt(token: string, fallback: string[]): string[] {
+  const parts = token.split('.');
+  if (parts.length < 2) return fallback;
+
+  const payloadJson = decodeBase64Url(parts[1]);
+  if (!payloadJson) return fallback;
+
+  let payload: Record<string, unknown>;
+  try {
+    payload = JSON.parse(payloadJson);
+  } catch {
+    return fallback;
+  }
+
+  const raw = payload.scopes ?? payload.scope;
+  if (Array.isArray(raw)) {
+    return raw.filter((s): s is string => typeof s === 'string');
+  }
+  if (typeof raw === 'string' && raw.trim().length > 0) {
+    return raw.trim().split(/\s+/);
+  }
+  return fallback;
+}
 
 export async function authenticateRequest(authHeader: string | null): Promise<AuthContext> {
   // Debug logging for auth issues
@@ -47,11 +103,16 @@ export async function authenticateRequest(authHeader: string | null): Promise<AuthContext>
   return {
     supabaseClient,
     userId: user.id,
     token,
+    // Claims are read from the token supabase.auth.getUser() just verified;
+    // tokens without an explicit scope claim fall back to read-only defaults.
+    scopes: extractScopesFromJwt(token, DEFAULT_SCOPES),
   };
 }
diff --git a/supabase/functions/mcp-server/lib/scopes.ts b/supabase/functions/mcp-server/lib/scopes.ts
new file mode 100644
--- /dev/null
+++ b/supabase/functions/mcp-server/lib/scopes.ts
@@ -0,0 +1,37 @@
+// Scope model for MCP tools. A token's scopes come from its JWT claims
+// (see lib/auth.ts); tokens without an explicit claim fall back to the
+// read-only DEFAULT_SCOPES, so approving an MCP client no longer equals
+// full session power.
+
+// The full set of tool scopes understood by this server.
+export type McpScope = 'read:accounts' | 'read:casts' | 'write:cast' | 'manage:lists';
+
+// Conservative default applied when a token carries no scope claim at all.
+export const DEFAULT_SCOPES: string[] = ['read:accounts', 'read:casts'];
+
+export class ScopeInsufficientError extends Error {
+  code = 'scope_insufficient';
+  required: McpScope;
+
+  constructor(required: McpScope) {
+    super(`insufficient scope: this tool requires '${required}'`);
+    this.required = required;
+  }
+}
+
+// Throws unless the granted scope list covers the scope a tool requires.
+export function assertScope(granted: string[], required: McpScope): void {
+  if (!granted.includes(required)) {
+    throw new ScopeInsufficientError(required);
+  }
+}
+
+// Type guard used by the JSON-RPC dispatcher to map scope failures to a
+// 403 response with code 'scope_insufficient'.
+export function isScopeInsufficientError(error: unknown): error is ScopeInsufficientError {
+  return (
+    typeof error === 'object' &&
+    error !== null &&
+    (error as Record<string, unknown>).code === 'scope_insufficient'
+  );
+}
diff --git a/supabase/functions/mcp-server/lib/types.ts b/supabase/functions/mcp-server/lib/types.ts
index 0fcfa8cf..770855cc 100644
--- a/supabase/functions/mcp-server/lib/types.ts
+++ b/supabase/functions/mcp-server/lib/types.ts
@@ -4,6 +4,7 @@ export type AuthContext = {
   supabaseClient: SupabaseClient;
   userId: string;
   token: string;
+  scopes: string[];
 };
 
 export type ToolDefinition = {

From fc4e19ecbacba1bddb4634e8d91727d95caffdbc Mon Sep 17 00:00:00 2001
From: hellno 
Date: Thu, 23 Apr 2026 19:11:30 +0200
Subject: [PATCH 04/18] ci: supabase migration linter

Adds a Node-only migration linter that enforces:

- security_invoker=true on new public views (grandfathered for existing
  views where the flag is set in a later migration)
- SET search_path on every new CREATE FUNCTION
- no WITH CHECK (true) on new policies (tables can opt in with an inline
  'lint-allow-with-check-true' comment, e.g. channel)
- no bare JWT-shaped literals in migration SQL

Runs on push + PR for paths that touch supabase/migrations or the script
itself. Uses --ignore-before=20260423 so pre-existing legacy violations are
visible but not blocking; regressions in new migrations fail the build.

Pre-existing flagged issues (not fixed here, intentionally):

- 17 legacy items in pre-2026 migrations (reported only)
- public.channel retains WITH CHECK (true) with an allow-comment (channel
  is a shared protocol namespace; no user secret material)
---
 .github/workflows/supabase-migration-lint.yml |  31 ++
 scripts/lint-supabase-migrations.mjs          | 373 ++++++++++++++++++
 2 files changed, 404 insertions(+)
 create mode 100644 .github/workflows/supabase-migration-lint.yml
 create mode 100644 scripts/lint-supabase-migrations.mjs

diff --git a/.github/workflows/supabase-migration-lint.yml b/.github/workflows/supabase-migration-lint.yml
new file mode 100644
index 00000000..7f89d001
--- /dev/null
+++ b/.github/workflows/supabase-migration-lint.yml
@@ -0,0 +1,31 @@
+name: Supabase Migration Lint
+
+on:
+  push:
+    paths:
+      - 'supabase/migrations/**'
+      - 'scripts/lint-supabase-migrations.mjs'
+      - '.github/workflows/supabase-migration-lint.yml'
+  pull_request:
+    paths:
+      - 'supabase/migrations/**'
+      - 'scripts/lint-supabase-migrations.mjs'
+      - '.github/workflows/supabase-migration-lint.yml'
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+
+      - name: Lint Supabase migrations
+        # `--ignore-before` suppresses pre-cutoff legacy violations in the exit
+        # code (they are still printed for visibility). 
The cutoff marks the
        # signer-hardening PR; every migration dated on or after it is enforced.
        run: node scripts/lint-supabase-migrations.mjs --summary --ignore-before=20260423
diff --git a/scripts/lint-supabase-migrations.mjs b/scripts/lint-supabase-migrations.mjs
new file mode 100644
index 00000000..c42f7105
--- /dev/null
+++ b/scripts/lint-supabase-migrations.mjs
@@ -0,0 +1,373 @@
+#!/usr/bin/env node
+// Supabase migration linter.
+//
+// Scans every file in supabase/migrations/*.sql and enforces security rules
+// so we never regress the hardening done in April 2026.
+//
+// Rules:
+//   a) security_invoker on public views: every `CREATE [OR REPLACE] VIEW
+//      public.<name>` must include `WITH (security_invoker = true)` within
+//      ~500 chars of the CREATE keyword. Whitespace- and case-insensitive.
+//      Grandfather: an earlier CREATE on the same view name passes if a
+//      later migration (by filename) creates the same view WITH security_invoker.
+//   b) search_path on functions: every `CREATE [OR REPLACE] FUNCTION` must
+//      include a `SET search_path ...` clause in its header (before `AS $...$`).
+//   c) No `WITH CHECK (true)` policies unless the line immediately above
+//      carries `-- lint-allow-with-check-true: <reason>`.
+//   d) No bare JWT-shaped tokens in SQL (pattern eyJ[A-Za-z0-9_\-.]{40,}).
+//      The role identifier `service_role` is fine; only literal token values fail.
+//
+// Output: `<file>:<line>: <rule>: <message>`. Exit 0 on pass, 1 on failure.
+// Pass `--summary` to emit counts. Pass `--ignore-before=YYYYMMDD` to suppress
+// failures in migrations whose filename date is strictly earlier than the cutoff
+// (useful for CI during the initial rollout — pre-existing violations in legacy
+// migrations are a separate concern per the signer-hardening plan).
+//
+// Plain Node (no dependencies).
+
+import { readdirSync, readFileSync } from 'node:fs';
+import { join, dirname, resolve } from 'node:path';
+import { fileURLToPath } from 'node:url';
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
+const REPO_ROOT = resolve(__dirname, '..');
+const MIGRATIONS_DIR = join(REPO_ROOT, 'supabase', 'migrations');
+
+const WINDOW_VIEW = 500;
+const WINDOW_FUNC = 4000;
+
+const args = process.argv.slice(2);
+const SUMMARY_MODE = args.includes('--summary');
+let IGNORE_BEFORE = null;
+for (const a of args) {
+  const m = a.match(/^--ignore-before=(\d{8})$/);
+  if (m) IGNORE_BEFORE = m[1];
+}
+
+// Strip SQL comments (line `-- ...` and block `/*...*/`) while preserving
+// newline positions so any line lookups on the stripped source stay accurate.
+function stripComments(src) {
+  let out = '';
+  let i = 0;
+  const n = src.length;
+  let inLine = false;
+  let inBlock = false;
+  let inSingle = false;
+  let inDouble = false;
+  let inDollar = false;
+  let dollarTag = '';
+  while (i < n) {
+    const c = src[i];
+    const c2 = src[i + 1];
+    if (inLine) {
+      if (c === '\n') {
+        inLine = false;
+        out += c;
+      } else {
+        out += ' ';
+      }
+      i++;
+      continue;
+    }
+    if (inBlock) {
+      if (c === '*' && c2 === '/') {
+        inBlock = false;
+        out += ' ';
+        i += 2;
+        continue;
+      }
+      out += c === '\n' ? 
'\n' : ' '; + i++; + continue; + } + if (inSingle) { + out += c; + if (c === "'" && src[i - 1] !== '\\') inSingle = false; + i++; + continue; + } + if (inDouble) { + out += c; + if (c === '"') inDouble = false; + i++; + continue; + } + if (inDollar) { + out += c; + if (c === '$') { + const end = src.indexOf('$', i + 1); + if (end !== -1) { + const candidate = src.slice(i, end + 1); + if (candidate === dollarTag) { + out += src.slice(i + 1, end + 1); + i = end + 1; + inDollar = false; + continue; + } + } + } + i++; + continue; + } + if (c === '-' && c2 === '-') { + inLine = true; + out += ' '; + i += 2; + continue; + } + if (c === '/' && c2 === '*') { + inBlock = true; + out += ' '; + i += 2; + continue; + } + if (c === "'") { + inSingle = true; + out += c; + i++; + continue; + } + if (c === '"') { + inDouble = true; + out += c; + i++; + continue; + } + if (c === '$') { + const close = src.indexOf('$', i + 1); + if (close !== -1) { + const tag = src.slice(i, close + 1); + if (/^\$[A-Za-z0-9_]*\$$/.test(tag)) { + inDollar = true; + dollarTag = tag; + out += tag; + i = close + 1; + continue; + } + } + } + out += c; + i++; + } + return out; +} + +function lineNumberOf(src, index) { + let line = 1; + for (let i = 0; i < index && i < src.length; i++) { + if (src[i] === '\n') line++; + } + return line; +} + +// --- Rule implementations ----------------------------------------------- + +function findViewCreates(stripped) { + // Returns list of { schema, name, index, hasInvoker } for every CREATE VIEW. + const re = + /create\s+(?:or\s+replace\s+)?(?:temp(?:orary)?\s+|materialized\s+)?view\s+(?:"?([a-zA-Z_][a-zA-Z0-9_]*)"?\s*\.\s*)?"?([a-zA-Z_][a-zA-Z0-9_]*)"?/gi; + const results = []; + let m; + while ((m = re.exec(stripped)) !== null) { + const schema = (m[1] || 'public').toLowerCase(); + const name = m[2]; + const index = m.index; + const window = stripped.slice(index, index + WINDOW_VIEW); + // Accept `security_invoker` or `"security_invoker"`, any whitespace, `= true` + // with optional single quotes around the value. + const hasInvoker = /"?security_invoker"?\s*=\s*'?\s*true\s*'?/i.test(window); + results.push({ schema, name, index, hasInvoker }); + } + return results; +} + +function findFunctionCreates(stripped) { + const re = + /create\s+(?:or\s+replace\s+)?function\s+(?:"?([a-zA-Z_][a-zA-Z0-9_]*)"?\s*\.\s*)?"?([a-zA-Z_][a-zA-Z0-9_]*)"?/gi; + const results = []; + let m; + while ((m = re.exec(stripped)) !== null) { + const schema = (m[1] || 'public').toLowerCase(); + const name = m[2]; + const index = m.index; + const window = stripped.slice(index, index + WINDOW_FUNC); + // Header ends at the first `AS $...$` body marker, or at the end of the + // window if the header is unusually long. + const bodyMatch = window.match(/\bas\s*\$/i); + const endIdx = bodyMatch ? bodyMatch.index : window.length; + const header = window.slice(0, endIdx); + const hasSearchPath = /\bset\s+search_path\b/i.test(header); + results.push({ schema, name, index, hasSearchPath }); + } + return results; +} + +function findPolicyWithCheckTrue(raw, stripped) { + const re = /create\s+policy\b[\s\S]*?with\s+check\s*\(\s*true\s*\)/gi; + const allowComment = /--\s*lint-allow-with-check-true\s*:/i; + const rawLines = raw.split('\n'); + const results = []; + let m; + while ((m = re.exec(stripped)) !== null) { + const localIdx = m[0].search(/with\s+check\s*\(\s*true\s*\)/i); + const absIdx = m.index + (localIdx >= 0 ? 
localIdx : 0);
+    const lineNo = lineNumberOf(raw, absIdx);
+    const createLine = lineNumberOf(raw, m.index);
+    const above = rawLines[createLine - 2] || '';
+    if (allowComment.test(above)) continue;
+    results.push({ index: absIdx, line: lineNo });
+  }
+  return results;
+}
+
+function findBareJwts(stripped) {
+  const re = /eyJ[A-Za-z0-9_\-.]{40,}/g;
+  const results = [];
+  let m;
+  while ((m = re.exec(stripped)) !== null) {
+    results.push({ index: m.index, match: m[0] });
+  }
+  return results;
+}
+
+// --- Driver -------------------------------------------------------------
+
+function filenameDate(filename) {
+  // Supabase migration filenames start with YYYYMMDDHHMMSS. Extract the YYYYMMDD.
+  const m = filename.match(/^(\d{8})/);
+  return m ? m[1] : null;
+}
+
+function main() {
+  let files;
+  try {
+    files = readdirSync(MIGRATIONS_DIR)
+      .filter((f) => f.endsWith('.sql'))
+      .sort();
+  } catch (err) {
+    console.error(`lint-supabase-migrations: cannot read ${MIGRATIONS_DIR}: ${err.message}`);
+    process.exit(2);
+  }
+
+  // First pass: collect view-create information across ALL files so we can
+  // implement the grandfather rule for security_invoker_on_views.
+  // For each view name, remember the chronological order of CREATE statements
+  // and whether any of them has security_invoker. An earlier CREATE without
+  // security_invoker is grandfathered ONLY if a later CREATE for the same view
+  // name in a later file has security_invoker.
+  const viewHistory = new Map(); // name -> [{ file, hasInvoker }]
+
+  const parsedFiles = [];
+  for (const f of files) {
+    const full = join(MIGRATIONS_DIR, f);
+    const raw = readFileSync(full, 'utf8');
+    const stripped = stripComments(raw);
+    const views = findViewCreates(stripped);
+    const funcs = findFunctionCreates(stripped);
+    const polyTrue = findPolicyWithCheckTrue(raw, stripped);
+    const jwts = findBareJwts(stripped);
+    parsedFiles.push({ file: f, raw, stripped, views, funcs, polyTrue, jwts });
+    for (const v of views) {
+      if (v.schema !== 'public') continue;
+      if (!viewHistory.has(v.name)) viewHistory.set(v.name, []);
+      viewHistory.get(v.name).push({ file: f, hasInvoker: v.hasInvoker });
+    }
+  }
+
+  // For each view name, determine which (file, name) pairs are grandfathered.
+  // A statement is grandfathered if there exists a later file where the same
+  // view name is CREATEd WITH security_invoker.
+  const grandfathered = new Set(); // key: `${file}::${name}`
+  for (const [name, entries] of viewHistory.entries()) {
+    // entries are already in chronological order (files are sorted).
+    for (let i = 0; i < entries.length; i++) {
+      if (entries[i].hasInvoker) continue;
+      // Is there a later entry with hasInvoker?
+      for (let j = i + 1; j < entries.length; j++) {
+        if (entries[j].hasInvoker) {
+          grandfathered.add(`${entries[i].file}::${name}`);
+          break;
+        }
+      }
+    }
+  }
+
+  // Second pass: emit failures. 
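+  // Each failure records the repo-relative path for display plus the raw
+  // filename, whose YYYYMMDD prefix drives the --ignore-before filter below.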
+ const allFailures = []; + for (const pf of parsedFiles) { + const rel = `supabase/migrations/${pf.file}`; + for (const v of pf.views) { + if (v.schema !== 'public') continue; + if (v.hasInvoker) continue; + if (grandfathered.has(`${pf.file}::${v.name}`)) continue; + allFailures.push({ + file: rel, + rawFile: pf.file, + line: lineNumberOf(pf.raw, v.index), + rule: 'security-invoker-view', + message: `view "public"."${v.name}" missing WITH (security_invoker = true)`, + }); + } + for (const fn of pf.funcs) { + if (fn.schema !== 'public') continue; + if (fn.hasSearchPath) continue; + allFailures.push({ + file: rel, + rawFile: pf.file, + line: lineNumberOf(pf.raw, fn.index), + rule: 'function-search-path', + message: `function "public"."${fn.name}" missing SET search_path clause in header`, + }); + } + for (const p of pf.polyTrue) { + allFailures.push({ + file: rel, + rawFile: pf.file, + line: p.line, + rule: 'no-with-check-true', + message: 'policy uses WITH CHECK (true) without allowlist comment', + }); + } + for (const j of pf.jwts) { + allFailures.push({ + file: rel, + rawFile: pf.file, + line: lineNumberOf(pf.raw, j.index), + rule: 'no-bare-jwt', + message: `literal JWT-shaped token found: "${j.match.slice(0, 16)}..."`, + }); + } + } + + // Apply --ignore-before filter for exit code (still print all). + let blocking = 0; + for (const f of allFailures) { + const fdate = filenameDate(f.rawFile); + const isBlocking = !IGNORE_BEFORE || !fdate || fdate >= IGNORE_BEFORE; + const marker = isBlocking ? '' : ' [ignored:pre-cutoff]'; + console.log(`${f.file}:${f.line}: ${f.rule}: ${f.message}${marker}`); + if (isBlocking) blocking++; + } + + if (SUMMARY_MODE) { + const byRule = new Map(); + for (const f of allFailures) { + const fdate = filenameDate(f.rawFile); + const isBlocking = !IGNORE_BEFORE || !fdate || fdate >= IGNORE_BEFORE; + const key = `${f.rule}${isBlocking ? '' : ' (ignored)'}`; + byRule.set(key, (byRule.get(key) || 0) + 1); + } + console.log('---'); + console.log(`files scanned: ${parsedFiles.length}`); + console.log(`total failures: ${allFailures.length}`); + console.log(`blocking failures: ${blocking}`); + if (IGNORE_BEFORE) console.log(`ignore-before cutoff: ${IGNORE_BEFORE}`); + for (const [rule, count] of [...byRule.entries()].sort()) { + console.log(` ${rule}: ${count}`); + } + } + + process.exit(blocking === 0 ? 0 : 1); +} + +main(); From 00b8782a136818d22056ce350d899a3b6ad36f3a Mon Sep 17 00:00:00 2001 From: hellno Date: Thu, 23 Apr 2026 19:25:57 +0200 Subject: [PATCH 05/18] db: explicitly revoke authorize_* EXECUTE from anon/authenticated MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bug caught by local verification: the project's ALTER DEFAULT PRIVILEGES (see 20231201175719_schema_test.sql:202) grants ALL on newly-created functions to anon and authenticated. The previous REVOKE ALL ... FROM PUBLIC did NOT remove those role-specific grants — so authorize_draft_publish and authorize_auto_interaction were effectively callable by any authenticated user. An attacker could have called the RPCs directly, reading owner_user_id and bypassing the cron-only trust boundary. Explicitly revoking from anon and authenticated enforces the intended "service_role-only" posture. Verified via scripts/verify-signer-hardening.sh. 
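For illustration, a minimal TypeScript sketch of the posture this enforces
(client and variable names are hypothetical; 42501 is Postgres's
insufficient_privilege error code):

    // A client authenticated as a normal user (anon key + user JWT) must now
    // get a permission error instead of reading owner_user_id rows.
    const { data, error } = await userClient.rpc('authorize_draft_publish', {
      p_draft_id: draftId,
    });
    console.assert(data == null && error?.code === '42501', 'RPC must stay service_role-only');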
--- .../migrations/20260423000002_signer_hardening_rpcs.sql | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/supabase/migrations/20260423000002_signer_hardening_rpcs.sql b/supabase/migrations/20260423000002_signer_hardening_rpcs.sql index 965720b2..e1873da2 100644 --- a/supabase/migrations/20260423000002_signer_hardening_rpcs.sql +++ b/supabase/migrations/20260423000002_signer_hardening_rpcs.sql @@ -34,7 +34,10 @@ END; $$; ALTER FUNCTION public.authorize_draft_publish(uuid) OWNER TO postgres; -REVOKE ALL ON FUNCTION public.authorize_draft_publish(uuid) FROM PUBLIC; +-- The project's ALTER DEFAULT PRIVILEGES (see 20231201175719_schema_test.sql:202) +-- grants ALL on new functions to anon and authenticated. Revoking PUBLIC alone +-- leaves those role-specific grants in place, so we revoke them explicitly too. +REVOKE ALL ON FUNCTION public.authorize_draft_publish(uuid) FROM PUBLIC, anon, authenticated; GRANT EXECUTE ON FUNCTION public.authorize_draft_publish(uuid) TO service_role; -- ============================================================================ @@ -76,5 +79,7 @@ END; $$; ALTER FUNCTION public.authorize_auto_interaction(uuid) OWNER TO postgres; -REVOKE ALL ON FUNCTION public.authorize_auto_interaction(uuid) FROM PUBLIC; +-- Same rationale as above: explicit revoke from anon, authenticated to override +-- the project's default privileges that grant ALL on new functions to them. +REVOKE ALL ON FUNCTION public.authorize_auto_interaction(uuid) FROM PUBLIC, anon, authenticated; GRANT EXECUTE ON FUNCTION public.authorize_auto_interaction(uuid) TO service_role; From 6beb38ae01daf7b895010a46a6b06a4cf0168f9d Mon Sep 17 00:00:00 2001 From: hellno Date: Thu, 23 Apr 2026 19:25:57 +0200 Subject: [PATCH 06/18] test: local signer-hardening verification script MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds scripts/verify-signer-hardening.sh — a bash script that seeds two test users (attacker + victim) against a local supabase stack and exercises every hijack vector the hardening claims to close: - V3 (draft row-planting): RLS WITH CHECK and authorize_draft_publish RPC both reject cross-user INSERTs independently. - V4 (auto-interaction hijack): trigger rejects mismatched contents.sourceAccountId and null; RPC rejects even when the trigger is bypassed via session_replication_role=replica. - V11 (audit tamper): authenticated role cannot INSERT into signing_audit_log; service_role can (used by signer after Bug 2 fix). - V16 (FORCE RLS): verifies all 5 sensitive tables. - Grant posture: decrypted_accounts view + authorize_* RPCs locked down to the right roles; vault USAGE revoked; decrypted_account RPC retained for authenticated. Usage: supabase start && supabase db reset && bash scripts/verify-signer-hardening.sh All 17 assertions currently pass against the migrations in this branch. --- scripts/verify-signer-hardening.sh | 367 +++++++++++++++++++++++++++++ 1 file changed, 367 insertions(+) create mode 100755 scripts/verify-signer-hardening.sh diff --git a/scripts/verify-signer-hardening.sh b/scripts/verify-signer-hardening.sh new file mode 100755 index 00000000..36f08fea --- /dev/null +++ b/scripts/verify-signer-hardening.sh @@ -0,0 +1,367 @@ +#!/usr/bin/env bash +# Verifies the signer-key hardening migrations + authorization RPCs against a +# local Supabase stack. 
Exercises every attack vector the hardening plan +# claimed to close (V2 service-role bypass at signer, V3 draft row-planting, +# V4 auto-interaction hijack, V6 user-only RLS WITH CHECK, V11 audit-log +# tamper, V16 FORCE RLS) plus the happy paths so we detect over-tightening. +# +# Prereq: `supabase start` + `supabase db reset` (the migrations applied). +# +# Usage: scripts/verify-signer-hardening.sh +# Exit 0 => all checks passed. Exit 1 => at least one failure. + +set -uo pipefail + +# ---------- config ------------------------------------------------------------ +DB_URL="${DB_URL:-postgresql://postgres:postgres@127.0.0.1:54322/postgres}" +SIGNER_URL="${SIGNER_URL:-http://127.0.0.1:54321/functions/v1/farcaster-signer}" + +PASS=0 +FAIL=0 +FAILED_CASES=() + +c_red='\033[0;31m'; c_green='\033[0;32m'; c_yellow='\033[0;33m'; c_off='\033[0m' + +pass() { echo -e " ${c_green}PASS${c_off} $1"; PASS=$((PASS + 1)); } +fail() { echo -e " ${c_red}FAIL${c_off} $1"; FAIL=$((FAIL + 1)); FAILED_CASES+=("$1"); } +note() { echo -e " ${c_yellow}NOTE${c_off} $1"; } + +header() { printf "\n== %s ==\n" "$1"; } + +# run SQL, capture stdout+stderr +psql_run() { psql "$DB_URL" -X -qAt -v ON_ERROR_STOP=1 "$@" 2>&1; } + +# run SQL, expect ERROR containing pattern +psql_expect_error() { + local label="$1" sql="$2" pattern="$3" + local out + out=$(psql "$DB_URL" -X -qAt -v ON_ERROR_STOP=1 -c "$sql" 2>&1) && { + fail "$label (expected error but query succeeded)" + echo " actual: $out" >&2 + return 1 + } + if echo "$out" | grep -qE "$pattern"; then + pass "$label" + else + fail "$label (error didn't match /$pattern/)" + echo " actual: $out" >&2 + fi +} + +# run SQL, expect success, capture single-value result +psql_expect_ok() { + local label="$1" sql="$2" + local out + if out=$(psql "$DB_URL" -X -qAt -v ON_ERROR_STOP=1 -c "$sql" 2>&1); then + pass "$label" + printf '%s' "$out" + else + fail "$label (unexpected error)" + echo " actual: $out" >&2 + fi +} + +require_db() { + if ! psql "$DB_URL" -c 'SELECT 1' >/dev/null 2>&1; then + echo "cannot connect to $DB_URL — is \`supabase start\` running?" >&2 + exit 2 + fi +} + +# ---------- setup ------------------------------------------------------------- +require_db + +header "Setup: apply seed + test fixtures" + +# Ensure pgsodium KEK used by the encrypt trigger exists. The migrations and +# seed should handle this; we confirm idempotently. +psql_run <<'SQL' >/dev/null +DO $$ +BEGIN + IF NOT EXISTS (SELECT 1 FROM pgsodium.key WHERE id='dcd0dca7-c03a-40c5-b348-fefb87be2845') THEN + INSERT INTO pgsodium.key (id, key_type, key_id, key_context, name) + VALUES ('dcd0dca7-c03a-40c5-b348-fefb87be2845'::uuid, + 'aead-det', nextval('pgsodium.key_key_id_seq'), + decode('7067736f6469756d','hex'), + 'herocast_encryption_key'); + END IF; +END $$; +SQL + +# Clear previous fixture runs (idempotent). 
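+# Deletion order matters: child rows (audit log entries, lists, drafts) are
+# removed before the accounts and auth.users rows they reference.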
+psql_run <<'SQL' >/dev/null
+DELETE FROM public.signing_audit_log WHERE account_id IN (
+  SELECT id FROM public.accounts WHERE name LIKE 'verify-fixture-%'
+);
+DELETE FROM public.list WHERE name LIKE 'verify-fixture-%';
+DELETE FROM public.draft WHERE data->>'verify_fixture' = 'true';
+DELETE FROM public.accounts WHERE name LIKE 'verify-fixture-%';
+DELETE FROM public.profile WHERE user_id IN (
+  SELECT id FROM auth.users WHERE email IN ('verify-victim@test.local','verify-attacker@test.local')
+);
+DELETE FROM auth.users WHERE email IN ('verify-victim@test.local','verify-attacker@test.local');
+SQL
+
+# Fixture setup runs in a single psql session so that
+# `session_replication_role = 'replica'` persists across the statements.
+# This GUC bypasses pgsodium's encrypt trigger (whose event-trigger companion
+# would otherwise reinstate it after an ALTER TABLE DISABLE). The tests only
+# exercise the authorization layer, not ciphertext handling.
+FIXTURE=$(psql "$DB_URL" -X -qAt -v ON_ERROR_STOP=1 <<'SQL' 2>&1
+SET session_replication_role = 'replica';
+
+WITH ins AS (
+  INSERT INTO auth.users (id, email, instance_id, aud, role)
+  VALUES (gen_random_uuid(), 'verify-victim@test.local',
+          '00000000-0000-0000-0000-000000000000', 'authenticated', 'authenticated')
+  RETURNING id
+) SELECT 'VICTIM_ID=' || id::text FROM ins;
+
+WITH ins AS (
+  INSERT INTO auth.users (id, email, instance_id, aud, role)
+  VALUES (gen_random_uuid(), 'verify-attacker@test.local',
+          '00000000-0000-0000-0000-000000000000', 'authenticated', 'authenticated')
+  RETURNING id
+) SELECT 'ATTACKER_ID=' || id::text FROM ins;
+
+INSERT INTO public.profile (user_id, email)
+  SELECT id, email FROM auth.users
+  WHERE email IN ('verify-victim@test.local','verify-attacker@test.local')
+  ON CONFLICT (user_id) DO NOTHING;
+
+WITH vu AS (SELECT id FROM auth.users WHERE email='verify-victim@test.local'),
+     ins AS (
+  INSERT INTO public.accounts (id, user_id, platform, private_key, public_key, platform_account_id, status, name)
+  SELECT gen_random_uuid(), vu.id, 'farcaster', '0xdeadbeef',
+         '0x' || encode(gen_random_bytes(32), 'hex'),
+         '12345', 'active', 'verify-fixture-victim'
+  FROM vu
+  RETURNING id
+) SELECT 'VICTIM_ACCT=' || id::text FROM ins;
+
+WITH au AS (SELECT id FROM auth.users WHERE email='verify-attacker@test.local'),
+     ins AS (
+  INSERT INTO public.accounts (id, user_id, platform, private_key, public_key, platform_account_id, status, name)
+  SELECT gen_random_uuid(), au.id, 'farcaster', '0xcafebabe',
+         '0x' || encode(gen_random_bytes(32), 'hex'),
+         '54321', 'active', 'verify-fixture-attacker'
+  FROM au
+  RETURNING id
+) SELECT 'ATTACKER_ACCT=' || id::text FROM ins;
+
+RESET session_replication_role;
+SQL
+)
+# Parse the key=value pairs out of the output
+eval "$(echo "$FIXTURE" | grep -E '^(VICTIM_|ATTACKER_)')"
+
+if [[ -z "${VICTIM_ID:-}" || -z "${ATTACKER_ID:-}" || -z "${VICTIM_ACCT:-}" || -z "${ATTACKER_ACCT:-}" ]]; then
+  echo "fixture setup failed:" >&2
+  echo "$FIXTURE" >&2
+  exit 2
+fi
+
+note "victim user_id: $VICTIM_ID"
+note "attacker user_id: $ATTACKER_ID"
+note "victim account_id: $VICTIM_ACCT"
+note "attacker account_id: $ATTACKER_ACCT"
+
+# ---------- V3: draft row-planting --------------------------------------------
+header "V3 — scheduled-cast hijack via draft row-planting"
+
+# (a) RLS layer — attacker INSERT with victim's account_id should fail
+psql_expect_error \
+  "V3a RLS: attacker cannot INSERT draft with victim's 
account_id" \ + "SET LOCAL role authenticated; + SET LOCAL \"request.jwt.claims\" = '{\"sub\": \"$ATTACKER_ID\", \"role\": \"authenticated\"}'; + INSERT INTO public.draft (account_id, status, scheduled_for, data, created_by_user_id) + VALUES ('$VICTIM_ACCT'::uuid, 'scheduled', now()+interval '1 minute', '{\"text\":\"hijack\"}'::jsonb, '$ATTACKER_ID'::uuid);" \ + "row-level security|new row violates row-level security|check constraint|policy" + +# (b) RPC layer — construct a draft directly (as postgres, bypassing RLS) where +# created_by_user_id != accounts.user_id, then call authorize_draft_publish. +BOGUS_DRAFT=$(psql_run -c "INSERT INTO public.draft (account_id, status, scheduled_for, data, created_by_user_id) + VALUES ('$VICTIM_ACCT'::uuid, 'scheduled', now()+interval '1 minute', '{\"text\":\"hijack\",\"verify_fixture\":true}'::jsonb, '$ATTACKER_ID'::uuid) + RETURNING id;") +psql_expect_error \ + "V3b RPC: authorize_draft_publish rejects cross-user draft" \ + "SELECT * FROM public.authorize_draft_publish('$BOGUS_DRAFT'::uuid);" \ + "unauthorized draft publish" + +# (c) Happy path — victim's own legit draft passes RLS + RPC +VICTIM_DRAFT=$(psql_run -c "SET LOCAL role authenticated; + SET LOCAL \"request.jwt.claims\" = '{\"sub\": \"$VICTIM_ID\", \"role\": \"authenticated\"}'; + INSERT INTO public.draft (account_id, status, scheduled_for, data) + VALUES ('$VICTIM_ACCT'::uuid, 'scheduled', now()+interval '1 minute', '{\"text\":\"legit\",\"verify_fixture\":true}'::jsonb) + RETURNING id;") +if [[ -n "$VICTIM_DRAFT" ]]; then + pass "V3c happy path: victim's own legit draft INSERT succeeds" +else + fail "V3c happy path: victim's own legit draft INSERT failed" +fi + +# RPC should accept the legit draft +psql_expect_ok \ + "V3d happy path: authorize_draft_publish accepts victim's own draft" \ + "SELECT owner_user_id::text FROM public.authorize_draft_publish('$VICTIM_DRAFT'::uuid);" >/dev/null + +# ---------- V4: auto-interaction list hijack ---------------------------------- +header "V4 — auto-interaction hijack via contents.sourceAccountId" + +# (a) Trigger layer — attacker's list with mismatched sourceAccountId +psql_expect_error \ + "V4a trigger: mismatched sourceAccountId rejected on INSERT" \ + "SET LOCAL role authenticated; + SET LOCAL \"request.jwt.claims\" = '{\"sub\": \"$ATTACKER_ID\", \"role\": \"authenticated\"}'; + INSERT INTO public.list (user_id, idx, name, type, contents, account_id) + VALUES ('$ATTACKER_ID'::uuid, 1, 'verify-fixture-hijack', 'auto_interaction', + jsonb_build_object('sourceAccountId','$VICTIM_ACCT','actionType','both','fids',ARRAY['1']), + '$ATTACKER_ACCT'::uuid);" \ + "sourceAccountId must belong to list owner|sourceAccountId" + +# (b) Trigger — null sourceAccountId also rejected +psql_expect_error \ + "V4b trigger: null sourceAccountId rejected" \ + "SET LOCAL role authenticated; + SET LOCAL \"request.jwt.claims\" = '{\"sub\": \"$ATTACKER_ID\", \"role\": \"authenticated\"}'; + INSERT INTO public.list (user_id, idx, name, type, contents, account_id) + VALUES ('$ATTACKER_ID'::uuid, 2, 'verify-fixture-nullsrc', 'auto_interaction', + jsonb_build_object('actionType','both','fids',ARRAY['1']), + '$ATTACKER_ACCT'::uuid);" \ + "sourceAccountId" + +# (c) Happy path — attacker's own auto-interaction list against their own account +ATTACKER_LIST=$(psql_run -c "SET LOCAL role authenticated; + SET LOCAL \"request.jwt.claims\" = '{\"sub\": \"$ATTACKER_ID\", \"role\": \"authenticated\"}'; + INSERT INTO public.list (user_id, idx, name, type, contents, account_id) + VALUES 
('$ATTACKER_ID'::uuid, 3, 'verify-fixture-legit', 'auto_interaction',
+          jsonb_build_object('sourceAccountId','$ATTACKER_ACCT','actionType','both','fids',ARRAY['1']),
+          '$ATTACKER_ACCT'::uuid)
+  RETURNING id;")
+if [[ -n "$ATTACKER_LIST" ]]; then
+  pass "V4c happy path: owner's auto-interaction list INSERT succeeds"
+else
+  fail "V4c happy path: owner's auto-interaction list INSERT failed"
+fi
+
+# (d) RPC layer: authorize_auto_interaction on the legit list succeeds
+psql_expect_ok \
+  "V4d RPC happy path: authorize_auto_interaction accepts legit list" \
+  "SELECT source_account_id::text FROM public.authorize_auto_interaction('$ATTACKER_LIST'::uuid);" >/dev/null
+
+# (e) RPC layer: craft a list bypassing the trigger (session_replication_role=replica
+# skips user triggers for the INSERT) and verify the RPC still rejects it —
+# independent defense layer.
+BOGUS_LIST=$(psql "$DB_URL" -X -qAt -v ON_ERROR_STOP=1 <<SQL
+SET session_replication_role = 'replica';
+INSERT INTO public.list (user_id, idx, name, type, contents, account_id)
+VALUES ('$ATTACKER_ID'::uuid, 4, 'verify-fixture-bypass', 'auto_interaction',
+        jsonb_build_object('sourceAccountId','$VICTIM_ACCT','actionType','both','fids',ARRAY['1']),
+        '$ATTACKER_ACCT'::uuid)
+RETURNING id;
+SQL
+)
+psql_expect_error \
+  "V4e RPC: authorize_auto_interaction rejects list even when trigger is bypassed" \
+  "SELECT * FROM public.authorize_auto_interaction('$BOGUS_LIST'::uuid);" \
+  "sourceAccountId|unauthorized auto"
+
+# ---------- V11: audit-log tamper ----------------------------------------------
+header "V11 — signing_audit_log tamper"
+
+# (a) authenticated users cannot INSERT audit rows directly
+psql_expect_error \
+  "V11a authenticated cannot INSERT into signing_audit_log" \
+  "SET LOCAL role authenticated;
+   SET LOCAL \"request.jwt.claims\" = '{\"sub\": \"$ATTACKER_ID\", \"role\": \"authenticated\"}';
+   INSERT INTO public.signing_audit_log (user_id, actor_user_id, account_id, action, success, source)
+   VALUES ('$ATTACKER_ID'::uuid, '$ATTACKER_ID'::uuid, '$ATTACKER_ACCT'::uuid, 'cast.create', true, 'user');" \
+  "permission denied|row-level security"
+
+# (b) service_role can INSERT audit rows (the signer's write path)
+if psql "$DB_URL" -X -qAt -v ON_ERROR_STOP=1 <<SQL >/dev/null 2>&1
+SET ROLE service_role;
+INSERT INTO public.signing_audit_log (user_id, actor_user_id, account_id, action, success, source)
+VALUES ('$VICTIM_ID'::uuid, '$VICTIM_ID'::uuid, '$VICTIM_ACCT'::uuid, 'cast.create', true, 'user');
+RESET ROLE;
+SQL
+then
+  pass "V11b service_role can INSERT audit rows (used by signer)"
+else
+  fail "V11b service_role audit INSERT failed (Bug 2 regression)"
+fi
+
+# V16: FORCE ROW LEVEL SECURITY makes even the table owner (postgres) subject
+# to the RLS policies on these tables; verify all 5 sensitive tables have it.
+FORCED=$(psql_run -c "SELECT relname FROM pg_class WHERE relforcerowsecurity=true AND relname IN ('accounts','draft','list','signing_audit_log','signing_idempotency');")
+EXPECTED_COUNT=5
+ACTUAL_COUNT=$(echo "$FORCED" | wc -l | tr -d ' ')
+if [[ "$ACTUAL_COUNT" -eq "$EXPECTED_COUNT" ]]; then
+  pass "V16 FORCE ROW LEVEL SECURITY set on 5 sensitive tables"
+else
+  fail "V16 expected 5 FORCED tables, got $ACTUAL_COUNT: $FORCED"
+fi
+
+# ---------- Grant & RPC posture -----------------------------------------------
+header "Grant posture"
+
+# `-qAt` + ::text renders bool as true/false (not t/f); normalize with a helper
+truthy() { [[ "$1" == "t" || "$1" == "true" ]]; }
+falsy() { [[ "$1" == "f" || "$1" == "false" ]]; }
+
+# decrypted_account RPC should be EXECUTE-able by authenticated (we kept this
+# grant; the RPC self-filters via auth.uid()). 
+RPC_AUTH=$(psql_run -c "SELECT has_function_privilege('authenticated','public.decrypted_account(uuid)','EXECUTE')::text;")
+if truthy "$RPC_AUTH"; then
+  pass "decrypted_account EXECUTE retained for authenticated (Bug 1 fix)"
+else
+  fail "decrypted_account EXECUTE should be true for authenticated, got '$RPC_AUTH'"
+fi
+
+# authorize_* RPCs should be service_role-only
+for rpc in authorize_draft_publish authorize_auto_interaction; do
+  ANON=$(psql_run -c "SELECT has_function_privilege('anon','public.${rpc}(uuid)','EXECUTE')::text;")
+  AUTH=$(psql_run -c "SELECT has_function_privilege('authenticated','public.${rpc}(uuid)','EXECUTE')::text;")
+  SERVICE=$(psql_run -c "SELECT has_function_privilege('service_role','public.${rpc}(uuid)','EXECUTE')::text;")
+  if falsy "$ANON" && falsy "$AUTH" && truthy "$SERVICE"; then
+    pass "${rpc} EXECUTE restricted to service_role"
+  else
+    fail "${rpc} EXECUTE grants wrong (anon=$ANON auth=$AUTH service=$SERVICE; expected false/false/true)"
+  fi
+done
+
+# Decrypted view grants should not include anon/authenticated
+VIEW_ANON=$(psql_run -c "SELECT has_table_privilege('anon','public.decrypted_accounts','SELECT')::text;")
+VIEW_AUTH=$(psql_run -c "SELECT has_table_privilege('authenticated','public.decrypted_accounts','SELECT')::text;")
+if falsy "$VIEW_ANON" && falsy "$VIEW_AUTH"; then
+  pass "decrypted_accounts view not SELECTable by anon/authenticated"
+else
+  fail "decrypted_accounts view grants wrong (anon=$VIEW_ANON auth=$VIEW_AUTH; expected false/false)"
+fi
+
+# Vault schema USAGE should be revoked from authenticated
+VAULT_USAGE=$(psql_run -c "SELECT has_schema_privilege('authenticated','vault','USAGE')::text;" 2>&1)
+if falsy "$VAULT_USAGE"; then
+  pass "vault schema USAGE revoked from authenticated"
+elif truthy "$VAULT_USAGE"; then
+  fail "vault schema USAGE still granted to authenticated"
+else
+  note "vault schema privilege probe returned: $VAULT_USAGE (schema may not exist locally)"
+fi
+
+# ---------- Summary -----------------------------------------------------------
+header "Summary"
+printf "  %d pass, %d fail\n" "$PASS" "$FAIL"
+if [[ "$FAIL" -gt 0 ]]; then
+  printf "\nFailed cases:\n"
+  for c in "${FAILED_CASES[@]}"; do
+    printf "  - %s\n" "$c"
+  done
+  exit 1
fi
+exit 0

From ec610f7bb9d7babc3cf5dc2f701f722e8ae16f96 Mon Sep 17 00:00:00 2001
From: hellno 
Date: Sun, 3 May 2026 12:11:45 +0200
Subject: [PATCH 07/18] db: regenerate types after signer hardening migrations

Reflects the post-migration schema:

- draft.created_by_user_id (NOT NULL, default auth.uid())
- signing_audit_log.actor_user_id and source columns
- authorize_draft_publish(p_draft_id) and authorize_auto_interaction(p_list_id)
  RPCs

Generated via `supabase gen types typescript --project-id <project-id>
--schema public`. 
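As a rough usage sketch (assuming the repo's `@/` alias maps to `src/`; the
helper is hypothetical), the regenerated types expose the new ownership
column to the typed client:

    import type { Database } from '@/common/types/database.types';

    type DraftRow = Database['public']['Tables']['draft']['Row'];

    // created_by_user_id is now a required string on every draft row.
    function draftOwner(draft: DraftRow): string {
      return draft.created_by_user_id;
    }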
--- src/common/types/database.types.ts | 1087 ++++++++++++++++------------ 1 file changed, 609 insertions(+), 478 deletions(-) diff --git a/src/common/types/database.types.ts b/src/common/types/database.types.ts index 0f1b988c..16ce63be 100644 --- a/src/common/types/database.types.ts +++ b/src/common/types/database.types.ts @@ -1,637 +1,768 @@ -export type Json = string | number | boolean | null | { [key: string]: Json | undefined } | Json[]; +export type Json = + | string + | number + | boolean + | null + | { [key: string]: Json | undefined } + | Json[] export type Database = { // Allows to automatically instantiate createClient with right options // instead of createClient(URL, KEY) __InternalSupabase: { - PostgrestVersion: '11.1.0 (1f13e43)'; - }; + PostgrestVersion: "11.1.0 (1f13e43)" + } public: { Tables: { accounts: { Row: { - created_at: string; - data: Json | null; - display_order: number | null; - farcaster_api_key: string | null; - id: string; - name: string | null; - platform: string | null; - platform_account_id: string | null; - private_key: string; - public_key: string | null; - status: string | null; - user_id: string; - }; + created_at: string + data: Json | null + display_order: number | null + farcaster_api_key: string | null + id: string + name: string | null + platform: string | null + platform_account_id: string | null + private_key: string + public_key: string | null + status: string | null + user_id: string + } Insert: { - created_at?: string; - data?: Json | null; - display_order?: number | null; - farcaster_api_key?: string | null; - id?: string; - name?: string | null; - platform?: string | null; - platform_account_id?: string | null; - private_key: string; - public_key?: string | null; - status?: string | null; - user_id?: string; - }; + created_at?: string + data?: Json | null + display_order?: number | null + farcaster_api_key?: string | null + id?: string + name?: string | null + platform?: string | null + platform_account_id?: string | null + private_key: string + public_key?: string | null + status?: string | null + user_id?: string + } Update: { - created_at?: string; - data?: Json | null; - display_order?: number | null; - farcaster_api_key?: string | null; - id?: string; - name?: string | null; - platform?: string | null; - platform_account_id?: string | null; - private_key?: string; - public_key?: string | null; - status?: string | null; - user_id?: string; - }; - Relationships: []; - }; + created_at?: string + data?: Json | null + display_order?: number | null + farcaster_api_key?: string | null + id?: string + name?: string | null + platform?: string | null + platform_account_id?: string | null + private_key?: string + public_key?: string | null + status?: string | null + user_id?: string + } + Relationships: [] + } accounts_to_channel: { Row: { - account_id: string; - channel_id: string; - created_at: string; - id: string; - index: number | null; - last_read: string | null; - }; + account_id: string + channel_id: string + created_at: string + id: string + index: number | null + last_read: string | null + } Insert: { - account_id: string; - channel_id: string; - created_at?: string; - id?: string; - index?: number | null; - last_read?: string | null; - }; + account_id: string + channel_id: string + created_at?: string + id?: string + index?: number | null + last_read?: string | null + } Update: { - account_id?: string; - channel_id?: string; - created_at?: string; - id?: string; - index?: number | null; - last_read?: string | null; - }; + account_id?: 
string + channel_id?: string + created_at?: string + id?: string + index?: number | null + last_read?: string | null + } Relationships: [ { - foreignKeyName: 'accounts_to_channel_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'accounts'; - referencedColumns: ['id']; + foreignKeyName: "accounts_to_channel_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "accounts" + referencedColumns: ["id"] }, { - foreignKeyName: 'accounts_to_channel_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'decrypted_accounts'; - referencedColumns: ['id']; + foreignKeyName: "accounts_to_channel_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_accounts" + referencedColumns: ["id"] }, { - foreignKeyName: 'accounts_to_channel_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'decrypted_dm_accounts'; - referencedColumns: ['id']; + foreignKeyName: "accounts_to_channel_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_dm_accounts" + referencedColumns: ["id"] }, { - foreignKeyName: 'accounts_to_channel_channel_id_fkey'; - columns: ['channel_id']; - isOneToOne: false; - referencedRelation: 'channel'; - referencedColumns: ['id']; + foreignKeyName: "accounts_to_channel_channel_id_fkey" + columns: ["channel_id"] + isOneToOne: false + referencedRelation: "channel" + referencedColumns: ["id"] }, - ]; - }; + ] + } analytics: { Row: { - data: Json | null; - fid: number; - status: string; - updated_at: string | null; - }; + data: Json | null + fid: number + status: string + updated_at: string | null + } Insert: { - data?: Json | null; - fid: number; - status: string; - updated_at?: string | null; - }; + data?: Json | null + fid: number + status: string + updated_at?: string | null + } Update: { - data?: Json | null; - fid?: number; - status?: string; - updated_at?: string | null; - }; - Relationships: []; - }; + data?: Json | null + fid?: number + status?: string + updated_at?: string | null + } + Relationships: [] + } auto_interaction_history: { Row: { - action: string; - cast_hash: string; - error_message: string | null; - list_id: string; - processed_at: string | null; - status: string | null; - }; + action: string + cast_hash: string + error_message: string | null + list_id: string + processed_at: string | null + status: string | null + } Insert: { - action: string; - cast_hash: string; - error_message?: string | null; - list_id: string; - processed_at?: string | null; - status?: string | null; - }; + action: string + cast_hash: string + error_message?: string | null + list_id: string + processed_at?: string | null + status?: string | null + } Update: { - action?: string; - cast_hash?: string; - error_message?: string | null; - list_id?: string; - processed_at?: string | null; - status?: string | null; - }; + action?: string + cast_hash?: string + error_message?: string | null + list_id?: string + processed_at?: string | null + status?: string | null + } Relationships: [ { - foreignKeyName: 'auto_interaction_history_list_id_fkey'; - columns: ['list_id']; - isOneToOne: false; - referencedRelation: 'list'; - referencedColumns: ['id']; + foreignKeyName: "auto_interaction_history_list_id_fkey" + columns: ["list_id"] + isOneToOne: false + referencedRelation: "list" + referencedColumns: ["id"] }, - ]; - }; + ] + } channel: { Row: { - created_at: string; - data: Json | null; - description: 
string | null; - icon_url: string | null; - id: string; - name: string | null; - source: string | null; - url: string | null; - }; + created_at: string + data: Json | null + description: string | null + icon_url: string | null + id: string + name: string | null + source: string | null + url: string | null + } Insert: { - created_at?: string; - data?: Json | null; - description?: string | null; - icon_url?: string | null; - id?: string; - name?: string | null; - source?: string | null; - url?: string | null; - }; + created_at?: string + data?: Json | null + description?: string | null + icon_url?: string | null + id?: string + name?: string | null + source?: string | null + url?: string | null + } Update: { - created_at?: string; - data?: Json | null; - description?: string | null; - icon_url?: string | null; - id?: string; - name?: string | null; - source?: string | null; - url?: string | null; - }; - Relationships: []; - }; + created_at?: string + data?: Json | null + description?: string | null + icon_url?: string | null + id?: string + name?: string | null + source?: string | null + url?: string | null + } + Relationships: [] + } customers: { Row: { - created_at: string | null; - hypersub_token_id: string | null; - id: string; - product: string | null; - stripe_customer_id: string | null; - user_id: string; - }; + created_at: string | null + hypersub_token_id: string | null + id: string + product: string | null + stripe_customer_id: string | null + user_id: string + } Insert: { - created_at?: string | null; - hypersub_token_id?: string | null; - id?: string; - product?: string | null; - stripe_customer_id?: string | null; - user_id: string; - }; + created_at?: string | null + hypersub_token_id?: string | null + id?: string + product?: string | null + stripe_customer_id?: string | null + user_id: string + } Update: { - created_at?: string | null; - hypersub_token_id?: string | null; - id?: string; - product?: string | null; - stripe_customer_id?: string | null; - user_id?: string; - }; - Relationships: []; - }; + created_at?: string | null + hypersub_token_id?: string | null + id?: string + product?: string | null + stripe_customer_id?: string | null + user_id?: string + } + Relationships: [] + } draft: { Row: { - account_id: string; - created_at: string; - data: Json | null; - encoded_message_bytes: number[] | null; - id: string; - published_at: string | null; - scheduled_for: string | null; - status: string | null; - updated_at: string; - }; + account_id: string + created_at: string + created_by_user_id: string + data: Json | null + encoded_message_bytes: number[] | null + id: string + published_at: string | null + scheduled_for: string | null + status: string | null + updated_at: string + } Insert: { - account_id: string; - created_at?: string; - data?: Json | null; - encoded_message_bytes?: number[] | null; - id?: string; - published_at?: string | null; - scheduled_for?: string | null; - status?: string | null; - updated_at?: string; - }; + account_id: string + created_at?: string + created_by_user_id?: string + data?: Json | null + encoded_message_bytes?: number[] | null + id?: string + published_at?: string | null + scheduled_for?: string | null + status?: string | null + updated_at?: string + } Update: { - account_id?: string; - created_at?: string; - data?: Json | null; - encoded_message_bytes?: number[] | null; - id?: string; - published_at?: string | null; - scheduled_for?: string | null; - status?: string | null; - updated_at?: string; - }; + account_id?: string + created_at?: 
string + created_by_user_id?: string + data?: Json | null + encoded_message_bytes?: number[] | null + id?: string + published_at?: string | null + scheduled_for?: string | null + status?: string | null + updated_at?: string + } Relationships: [ { - foreignKeyName: 'public_draft_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'accounts'; - referencedColumns: ['id']; + foreignKeyName: "public_draft_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "accounts" + referencedColumns: ["id"] }, { - foreignKeyName: 'public_draft_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'decrypted_accounts'; - referencedColumns: ['id']; + foreignKeyName: "public_draft_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_accounts" + referencedColumns: ["id"] }, { - foreignKeyName: 'public_draft_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'decrypted_dm_accounts'; - referencedColumns: ['id']; + foreignKeyName: "public_draft_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_dm_accounts" + referencedColumns: ["id"] }, - ]; - }; + ] + } list: { Row: { - account_id: string | null; - contents: Json; - created_at: string; - id: string; - idx: number; - name: string; - type: Database['public']['Enums']['list_type']; - user_id: string | null; - }; + account_id: string | null + contents: Json + created_at: string + id: string + idx: number + name: string + type: Database["public"]["Enums"]["list_type"] + user_id: string | null + } Insert: { - account_id?: string | null; - contents: Json; - created_at?: string; - id?: string; - idx: number; - name: string; - type: Database['public']['Enums']['list_type']; - user_id?: string | null; - }; + account_id?: string | null + contents: Json + created_at?: string + id?: string + idx: number + name: string + type: Database["public"]["Enums"]["list_type"] + user_id?: string | null + } Update: { - account_id?: string | null; - contents?: Json; - created_at?: string; - id?: string; - idx?: number; - name?: string; - type?: Database['public']['Enums']['list_type']; - user_id?: string | null; - }; + account_id?: string | null + contents?: Json + created_at?: string + id?: string + idx?: number + name?: string + type?: Database["public"]["Enums"]["list_type"] + user_id?: string | null + } Relationships: [ { - foreignKeyName: 'list_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'accounts'; - referencedColumns: ['id']; + foreignKeyName: "list_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "accounts" + referencedColumns: ["id"] }, { - foreignKeyName: 'list_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'decrypted_accounts'; - referencedColumns: ['id']; + foreignKeyName: "list_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_accounts" + referencedColumns: ["id"] }, { - foreignKeyName: 'list_account_id_fkey'; - columns: ['account_id']; - isOneToOne: false; - referencedRelation: 'decrypted_dm_accounts'; - referencedColumns: ['id']; + foreignKeyName: "list_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_dm_accounts" + referencedColumns: ["id"] }, { - foreignKeyName: 'public_list_user_id_fkey'; - columns: ['user_id']; - isOneToOne: false; - 
referencedRelation: 'profile'; - referencedColumns: ['user_id']; + foreignKeyName: "public_list_user_id_fkey" + columns: ["user_id"] + isOneToOne: false + referencedRelation: "profile" + referencedColumns: ["user_id"] }, - ]; - }; + ] + } notification_read_states: { Row: { - created_at: string; - id: string; - notification_id: string; - notification_type: string; - read_at: string; - user_id: string; - }; + created_at: string + id: string + notification_id: string + notification_type: string + read_at: string + user_id: string + } Insert: { - created_at?: string; - id?: string; - notification_id: string; - notification_type: string; - read_at: string; - user_id: string; - }; + created_at?: string + id?: string + notification_id: string + notification_type: string + read_at: string + user_id: string + } Update: { - created_at?: string; - id?: string; - notification_id?: string; - notification_type?: string; - read_at?: string; - user_id?: string; - }; - Relationships: []; - }; + created_at?: string + id?: string + notification_id?: string + notification_type?: string + read_at?: string + user_id?: string + } + Relationships: [] + } profile: { Row: { - email: string | null; - user_id: string; - }; + email: string | null + user_id: string + } + Insert: { + email?: string | null + user_id: string + } + Update: { + email?: string | null + user_id?: string + } + Relationships: [] + } + signing_audit_log: { + Row: { + account_id: string | null + action: string + actor_user_id: string | null + created_at: string | null + error_code: string | null + id: string + source: string | null + success: boolean + user_id: string | null + } + Insert: { + account_id?: string | null + action: string + actor_user_id?: string | null + created_at?: string | null + error_code?: string | null + id?: string + source?: string | null + success: boolean + user_id?: string | null + } + Update: { + account_id?: string | null + action?: string + actor_user_id?: string | null + created_at?: string | null + error_code?: string | null + id?: string + source?: string | null + success?: boolean + user_id?: string | null + } + Relationships: [ + { + foreignKeyName: "signing_audit_log_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "accounts" + referencedColumns: ["id"] + }, + { + foreignKeyName: "signing_audit_log_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_accounts" + referencedColumns: ["id"] + }, + { + foreignKeyName: "signing_audit_log_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_dm_accounts" + referencedColumns: ["id"] + }, + ] + } + signing_idempotency: { + Row: { + account_id: string + created_at: string | null + idempotency_key: string + response_error: string | null + response_hash: string | null + } Insert: { - email?: string | null; - user_id: string; - }; + account_id: string + created_at?: string | null + idempotency_key: string + response_error?: string | null + response_hash?: string | null + } Update: { - email?: string | null; - user_id?: string; - }; - Relationships: []; - }; + account_id?: string + created_at?: string | null + idempotency_key?: string + response_error?: string | null + response_hash?: string | null + } + Relationships: [ + { + foreignKeyName: "signing_idempotency_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "accounts" + referencedColumns: ["id"] + }, + { + foreignKeyName: "signing_idempotency_account_id_fkey" + 
columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_accounts" + referencedColumns: ["id"] + }, + { + foreignKeyName: "signing_idempotency_account_id_fkey" + columns: ["account_id"] + isOneToOne: false + referencedRelation: "decrypted_dm_accounts" + referencedColumns: ["id"] + }, + ] + } user_preferences: { Row: { - created_at: string; - preferences: Json; - updated_at: string; - user_id: string; - }; + created_at: string + preferences: Json + updated_at: string + user_id: string + } Insert: { - created_at?: string; - preferences?: Json; - updated_at?: string; - user_id: string; - }; + created_at?: string + preferences?: Json + updated_at?: string + user_id: string + } Update: { - created_at?: string; - preferences?: Json; - updated_at?: string; - user_id?: string; - }; - Relationships: []; - }; - }; + created_at?: string + preferences?: Json + updated_at?: string + user_id?: string + } + Relationships: [] + } + } Views: { decrypted_accounts: { Row: { - created_at: string | null; - data: Json | null; - decrypted_farcaster_api_key: string | null; - decrypted_private_key: string | null; - display_order: number | null; - farcaster_api_key: string | null; - id: string | null; - name: string | null; - platform: string | null; - platform_account_id: string | null; - private_key: string | null; - public_key: string | null; - status: string | null; - user_id: string | null; - }; + created_at: string | null + data: Json | null + decrypted_farcaster_api_key: string | null + decrypted_private_key: string | null + display_order: number | null + farcaster_api_key: string | null + id: string | null + name: string | null + platform: string | null + platform_account_id: string | null + private_key: string | null + public_key: string | null + status: string | null + user_id: string | null + } Insert: { - created_at?: string | null; - data?: Json | null; - decrypted_farcaster_api_key?: never; - decrypted_private_key?: never; - display_order?: number | null; - farcaster_api_key?: string | null; - id?: string | null; - name?: string | null; - platform?: string | null; - platform_account_id?: string | null; - private_key?: string | null; - public_key?: string | null; - status?: string | null; - user_id?: string | null; - }; + created_at?: string | null + data?: Json | null + decrypted_farcaster_api_key?: never + decrypted_private_key?: never + display_order?: number | null + farcaster_api_key?: string | null + id?: string | null + name?: string | null + platform?: string | null + platform_account_id?: string | null + private_key?: string | null + public_key?: string | null + status?: string | null + user_id?: string | null + } Update: { - created_at?: string | null; - data?: Json | null; - decrypted_farcaster_api_key?: never; - decrypted_private_key?: never; - display_order?: number | null; - farcaster_api_key?: string | null; - id?: string | null; - name?: string | null; - platform?: string | null; - platform_account_id?: string | null; - private_key?: string | null; - public_key?: string | null; - status?: string | null; - user_id?: string | null; - }; - Relationships: []; - }; + created_at?: string | null + data?: Json | null + decrypted_farcaster_api_key?: never + decrypted_private_key?: never + display_order?: number | null + farcaster_api_key?: string | null + id?: string | null + name?: string | null + platform?: string | null + platform_account_id?: string | null + private_key?: string | null + public_key?: string | null + status?: string | null + user_id?: string | null + } + 
Relationships: [] + } decrypted_dm_accounts: { Row: { - decrypted_farcaster_api_key: string | null; - id: string | null; - platform_account_id: string | null; - user_id: string | null; - }; + decrypted_farcaster_api_key: string | null + id: string | null + platform_account_id: string | null + user_id: string | null + } Insert: { - decrypted_farcaster_api_key?: never; - id?: string | null; - platform_account_id?: string | null; - user_id?: string | null; - }; + decrypted_farcaster_api_key?: never + id?: string | null + platform_account_id?: string | null + user_id?: string | null + } Update: { - decrypted_farcaster_api_key?: never; - id?: string | null; - platform_account_id?: string | null; - user_id?: string | null; - }; - Relationships: []; - }; - }; + decrypted_farcaster_api_key?: never + id?: string | null + platform_account_id?: string | null + user_id?: string | null + } + Relationships: [] + } + } Functions: { + authorize_auto_interaction: { + Args: { p_list_id: string } + Returns: { + owner_user_id: string + source_account_id: string + }[] + } + authorize_draft_publish: { + Args: { p_draft_id: string } + Returns: { + account_id: string + owner_user_id: string + }[] + } decrypted_account: { - Args: { account_id: string }; + Args: { account_id: string } Returns: { - created_at: string; - data: Json; - decrypted_private_key: string; - id: string; - name: string; - platform: string; - platform_account_id: string; - private_key: string; - public_key: string; - status: string; - user_id: string; - }[]; - }; + created_at: string + data: Json + decrypted_private_key: string + id: string + name: string + platform: string + platform_account_id: string + private_key: string + public_key: string + status: string + user_id: string + }[] + } is_account_of_user: { - Args: { _account_id: string; _user_id: string }; - Returns: boolean; - }; - trigger_process_auto_interactions: { Args: never; Returns: undefined }; - }; + Args: { _account_id: string; _user_id: string } + Returns: boolean + } + trigger_process_auto_interactions: { Args: never; Returns: undefined } + } Enums: { - list_type: 'fids' | 'search' | 'auto_interaction'; - }; + list_type: "fids" | "search" | "auto_interaction" + } CompositeTypes: { - [_ in never]: never; - }; - }; -}; + [_ in never]: never + } + } +} -type DatabaseWithoutInternals = Omit<Database, '__InternalSupabase'>; +type DatabaseWithoutInternals = Omit<Database, "__InternalSupabase"> -type DefaultSchema = DatabaseWithoutInternals[Extract<keyof Database, 'public'>]; +type DefaultSchema = DatabaseWithoutInternals[Extract<keyof Database, "public">] export type Tables< DefaultSchemaTableNameOrOptions extends - | keyof (DefaultSchema['Tables'] & DefaultSchema['Views']) + | keyof (DefaultSchema["Tables"] & DefaultSchema["Views"]) | { schema: keyof DatabaseWithoutInternals }, TableName extends DefaultSchemaTableNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? keyof (DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions['schema']]['Tables'] & - DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions['schema']]['Views']) + ? keyof (DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions["schema"]]["Tables"] & + DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions["schema"]]["Views"]) : never = never, > = DefaultSchemaTableNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ?
(DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions['schema']]['Tables'] & - DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions['schema']]['Views'])[TableName] extends { - Row: infer R; + ? (DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions["schema"]]["Tables"] & + DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions["schema"]]["Views"])[TableName] extends { + Row: infer R } ? R : never - : DefaultSchemaTableNameOrOptions extends keyof (DefaultSchema['Tables'] & DefaultSchema['Views']) - ? (DefaultSchema['Tables'] & DefaultSchema['Views'])[DefaultSchemaTableNameOrOptions] extends { - Row: infer R; + : DefaultSchemaTableNameOrOptions extends keyof (DefaultSchema["Tables"] & + DefaultSchema["Views"]) + ? (DefaultSchema["Tables"] & + DefaultSchema["Views"])[DefaultSchemaTableNameOrOptions] extends { + Row: infer R } ? R : never - : never; + : never export type TablesInsert< - DefaultSchemaTableNameOrOptions extends keyof DefaultSchema['Tables'] | { schema: keyof DatabaseWithoutInternals }, + DefaultSchemaTableNameOrOptions extends + | keyof DefaultSchema["Tables"] + | { schema: keyof DatabaseWithoutInternals }, TableName extends DefaultSchemaTableNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? keyof DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions['schema']]['Tables'] + ? keyof DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions["schema"]]["Tables"] : never = never, > = DefaultSchemaTableNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions['schema']]['Tables'][TableName] extends { - Insert: infer I; + ? DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions["schema"]]["Tables"][TableName] extends { + Insert: infer I } ? I : never - : DefaultSchemaTableNameOrOptions extends keyof DefaultSchema['Tables'] - ? DefaultSchema['Tables'][DefaultSchemaTableNameOrOptions] extends { - Insert: infer I; + : DefaultSchemaTableNameOrOptions extends keyof DefaultSchema["Tables"] + ? DefaultSchema["Tables"][DefaultSchemaTableNameOrOptions] extends { + Insert: infer I } ? I : never - : never; + : never export type TablesUpdate< - DefaultSchemaTableNameOrOptions extends keyof DefaultSchema['Tables'] | { schema: keyof DatabaseWithoutInternals }, + DefaultSchemaTableNameOrOptions extends + | keyof DefaultSchema["Tables"] + | { schema: keyof DatabaseWithoutInternals }, TableName extends DefaultSchemaTableNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? keyof DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions['schema']]['Tables'] + ? keyof DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions["schema"]]["Tables"] : never = never, > = DefaultSchemaTableNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions['schema']]['Tables'][TableName] extends { - Update: infer U; + ? DatabaseWithoutInternals[DefaultSchemaTableNameOrOptions["schema"]]["Tables"][TableName] extends { + Update: infer U } ? U : never - : DefaultSchemaTableNameOrOptions extends keyof DefaultSchema['Tables'] - ? DefaultSchema['Tables'][DefaultSchemaTableNameOrOptions] extends { - Update: infer U; + : DefaultSchemaTableNameOrOptions extends keyof DefaultSchema["Tables"] + ? 
DefaultSchema["Tables"][DefaultSchemaTableNameOrOptions] extends { + Update: infer U } ? U : never - : never; + : never export type Enums< - DefaultSchemaEnumNameOrOptions extends keyof DefaultSchema['Enums'] | { schema: keyof DatabaseWithoutInternals }, + DefaultSchemaEnumNameOrOptions extends + | keyof DefaultSchema["Enums"] + | { schema: keyof DatabaseWithoutInternals }, EnumName extends DefaultSchemaEnumNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? keyof DatabaseWithoutInternals[DefaultSchemaEnumNameOrOptions['schema']]['Enums'] + ? keyof DatabaseWithoutInternals[DefaultSchemaEnumNameOrOptions["schema"]]["Enums"] : never = never, > = DefaultSchemaEnumNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? DatabaseWithoutInternals[DefaultSchemaEnumNameOrOptions['schema']]['Enums'][EnumName] - : DefaultSchemaEnumNameOrOptions extends keyof DefaultSchema['Enums'] - ? DefaultSchema['Enums'][DefaultSchemaEnumNameOrOptions] - : never; + ? DatabaseWithoutInternals[DefaultSchemaEnumNameOrOptions["schema"]]["Enums"][EnumName] + : DefaultSchemaEnumNameOrOptions extends keyof DefaultSchema["Enums"] + ? DefaultSchema["Enums"][DefaultSchemaEnumNameOrOptions] + : never export type CompositeTypes< PublicCompositeTypeNameOrOptions extends - | keyof DefaultSchema['CompositeTypes'] + | keyof DefaultSchema["CompositeTypes"] | { schema: keyof DatabaseWithoutInternals }, CompositeTypeName extends PublicCompositeTypeNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? keyof DatabaseWithoutInternals[PublicCompositeTypeNameOrOptions['schema']]['CompositeTypes'] + ? keyof DatabaseWithoutInternals[PublicCompositeTypeNameOrOptions["schema"]]["CompositeTypes"] : never = never, > = PublicCompositeTypeNameOrOptions extends { - schema: keyof DatabaseWithoutInternals; + schema: keyof DatabaseWithoutInternals } - ? DatabaseWithoutInternals[PublicCompositeTypeNameOrOptions['schema']]['CompositeTypes'][CompositeTypeName] - : PublicCompositeTypeNameOrOptions extends keyof DefaultSchema['CompositeTypes'] - ? DefaultSchema['CompositeTypes'][PublicCompositeTypeNameOrOptions] - : never; + ? DatabaseWithoutInternals[PublicCompositeTypeNameOrOptions["schema"]]["CompositeTypes"][CompositeTypeName] + : PublicCompositeTypeNameOrOptions extends keyof DefaultSchema["CompositeTypes"] + ? DefaultSchema["CompositeTypes"][PublicCompositeTypeNameOrOptions] + : never export const Constants = { public: { Enums: { - list_type: ['fids', 'search', 'auto_interaction'], + list_type: ["fids", "search", "auto_interaction"], }, }, -} as const; - -// Convenience type aliases for common table row types -export type List = Database['public']['Tables']['list']['Row']; -export type Account = Database['public']['Tables']['accounts']['Row']; +} as const From bb68ce1cd566fc7e2b2bf5f19fd7f295d5d0b2e7 Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 12:54:26 +0200 Subject: [PATCH 08/18] cron: rename SUPABASE_JWT_SECRET -> LEGACY_JWT_SECRET MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Supabase reserves the SUPABASE_* env-var prefix for auto-injected vars and the CLI rejects `supabase secrets set SUPABASE_JWT_SECRET=...` with "Env name cannot start with SUPABASE_, skipping". 
Earlier checklist text (now corrected) assumed SUPABASE_JWT_SECRET would be auto-provided — that is not the case for projects migrated to JWT signing keys, where SUPABASE_SECRET_KEYS is auto-provided in JWKS form instead. Both cron functions (publish-cast-from-db, process-auto-interactions) now read LEGACY_JWT_SECRET. Operator must set it once via: supabase secrets set LEGACY_JWT_SECRET=<value> The value is the project's "Legacy JWT secret" from Dashboard -> Project Settings -> JWT Keys (the same secret that signs the legacy service_role/anon JWTs). Caught while smoke-testing the deployed cron flow: a freshly scheduled draft was being marked 'failed' because mintUserJwt threw on the missing env var, and publishDraft's catch block flipped status -> failed. --- .../functions/process-auto-interactions/index.ts | 14 ++++++++------ supabase/functions/publish-cast-from-db/index.ts | 16 ++++++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/supabase/functions/process-auto-interactions/index.ts b/supabase/functions/process-auto-interactions/index.ts index 7e3cdb91..4c0d76d2 100644 --- a/supabase/functions/process-auto-interactions/index.ts +++ b/supabase/functions/process-auto-interactions/index.ts @@ -33,19 +33,21 @@ const getServiceRoleKey = () => { * downstream RLS evaluates `auth.uid() = sub`. `scope` is audit metadata * (e.g. `{ account_id, list_id }`); `source` tags the cron origin. * - * NOTE: Requires SUPABASE_JWT_SECRET (symmetric HS256). If the project is - * migrated to asymmetric (JWKs) JWT signing, this path must switch to signing - * with the project's private key and the signer must verify via JWKs. Operator: - * confirm via Dashboard -> Project Settings -> JWT Signing before deploy. + * NOTE: Requires LEGACY_JWT_SECRET (symmetric HS256) — set via + * `supabase secrets set LEGACY_JWT_SECRET=<value>`. The Supabase CLI reserves + * the SUPABASE_* prefix, so we use a non-prefixed name. The value is the + * project's "Legacy JWT secret" from Dashboard -> Project Settings -> JWT Keys. + * Once the project moves fully off the legacy key, switch to ES256 signing + * with SUPABASE_SECRET_KEYS. */ async function mintUserJwt( sub: string, scope: Record<string, unknown>, source: string ): Promise<string> { - const secret = Deno.env.get('SUPABASE_JWT_SECRET'); + const secret = Deno.env.get('LEGACY_JWT_SECRET'); if (!secret) { - throw new Error('SUPABASE_JWT_SECRET missing'); + throw new Error('LEGACY_JWT_SECRET missing'); } const key = await crypto.subtle.importKey( 'raw', diff --git a/supabase/functions/publish-cast-from-db/index.ts b/supabase/functions/publish-cast-from-db/index.ts index 275e2a9a..0b9c6211 100644 --- a/supabase/functions/publish-cast-from-db/index.ts +++ b/supabase/functions/publish-cast-from-db/index.ts @@ -140,19 +140,23 @@ function buildSignerPayload(draftData: any) { * so RLS evaluates `auth.uid() = sub`. `scope` is advisory metadata surfaced to * audit logs (e.g. `{ account_id, draft_id }`); `source` tags the caller cron. * - * NOTE: Requires SUPABASE_JWT_SECRET (symmetric HS256). If the project is - * migrated to asymmetric (JWKs) JWT signing, this path must switch to signing - * with the project's private key and the signer must verify via JWKs. Operator: - * confirm via Dashboard -> Project Settings -> JWT Signing before deploy. + * NOTE: Requires LEGACY_JWT_SECRET (symmetric HS256) — set via + * `supabase secrets set LEGACY_JWT_SECRET=<value>`. The Supabase CLI reserves + * the SUPABASE_* prefix, so we deliberately use a non-prefixed name.
The value + * is the project's "Legacy JWT secret" from Dashboard -> Project Settings -> + * JWT Keys (used to verify pre-migration HS256 tokens). Once the project moves + * fully off the legacy key, this path must switch to signing with the project's + * ES256 private key (from SUPABASE_SECRET_KEYS) and the signer must verify via + * JWKs. */ async function mintUserJwt( sub: string, scope: Record<string, unknown>, source: string ): Promise<string> { - const secret = Deno.env.get('SUPABASE_JWT_SECRET'); + const secret = Deno.env.get('LEGACY_JWT_SECRET'); if (!secret) { - throw new Error('SUPABASE_JWT_SECRET missing'); + throw new Error('LEGACY_JWT_SECRET missing'); } const key = await crypto.subtle.importKey( 'raw', From efe283ad8932ab9a6db72c249c23614b13aa00a3 Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 15:14:26 +0200 Subject: [PATCH 09/18] cron: rename minted-JWT 'scope' claim to 'cron_meta' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gotrue's AccessTokenClaims Go struct expects 'scope' to be a space-delimited string per OAuth 2.0 (RFC 6749 §3.3). We were passing an object ({ account_id, draft_id }), causing the signer's supabase.auth.getUser() to reject every cron-minted token with: invalid JWT: unable to parse or verify signature, token is malformed: could not JSON decode claim: json: cannot unmarshal object into Go struct field AccessTokenClaims.scope of type string Renaming our custom claim to 'cron_meta' avoids the reserved name and keeps the bookkeeping object intact for audit/debugging purposes. Caught while smoke-testing the deployed cron flow against prod after LEGACY_JWT_SECRET was correctly set. Diagnostic console.log was added during debugging; removed in this same commit since the actual error turned out to be claim shape, not secret value. --- supabase/functions/process-auto-interactions/index.ts | 7 ++++++- supabase/functions/publish-cast-from-db/index.ts | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/supabase/functions/process-auto-interactions/index.ts b/supabase/functions/process-auto-interactions/index.ts index 4c0d76d2..d263bfdf 100644 --- a/supabase/functions/process-auto-interactions/index.ts +++ b/supabase/functions/process-auto-interactions/index.ts @@ -56,6 +56,11 @@ async function mintUserJwt( false, ['sign'] ); + // NOTE: do not put a `scope` claim in this JWT.
gotrue's AccessTokenClaims + // expects `scope` to be a space-delimited string (OAuth 2.0 standard) and will + // reject the token with "json: cannot unmarshal object into Go struct field + // AccessTokenClaims.scope of type string" if we pass an object. We use + // `cron_meta` for our own bookkeeping instead. return await create( { alg: 'HS256', typ: 'JWT' }, { @@ -172,7 +177,7 @@ async function mintUserJwt( role: 'authenticated', aud: 'authenticated', source, - scope, + cron_meta: scope, iat: getNumericDate(0), exp: getNumericDate(60), }, From 1ed67a4ff8cd868bc563dbeb12ec389194d9c584 Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 21:35:33 +0200 Subject: [PATCH 10/18] cron: switch JWT mint from HS256 (legacy) to ES256 (asymmetric) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After exhausting debug cycles on the legacy HS256 secret value mismatch, pivoted to ES256 per the project's already-active JWT signing keys infrastructure. ES256 is the documented future-proof path post-migration: https://supabase.com/docs/guides/auth/signing-keys Setup performed via Supabase Management API: 1. Generated a dedicated ES256 (P-256) keypair locally (Node crypto). 2. Imported the private JWK as a standby signing key on the project, getting kid `5789fc0d-8d87-467f-bc37-97d8fdb20df6`. 3. Verified the public side appears in https://.supabase.co/auth/v1/.well-known/jwks.json 4. Stored the private JWK as the function secret `CRON_SIGNING_PRIVATE_JWK`. mintUserJwt now: - Reads `CRON_SIGNING_PRIVATE_JWK`, parses to JWK, imports for ECDSA P-256. - Signs JWT with `alg: ES256` and the JWK's `kid` in the header. - Drops the LEGACY_JWT_SECRET dependency; that secret can be removed from the project's edge-function env (operator action). Signer's `supabase.auth.getUser()` validates automatically via JWKs lookup by kid — no signer-code change needed. The new key is `standby` so it verifies but does not displace the project's current ES256 Auth signing key (kid `b6908320-...`). Verified end-to-end: scheduled draft 18a46cef-... published successfully at 2026-05-03 19:30 UTC, audit log shows source='cron:publish' with success=true and actor_user_id populated. Followups (separate change): - Once stable for ~1 week, remove LEGACY_JWT_SECRET function secret. - Operator can then safely revoke the legacy HS256 signing key (`140a997e-...`) after migrating service_role/anon to sb_secret_*/sb_publishable_*. --- .../process-auto-interactions/index.ts | 41 ++++++++--------- .../functions/publish-cast-from-db/index.ts | 45 +++++++++---------- 2 files changed, 40 insertions(+), 46 deletions(-) diff --git a/supabase/functions/process-auto-interactions/index.ts b/supabase/functions/process-auto-interactions/index.ts index d263bfdf..4c619215 100644 --- a/supabase/functions/process-auto-interactions/index.ts +++ b/supabase/functions/process-auto-interactions/index.ts @@ -28,41 +28,38 @@ const getServiceRoleKey = () => { }; /** - * Mint a short-lived HS256 JWT that the signer edge function validates via - * `supabase.auth.getUser()`. Matches Supabase's expected user JWT shape so that - * downstream RLS evaluates `auth.uid() = sub`. `scope` is audit metadata - * (e.g. `{ account_id, list_id }`); `source` tags the cron origin. + * Mint a short-lived ES256 JWT that the signer edge function accepts via + * `supabase.auth.getUser()`. The claim shape matches Supabase's expected + * user JWT so RLS resolves `auth.uid() = sub`. 
`source` tags the caller cron; + * `cron_meta` carries our own bookkeeping (account_id / list_id) — `scope` + * is reserved by gotrue's AccessTokenClaims (OAuth 2.0 string). * - * NOTE: Requires LEGACY_JWT_SECRET (symmetric HS256) — set via - * `supabase secrets set LEGACY_JWT_SECRET=<value>`. The Supabase CLI reserves - * the SUPABASE_* prefix, so we use a non-prefixed name. The value is the - * project's "Legacy JWT secret" from Dashboard -> Project Settings -> JWT Keys. - * Once the project moves fully off the legacy key, switch to ES256 signing - * with SUPABASE_SECRET_KEYS. + * Requires CRON_SIGNING_PRIVATE_JWK env: a JSON-serialized ES256 (P-256) + * private JWK whose public side is registered in the project's signing-keys + * (status >= standby) so it appears in `/auth/v1/.well-known/jwks.json`. */ async function mintUserJwt( sub: string, scope: Record<string, unknown>, source: string ): Promise<string> { - const secret = Deno.env.get('LEGACY_JWT_SECRET'); - if (!secret) { - throw new Error('LEGACY_JWT_SECRET missing'); + const privateJwkRaw = Deno.env.get('CRON_SIGNING_PRIVATE_JWK'); + if (!privateJwkRaw) { + throw new Error('CRON_SIGNING_PRIVATE_JWK missing'); + } + const jwk = JSON.parse(privateJwkRaw) as JsonWebKey & { kid?: string }; + if (!jwk.kid) { + throw new Error('CRON_SIGNING_PRIVATE_JWK missing kid'); } const key = await crypto.subtle.importKey( - 'raw', - new TextEncoder().encode(secret), - { name: 'HMAC', hash: 'SHA-256' }, + 'jwk', + jwk, + { name: 'ECDSA', namedCurve: 'P-256' }, false, ['sign'] ); - // NOTE: do not put a `scope` claim in this JWT. gotrue's AccessTokenClaims - // expects `scope` to be a space-delimited string (OAuth 2.0 standard) and will - // reject the token with "json: cannot unmarshal object into Go struct field - // AccessTokenClaims.scope of type string" if we pass an object. We use - // `cron_meta` for our own bookkeeping instead. return await create( - { alg: 'HS256', typ: 'JWT' }, + { alg: 'ES256', typ: 'JWT', kid: jwk.kid }, { sub, role: 'authenticated', diff --git a/supabase/functions/publish-cast-from-db/index.ts b/supabase/functions/publish-cast-from-db/index.ts index a2be263a..115580cd 100644 --- a/supabase/functions/publish-cast-from-db/index.ts +++ b/supabase/functions/publish-cast-from-db/index.ts @@ -135,43 +135,40 @@ function buildSignerPayload(draftData: any) { } /** - * Mint a short-lived HS256 JWT that the signer edge function will accept via - * `supabase.auth.getUser()`. Claim shape matches Supabase's expected user JWT - * so RLS evaluates `auth.uid() = sub`. `scope` is advisory metadata surfaced to - * audit logs (e.g. `{ account_id, draft_id }`); `source` tags the caller cron. + * Mint a short-lived ES256 JWT that the signer edge function accepts via + * `supabase.auth.getUser()`. The claim shape matches Supabase's expected + * user JWT so RLS resolves `auth.uid() = sub`. `source` tags the caller cron; + * `cron_meta` carries our own bookkeeping (account_id / draft_id) — `scope` + * is reserved by gotrue's AccessTokenClaims (OAuth 2.0 string). * - * NOTE: Requires LEGACY_JWT_SECRET (symmetric HS256) — set via - * `supabase secrets set LEGACY_JWT_SECRET=<value>`. The Supabase CLI reserves - * the SUPABASE_* prefix, so we deliberately use a non-prefixed name. The value - * is the project's "Legacy JWT secret" from Dashboard -> Project Settings -> - * JWT Keys (used to verify pre-migration HS256 tokens).
Once the project moves - * fully off the legacy key, this path must switch to signing with the project's - * ES256 private key (from SUPABASE_SECRET_KEYS) and the signer must verify via - * JWKs. + * Requires CRON_SIGNING_PRIVATE_JWK env: a JSON-serialized ES256 (P-256) + * private JWK whose public side is registered in the project's signing-keys + * (status >= standby) so it appears in `/auth/v1/.well-known/jwks.json`. The + * `kid` from the JWK is set in the JWT header so gotrue picks the matching + * verifier. */ async function mintUserJwt( sub: string, scope: Record<string, unknown>, source: string ): Promise<string> { - const secret = Deno.env.get('LEGACY_JWT_SECRET'); - if (!secret) { - throw new Error('LEGACY_JWT_SECRET missing'); + const privateJwkRaw = Deno.env.get('CRON_SIGNING_PRIVATE_JWK'); + if (!privateJwkRaw) { + throw new Error('CRON_SIGNING_PRIVATE_JWK missing'); + } + const jwk = JSON.parse(privateJwkRaw) as JsonWebKey & { kid?: string }; + if (!jwk.kid) { + throw new Error('CRON_SIGNING_PRIVATE_JWK missing kid'); } const key = await crypto.subtle.importKey( - 'raw', - new TextEncoder().encode(secret), - { name: 'HMAC', hash: 'SHA-256' }, + 'jwk', + jwk, + { name: 'ECDSA', namedCurve: 'P-256' }, false, ['sign'] ); - // NOTE: do not put a `scope` claim in this JWT. gotrue's AccessTokenClaims - // expects `scope` to be a space-delimited string (OAuth 2.0 standard) and will - // reject the token with "json: cannot unmarshal object into Go struct field - // AccessTokenClaims.scope of type string" if we pass an object. We use - // `cron_meta` for our own bookkeeping instead. return await create( - { alg: 'HS256', typ: 'JWT' }, + { alg: 'ES256', typ: 'JWT', kid: jwk.kid }, { sub, role: 'authenticated', From 98c0ff6eb69169f4c3c4d73046a2f963d16cb3c9 Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 21:49:48 +0200 Subject: [PATCH 11/18] cleanup: address codex pre-merge review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Four small fixes after codex flagged stale comments + a small info leak: - farcaster-signer/lib/auth.ts: doc-string said cron mints HS256, but it now mints ES256. Updated to reflect the post-debug state. - mcp-server/lib/auth.ts: removed `console.log('[auth] Received auth header:', authHeader.slice(0, 20) + '...')`. The 20-char slice exposed the JWT header + first ~14 bytes of payload, contradicting our scope-cleanup goal (mcp-server/index.ts:124-134 already strips this from the response body). Also dropped a few adjacent debug console.log lines that leaked token length / user-id / scopes. - publish-cast-from-db: replaced the trailing demo curl comment that included a hardcoded `eyJ...` JWT (a public Supabase demo token, but a future log dump could match the JWT-shape regex). Updated to use `$SUPABASE_ANON_KEY` placeholder. - Renamed `mintUserJwt`'s param `scope` → `cronMeta` in both cron files to match the JWT claim name (which we changed in commit efe283ad to avoid OAuth's reserved `scope` claim). No functional change; functions redeployed for cosmetic + log-leak fix.
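The ES256 keypair from step 1 of the previous commit (1ed67a4f) was generated locally and never appears in any diff. For reference, a minimal sketch of that step, assuming Deno or Node 18+ WebCrypto; the `kid` below is a locally minted placeholder for illustration, whereas the real `kid` was assigned when the public JWK was imported as a standby signing key through the Management API:

// Generate a dedicated ES256 (P-256) keypair for cron JWT minting.
// Run once locally; the private half must never be committed.
const { privateKey, publicKey } = await crypto.subtle.generateKey(
  { name: 'ECDSA', namedCurve: 'P-256' },
  true, // extractable, so the private JWK can be exported into a function secret
  ['sign', 'verify']
);

const privateJwk = await crypto.subtle.exportKey('jwk', privateKey);
const publicJwk = await crypto.subtle.exportKey('jwk', publicKey);
const kid = crypto.randomUUID(); // placeholder; prefer the kid returned by the key import

// Private half: store via `supabase secrets set CRON_SIGNING_PRIVATE_JWK=<json>`.
console.log(JSON.stringify({ ...privateJwk, kid, alg: 'ES256' }));
// Public half: import as a standby signing key, then confirm it is served from
// /auth/v1/.well-known/jwks.json before cutting the cron functions over.
console.log(JSON.stringify({ ...publicJwk, kid, alg: 'ES256' }));

Keeping the same `kid` on the stored private JWK and the served public JWK is what lets gotrue match the `kid` in the minted JWT header to the right verifier.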
--- supabase/functions/farcaster-signer/lib/auth.ts | 5 +++-- supabase/functions/mcp-server/lib/auth.ts | 10 ---------- .../process-auto-interactions/index.ts | 4 ++-- .../functions/publish-cast-from-db/index.ts | 17 ++++++++--------- 4 files changed, 13 insertions(+), 23 deletions(-) diff --git a/supabase/functions/farcaster-signer/lib/auth.ts b/supabase/functions/farcaster-signer/lib/auth.ts index 8480912b..c2039ab0 100644 --- a/supabase/functions/farcaster-signer/lib/auth.ts +++ b/supabase/functions/farcaster-signer/lib/auth.ts @@ -29,8 +29,9 @@ function decodeJwtPayload(token: string): Record<string, unknown> | null { * Creates a Supabase client with the user's JWT token (not service role) * to ensure RLS policies are enforced for all subsequent queries. * - * Cron callers mint a short-lived HS256 JWT with `sub = <user-id>` and - * present it here — this path validates it identically to a human-user JWT. + * Cron callers mint a short-lived ES256 JWT with `sub = <user-id>` and + * present it here — this path validates it identically to a human-user JWT + * (gotrue verifies via JWKs lookup by `kid`). * * @param authHeader - The Authorization header value (e.g., "Bearer <token>") * @returns AuthResult containing userId and Supabase client diff --git a/supabase/functions/mcp-server/lib/auth.ts b/supabase/functions/mcp-server/lib/auth.ts index 909435b1..6de2e07f 100644 --- a/supabase/functions/mcp-server/lib/auth.ts +++ b/supabase/functions/mcp-server/lib/auth.ts @@ -58,21 +58,15 @@ function extractScopesFromJwt(token: string, fallback: string[]): string[] { } export async function authenticateRequest(authHeader: string | null): Promise<AuthResult> { - // Debug logging for auth issues - console.log('[auth] Received auth header:', authHeader ? `${authHeader.slice(0, 20)}...` : 'null'); - if (!authHeader) { throw new Error('Missing Authorization header'); } const token = authHeader.replace(/^Bearer\s+/i, ''); if (!token || token === authHeader) { - console.log('[auth] Invalid format - token extraction failed'); throw new Error('Invalid Authorization header format'); } - console.log('[auth] Token extracted, length:', token.length); - const supabaseUrl = Deno.env.get('SUPABASE_URL') || Deno.env.get('API_URL') || Deno.env.get('SUPABASE_API_URL'); const supabaseAnonKey = Deno.env.get('SUPABASE_ANON_KEY') || Deno.env.get('ANON_KEY'); @@ -94,12 +88,10 @@ export async function authenticateRequest(authHeader: string | null): Promise<AuthResult> { */ async function mintUserJwt( sub: string, - scope: Record<string, unknown>, + cronMeta: Record<string, unknown>, source: string ): Promise<string> { const privateJwkRaw = Deno.env.get('CRON_SIGNING_PRIVATE_JWK'); @@ -65,7 +65,7 @@ async function mintUserJwt( role: 'authenticated', aud: 'authenticated', source, - cron_meta: scope, + cron_meta: cronMeta, iat: getNumericDate(0), exp: getNumericDate(60), }, diff --git a/supabase/functions/publish-cast-from-db/index.ts b/supabase/functions/publish-cast-from-db/index.ts index 115580cd..215c9443 100644 --- a/supabase/functions/publish-cast-from-db/index.ts +++ b/supabase/functions/publish-cast-from-db/index.ts @@ -149,7 +149,7 @@ function buildSignerPayload(draftData: any) { */ async function mintUserJwt( sub: string, - scope: Record<string, unknown>, + cronMeta: Record<string, unknown>, source: string ): Promise<string> { const privateJwkRaw = Deno.env.get('CRON_SIGNING_PRIVATE_JWK'); @@ -174,7 +174,7 @@ async function mintUserJwt( role: 'authenticated', aud: 'authenticated', source, - cron_meta: scope, + cron_meta: cronMeta, iat: getNumericDate(0), exp: getNumericDate(60), }, @@ -559,10 +559,9 @@ Deno.serve(async (req) => { }); }); -// # run -// supabase
functions serve --debug -// # and then -// curl --request POST 'http://localhost:54321/functions/v1/publish-cast-from-db' \ -// --header 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0' \ -// --header 'Content-Type: application/json' \ -// --data '{ "name":"Functions" }' +// Local invocation: +// supabase functions serve --debug +// curl -X POST http://localhost:54321/functions/v1/publish-cast-from-db \ +// -H "Authorization: Bearer $SUPABASE_ANON_KEY" \ +// -H "Content-Type: application/json" \ +// --data '{}' From ac36212bbd869428a525b3e9d07c501a48c5e5e3 Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 22:06:49 +0200 Subject: [PATCH 12/18] chore: ignore Claude Code local state directory `.claude/` contains per-machine settings (MCP permission allowlist, runtime PID lock files) that should never enter the repo. Adding it to .gitignore so accidentally-untracked files there won't show up in `git status` for any contributor running Claude Code. --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index f3291f8d..3b94e0a3 100644 --- a/.gitignore +++ b/.gitignore @@ -116,3 +116,6 @@ dist/ # Benchmark results scripts/benchmark-results/ + +# Claude Code local state (per-machine permissions, runtime locks) +.claude/ From 30af71ad76c81fcb41305735cca9f5360b019b65 Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 22:23:26 +0200 Subject: [PATCH 13/18] ci: replace removed `--force` flag with `--yes` for `supabase db reset` The latest Supabase CLI (v2.90+) dropped the legacy `--force` flag. Job 74138951854 failed with `unknown flag: --force` because the runner pulls `supabase/setup-cli@v1` with `version: latest`. The equivalent non-interactive flag is `--yes`, which answers Y to all prompts. Reference (CI failure log): Run supabase db reset --force unknown flag: --force ##[error]Process completed with exit code 1. --- .github/workflows/signing-service-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/signing-service-tests.yml b/.github/workflows/signing-service-tests.yml index 7602a8ce..4c1699c3 100644 --- a/.github/workflows/signing-service-tests.yml +++ b/.github/workflows/signing-service-tests.yml @@ -29,7 +29,7 @@ jobs: run: supabase start - name: Reset database - run: supabase db reset --force + run: supabase db reset --yes - name: Apply pgsodium grants run: | From d2a8900c473e866fe6da7c3470a5b773d9445e48 Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 22:37:10 +0200 Subject: [PATCH 14/18] ci: exclude unused supabase services to avoid inbucket port race After fixing the --force flag, the next CI run failed in `supabase start` with: failed to start docker container "supabase_inbucket_herocast": failed to bind host port for 0.0.0.0:54324:172.18.0.5:8025/tcp: address already in use `supabase start` retried after an initial container hiccup and the inbucket port was still bound from the previous attempt, killing the whole stack startup. Signer tests only exercise db + auth + edge-runtime. Excluding the unused services (inbucket/studio/storage-api/realtime/imgproxy/etc) removes the failure surface and trims runner startup time. 
--- .github/workflows/signing-service-tests.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/signing-service-tests.yml b/.github/workflows/signing-service-tests.yml index 4c1699c3..c7a35949 100644 --- a/.github/workflows/signing-service-tests.yml +++ b/.github/workflows/signing-service-tests.yml @@ -26,7 +26,10 @@ jobs: deno-version: v1.x - name: Start Supabase - run: supabase start + # Exclude services the signer tests don't need. Avoids the inbucket + # port-bind race (54324) that intermittently fails on GitHub runners + # when supabase start retries after a container startup hiccup. + run: supabase start --exclude inbucket,studio,storage-api,realtime,imgproxy,pgadmin-schema-diff,migra,pg-prove,pgbouncer,vector,supavisor - name: Reset database run: supabase db reset --yes From 750faf29fc6a9dbbf84ac0851092ad913d859c4e Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 22:43:24 +0200 Subject: [PATCH 15/18] ci: seed pgsodium key via create_key() so encrypt actually works The signing-service-tests workflow had never passed (every run since the workflow was added has failed). Root cause for the latest seed.ts failure: Failed to create account ...: {"code":"P0002","message":"query returned no rows"} The bare `INSERT INTO pgsodium.key (id, key_type, key_id, key_context)` in `pgsodium_seed_key.sql` populated the row but pgsodium's crypto_aead_det_encrypt could not derive `raw_key` from the bare metadata, so the account-encrypt trigger raised P0002 on every INSERT. Switching to `pgsodium.create_key('aead-det', 'herocast_encryption_key')` generates a properly initialized key (raw_key + nonce + sequence). We then UPDATE the auto-generated id to the deterministic UUID referenced by the trigger functions (dcd0dca7-...), preserving the existing trigger contracts. Idempotent: returns early if the deterministic UUID already exists; otherwise cleans up any prior partial seed, creates the key, reassigns. --- supabase/setup/pgsodium_seed_key.sql | 30 ++++++++++++++++++---------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/supabase/setup/pgsodium_seed_key.sql b/supabase/setup/pgsodium_seed_key.sql index 66e89d8f..7fbc4d78 100644 --- a/supabase/setup/pgsodium_seed_key.sql +++ b/supabase/setup/pgsodium_seed_key.sql @@ -1,17 +1,25 @@ --- Ensures the deterministic key referenced by triggers exists locally. +-- Ensures the deterministic key referenced by the accounts encrypt triggers +-- exists locally / in CI. Uses pgsodium.create_key() so that raw_key is +-- generated correctly and the encrypt path works (a bare INSERT into +-- pgsodium.key leaves raw_key derivation broken, surfacing as +-- "query returned no rows" P0002 from pgsodium.crypto_aead_det_encrypt). DO $$ DECLARE target_uuid constant uuid := 'dcd0dca7-c03a-40c5-b348-fefb87be2845'; + generated_uuid uuid; BEGIN - IF NOT EXISTS ( - SELECT 1 FROM pgsodium.key WHERE id = target_uuid - ) THEN - INSERT INTO pgsodium.key (id, key_type, key_id, key_context) - VALUES ( - target_uuid, - 'aead-det', - nextval('pgsodium.key_key_id_seq'), - decode('7067736f6469756d', 'hex') - ); + IF EXISTS (SELECT 1 FROM pgsodium.key WHERE id = target_uuid) THEN + RETURN; END IF; + + -- Drop any previous attempt that left a partial key under our chosen name. + DELETE FROM pgsodium.key WHERE name = 'herocast_encryption_key'; + + -- Create the key the proper way; pgsodium handles raw_key generation. 
+ SELECT id INTO generated_uuid + FROM pgsodium.create_key('aead-det'::pgsodium.key_type, 'herocast_encryption_key'); + + -- Reassign the auto-generated id to the deterministic UUID that the + -- accounts encrypt trigger functions reference by literal. + UPDATE pgsodium.key SET id = target_uuid WHERE id = generated_uuid; END $$; From 16d924644aa242900b13720ced3742378c4e9c58 Mon Sep 17 00:00:00 2001 From: hellno Date: Sun, 3 May 2026 23:19:34 +0200 Subject: [PATCH 16/18] ci: seed pgsodium key via supabase/seed.sql, not psql -U postgres MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Root cause of CI failure: in the Supabase CLI image, the `postgres` role is not a true superuser and does not own pgsodium internals. After `supabase db reset` re-locks pgsodium, the post-reset psql step fails with "permission denied for table key" when trying to seed the encryption key via DELETE/UPDATE on pgsodium.key — and the seed.ts step then fails with P0002 ("query returned no rows") at encrypt time. Move the key seed into `supabase/seed.sql` (auto-applied by `db reset` with the migration-runner role, which DOES have pgsodium write access). Verified locally: - encrypt_test returns valid ciphertext after `supabase db reset` - seed.ts creates both test accounts - full deno test suite: 35 passed, 0 failed, 9 e2e-ignored Also re-grant `pgsodium_keyiduser` to `authenticated` in seed.sql since migration 20260423000003 revokes it for production hardening (tests need it back for the decrypt path). Drop the redundant `psql ... pgsodium_seed_key.sql` line from CI; the grants psql call is kept (it succeeds for the GRANT ROLE membership and silently no-ops on the function-EXECUTE grants the postgres role lacks ownership for, which are not on the tested code path anyway). --- .github/workflows/signing-service-tests.yml | 6 +++- supabase/seed.sql | 36 +++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/.github/workflows/signing-service-tests.yml b/.github/workflows/signing-service-tests.yml index c7a35949..0b2c3927 100644 --- a/.github/workflows/signing-service-tests.yml +++ b/.github/workflows/signing-service-tests.yml @@ -35,10 +35,14 @@ jobs: run: supabase db reset --yes - name: Apply pgsodium grants + # Key seeding is handled by supabase/seed.sql (auto-run by `db reset` + # with privileged access). The grants script applies role-membership + # changes; psql does not propagate SQL errors so failures here are + # non-fatal — fine for the redundant function-EXECUTE grants that the + # postgres role lacks ownership to apply in the CI Supabase image. run: | export PGPASSWORD=postgres psql -h localhost -p 54322 -U postgres -d postgres -f supabase/setup/pgsodium_grants.sql - psql -h localhost -p 54322 -U postgres -d postgres -f supabase/setup/pgsodium_seed_key.sql - name: Export Supabase env run: | diff --git a/supabase/seed.sql b/supabase/seed.sql index e69de29b..a56b1ab2 100644 --- a/supabase/seed.sql +++ b/supabase/seed.sql @@ -0,0 +1,36 @@ +-- Test/CI seed: ensure the pgsodium key referenced by the encrypt triggers +-- exists. Runs after migrations as part of `supabase db reset`, with the +-- privileged role used by the migration runner (so pgsodium.key writes are +-- allowed — the postgres role used by post-reset psql sessions cannot). +-- +-- Idempotent: returns early if the key already exists. Safe to ship in repo. 
+DO $$
+DECLARE
+ target_uuid constant uuid := 'dcd0dca7-c03a-40c5-b348-fefb87be2845';
+ generated_uuid uuid;
+BEGIN
+ IF EXISTS (SELECT 1 FROM pgsodium.key WHERE id = target_uuid) THEN
+ RETURN;
+ END IF;
+
+ -- Drop any previous attempt that left a partial key under our chosen name.
+ DELETE FROM pgsodium.key WHERE name = 'herocast_encryption_key';
+
+ -- Create the key the proper way; pgsodium handles raw_key generation.
+ SELECT id INTO generated_uuid
+ FROM pgsodium.create_key('aead-det'::pgsodium.key_type, 'herocast_encryption_key');
+
+ -- Reassign the auto-generated id to the deterministic UUID that the
+ -- accounts encrypt trigger functions reference by literal.
+ UPDATE pgsodium.key SET id = target_uuid WHERE id = generated_uuid;
+END $$;
+
+-- Re-grant the pgsodium role required for the test path. Migration
+-- 20260423000003 revokes this from `authenticated` for production hardening;
+-- tests need it back for the encrypt/decrypt path to work.
+DO $$
+BEGIN
+ IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'pgsodium_keyiduser') THEN
+ EXECUTE 'GRANT pgsodium_keyiduser TO authenticated';
+ END IF;
+END $$;

From 039a5badaf7af24934b60d3836dd6467924d785b Mon Sep 17 00:00:00 2001
From: hellno
Date: Sun, 3 May 2026 23:31:45 +0200
Subject: [PATCH 17/18] ci: seed pgsodium key via migration, not seed.sql
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

An earlier attempt put the key seed in supabase/seed.sql, but in the
Supabase CLI image used by CI the post-migration `postgres` role lacks
pgsodium_keymaker membership — INSERT/UPDATE on pgsodium.key fails with
SQLSTATE 42501 "permission denied for table key", which surfaces in
seed.ts as P0002 "query returned no rows" from the encrypt trigger.

Move the seed into a migration. Migrations apply with the migration-
runner role's elevated privileges, where pgsodium.create_key() (a
SECURITY DEFINER pgsodium-owned function) and the subsequent UPDATE on
pgsodium.key both succeed. The idempotent guard makes it a no-op
against the prod database that already has the key.

Verified locally:
- `supabase db reset` applies the new migration cleanly
- the encrypt path round-trips: crypto_aead_det_encrypt('test', ...)
  returns valid ciphertext via the literal UUID dcd0dca7-...

Empty out supabase/seed.sql since the key seed no longer lives there.
---
 .../20260423000004_test_pgsodium_seed.sql | 42 +++++++++++++++++++
 supabase/seed.sql                         | 36 ----------------
 2 files changed, 42 insertions(+), 36 deletions(-)
 create mode 100644 supabase/migrations/20260423000004_test_pgsodium_seed.sql

diff --git a/supabase/migrations/20260423000004_test_pgsodium_seed.sql b/supabase/migrations/20260423000004_test_pgsodium_seed.sql
new file mode 100644
index 00000000..7778fb47
--- /dev/null
+++ b/supabase/migrations/20260423000004_test_pgsodium_seed.sql
@@ -0,0 +1,42 @@
+-- Idempotent seed for the pgsodium key referenced by accounts encrypt
+-- triggers. Required for fresh local/CI databases where pgsodium has no
+-- pre-existing key with the literal UUID baked into the encrypt expressions
+-- (see supabase/migrations/20231201175719_schema_test.sql).
+--
+-- Why a migration (not seed.sql): in the Supabase CLI image used by CI, the
+-- post-migration `postgres` role lacks pgsodium_keymaker membership, so
+-- direct INSERT/UPDATE on pgsodium.key fails with "permission denied for
+-- table key" (SQLSTATE 42501) — the symptom that surfaces in seed.ts as
+-- P0002 "query returned no rows" from crypto_aead_det_encrypt. Migrations
+-- run with elevated privileges and can call SECURITY DEFINER pgsodium APIs.
+--
+-- Production safety: idempotent — returns early if the key already exists.
+-- The deployed prod database already has this key (UUID matches the
+-- existing encrypt-trigger references), so this migration is a no-op there.
+DO $$
+DECLARE
+ target_uuid constant uuid := 'dcd0dca7-c03a-40c5-b348-fefb87be2845';
+ generated_uuid uuid;
+BEGIN
+ IF EXISTS (SELECT 1 FROM pgsodium.key WHERE id = target_uuid) THEN
+ RETURN;
+ END IF;
+
+ -- Drop any partial earlier attempt under our chosen name. Safe in fresh
+ -- environments; in production the EXISTS check above already returned.
+ DELETE FROM pgsodium.key WHERE name = 'herocast_encryption_key';
+
+ -- pgsodium.create_key is SECURITY DEFINER (owner pgsodium_keymaker) and
+ -- handles the privileged INSERT into pgsodium.key, deriving raw_key from
+ -- the server root key. This is the documented pgsodium pattern; passing
+ -- an explicit raw_key bypasses the derivation chain and breaks encrypt
+ -- in environments where server_root_key is not the storage key (the
+ -- decrypted_key view tries to "decrypt" the literal bytes and fails with
+ -- "invalid ciphertext").
+ SELECT id INTO generated_uuid
+ FROM pgsodium.create_key('aead-det'::pgsodium.key_type, 'herocast_encryption_key');
+
+ -- Realign the auto-generated id to the deterministic UUID that the encrypt
+ -- expressions in earlier migrations reference by literal.
+ UPDATE pgsodium.key SET id = target_uuid WHERE id = generated_uuid;
+END $$;

diff --git a/supabase/seed.sql b/supabase/seed.sql
index a56b1ab2..e69de29b 100644
--- a/supabase/seed.sql
+++ b/supabase/seed.sql
@@ -1,36 +0,0 @@
--- Test/CI seed: ensure the pgsodium key referenced by the encrypt triggers
--- exists. Runs after migrations as part of `supabase db reset`, with the
--- privileged role used by the migration runner (so pgsodium.key writes are
--- allowed — the postgres role used by post-reset psql sessions cannot).
---
--- Idempotent: returns early if the key already exists. Safe to ship in repo.
-DO $$
-DECLARE
- target_uuid constant uuid := 'dcd0dca7-c03a-40c5-b348-fefb87be2845';
- generated_uuid uuid;
-BEGIN
- IF EXISTS (SELECT 1 FROM pgsodium.key WHERE id = target_uuid) THEN
- RETURN;
- END IF;
-
- -- Drop any previous attempt that left a partial key under our chosen name.
- DELETE FROM pgsodium.key WHERE name = 'herocast_encryption_key';
-
- -- Create the key the proper way; pgsodium handles raw_key generation.
- SELECT id INTO generated_uuid
- FROM pgsodium.create_key('aead-det'::pgsodium.key_type, 'herocast_encryption_key');
-
- -- Reassign the auto-generated id to the deterministic UUID that the
- -- accounts encrypt trigger functions reference by literal.
- UPDATE pgsodium.key SET id = target_uuid WHERE id = generated_uuid;
-END $$;
-
--- Re-grant the pgsodium role required for the test path. Migration
--- 20260423000003 revokes this from `authenticated` for production hardening;
--- tests need it back for the encrypt/decrypt path to work.
-DO $$
-BEGIN
- IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'pgsodium_keyiduser') THEN
- EXECUTE 'GRANT pgsodium_keyiduser TO authenticated';
- END IF;
-END $$;

From 72f42f19bf882d01badd0f18354e67de0228333b Mon Sep 17 00:00:00 2001
From: hellno
Date: Sun, 3 May 2026 23:38:23 +0200
Subject: [PATCH 18/18] ci: connect as supabase_admin to seed pgsodium key
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The migration approach (commit 039a5bad) hit the same SQLSTATE 42501
wall: the migration runner role in CI's Supabase image also lacks
pgsodium write access — `permission denied for table key` even when the
seed runs as part of an applied migration. (Locally `postgres` happens
to be a member of pgsodium_keymaker, so the same migration succeeds,
but that membership is absent in the CI image.)

The supabase_admin role IS the local-dev superuser and has full
pgsodium privileges. Its password matches the project-wide PGPASSWORD
default in the supabase CLI image, so we can connect via
-U supabase_admin without extra setup. Use that connection for the
grants + seed step. Drop the migration that tried to do the seed under
the migration-runner role.

Verified locally: `psql -U supabase_admin` applies
pgsodium_seed_key.sql cleanly, and the resulting key is queryable by
the postgres role afterwards.
---
 .github/workflows/signing-service-tests.yml | 16 +++----
 .../20260423000004_test_pgsodium_seed.sql   | 42 -------------------
 2 files changed, 9 insertions(+), 49 deletions(-)
 delete mode 100644 supabase/migrations/20260423000004_test_pgsodium_seed.sql

diff --git a/.github/workflows/signing-service-tests.yml b/.github/workflows/signing-service-tests.yml
index 0b2c3927..da6b2bad 100644
--- a/.github/workflows/signing-service-tests.yml
+++ b/.github/workflows/signing-service-tests.yml
@@ -34,15 +34,17 @@ jobs:
       - name: Reset database
         run: supabase db reset --yes
 
-      - name: Apply pgsodium grants
-        # Key seeding is handled by supabase/seed.sql (auto-run by `db reset`
-        # with privileged access). The grants script applies role-membership
-        # changes; psql does not propagate SQL errors so failures here are
-        # non-fatal — fine for the redundant function-EXECUTE grants that the
-        # postgres role lacks ownership to apply in the CI Supabase image.
+      - name: Apply pgsodium grants and seed key
+        # Connect as supabase_admin (the local-dev superuser; password is the
+        # same `postgres` value used for all built-in roles in the supabase
+        # CLI image). The `postgres` role in newer CLI images lacks
+        # pgsodium_keymaker membership and cannot write to pgsodium.key, so
+        # seeding under -U postgres fails with SQLSTATE 42501 — surfacing in
+        # seed.ts as P0002 "query returned no rows" from the encrypt trigger.
         run: |
           export PGPASSWORD=postgres
-          psql -h localhost -p 54322 -U postgres -d postgres -f supabase/setup/pgsodium_grants.sql
+          psql -h localhost -p 54322 -U supabase_admin -d postgres -f supabase/setup/pgsodium_grants.sql
+          psql -h localhost -p 54322 -U supabase_admin -d postgres -f supabase/setup/pgsodium_seed_key.sql
 
       - name: Export Supabase env
         run: |

diff --git a/supabase/migrations/20260423000004_test_pgsodium_seed.sql b/supabase/migrations/20260423000004_test_pgsodium_seed.sql
deleted file mode 100644
index 7778fb47..00000000
--- a/supabase/migrations/20260423000004_test_pgsodium_seed.sql
+++ /dev/null
@@ -1,42 +0,0 @@
--- Idempotent seed for the pgsodium key referenced by accounts encrypt
--- triggers. Required for fresh local/CI databases where pgsodium has no
--- pre-existing key with the literal UUID baked into the encrypt expressions
--- (see supabase/migrations/20231201175719_schema_test.sql).
---
--- Why a migration (not seed.sql): in the Supabase CLI image used by CI, the
--- post-migration `postgres` role lacks pgsodium_keymaker membership, so
--- direct INSERT/UPDATE on pgsodium.key fails with "permission denied for
--- table key" (SQLSTATE 42501) — the symptom that surfaces in seed.ts as
--- P0002 "query returned no rows" from crypto_aead_det_encrypt. Migrations
--- run with elevated privileges and can call SECURITY DEFINER pgsodium APIs.
---
--- Production safety: idempotent — returns early if the key already exists.
--- The deployed prod database already has this key (UUID matches the
--- existing encrypt-trigger references), so this migration is a no-op there.
-DO $$
-DECLARE
- target_uuid constant uuid := 'dcd0dca7-c03a-40c5-b348-fefb87be2845';
- generated_uuid uuid;
-BEGIN
- IF EXISTS (SELECT 1 FROM pgsodium.key WHERE id = target_uuid) THEN
- RETURN;
- END IF;
-
- -- Drop any partial earlier attempt under our chosen name. Safe in fresh
- -- environments; in production the EXISTS check above already returned.
- DELETE FROM pgsodium.key WHERE name = 'herocast_encryption_key';
-
- -- pgsodium.create_key is SECURITY DEFINER (owner pgsodium_keymaker) and
- -- handles the privileged INSERT into pgsodium.key, deriving raw_key from
- -- the server root key. This is the documented pgsodium pattern; passing
- -- an explicit raw_key bypasses the derivation chain and breaks encrypt
- -- in environments where server_root_key is not the storage key (the
- -- decrypted_key view tries to "decrypt" the literal bytes and fails with
- -- "invalid ciphertext").
- SELECT id INTO generated_uuid
- FROM pgsodium.create_key('aead-det'::pgsodium.key_type, 'herocast_encryption_key');
-
- -- Realign the auto-generated id to the deterministic UUID that the encrypt
- -- expressions in earlier migrations reference by literal.
- UPDATE pgsodium.key SET id = target_uuid WHERE id = generated_uuid;
-END $$;
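
For local verification of the final approach, a sanity check along these
lines can be run after `supabase db reset` plus the grants-and-seed step.
This is a minimal sketch, not part of the patch series; it assumes the
pgsodium 3.x uuid-keyed overloads and the local connection settings shown
in the workflow above:

-- connect with: psql -h localhost -p 54322 -U supabase_admin -d postgres

-- 1) The deterministic key row the encrypt triggers reference exists.
SELECT id, name, key_type
  FROM pgsodium.key
 WHERE id = 'dcd0dca7-c03a-40c5-b348-fefb87be2845';

-- 2) The encrypt path exercised by the commit messages returns ciphertext;
--    any non-NULL bytea result means the key lookup no longer fails with
--    P0002 "query returned no rows".
SELECT pgsodium.crypto_aead_det_encrypt(
  convert_to('test', 'utf8'),  -- message
  convert_to('', 'utf8'),      -- associated data (empty for this check)
  'dcd0dca7-c03a-40c5-b348-fefb87be2845'::uuid
);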