diff --git a/.agents/skills/accessibility/SKILL.md b/.agents/skills/accessibility/SKILL.md
new file mode 100644
index 0000000..af2299a
--- /dev/null
+++ b/.agents/skills/accessibility/SKILL.md
@@ -0,0 +1,440 @@
+---
+name: accessibility
+description: Audit and improve web accessibility following WCAG 2.2 guidelines. Use when asked to "improve accessibility", "a11y audit", "WCAG compliance", "screen reader support", "keyboard navigation", or "make accessible".
+license: MIT
+metadata:
+ author: web-quality-skills
+ version: "1.1"
+---
+
+# Accessibility (a11y)
+
+Comprehensive accessibility guidelines based on WCAG 2.2 and Lighthouse accessibility audits. Goal: make content usable by everyone, including people with disabilities.
+
+## WCAG Principles: POUR
+
+| Principle | Description |
+|-----------|-------------|
+| **P**erceivable | Content can be perceived through different senses |
+| **O**perable | Interface can be operated by all users |
+| **U**nderstandable | Content and interface are understandable |
+| **R**obust | Content works with assistive technologies |
+
+## Conformance levels
+
+| Level | Requirement | Target |
+|-------|-------------|--------|
+| **A** | Minimum accessibility | Must pass |
+| **AA** | Standard compliance | Should pass (legal requirement in many jurisdictions) |
+| **AAA** | Enhanced accessibility | Nice to have |
+
+---
+
+## Perceivable
+
+### Text alternatives (1.1)
+
+**Images require alt text:**
+```html
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+```
+
+**Icon buttons need accessible names:**
+```html
+
+
+
+
+
+
+
+
+
+
+
+ Open menu
+
+```
+
+**Visually hidden class:**
+```css
+.visually-hidden {
+ position: absolute;
+ width: 1px;
+ height: 1px;
+ padding: 0;
+ margin: -1px;
+ overflow: hidden;
+ clip: rect(0, 0, 0, 0);
+ white-space: nowrap;
+ border: 0;
+}
+```
+
+### Color contrast (1.4.3, 1.4.6)
+
+| Text Size | AA minimum | AAA enhanced |
+|-----------|------------|--------------|
+| Normal text (< 18pt/~24px, or < 14pt/~18.66px bold) | 4.5:1 | 7:1 |
+| Large text (≥ 18pt/~24px, or ≥ 14pt/~18.66px bold) | 3:1 | 4.5:1 |
+| UI components & graphics | 3:1 | 3:1 |
+
+```css
+/* ❌ Low contrast (2.5:1) */
+.low-contrast {
+ color: #999;
+ background: #fff;
+}
+
+/* ✅ Sufficient contrast (7:1) */
+.high-contrast {
+ color: #333;
+ background: #fff;
+}
+
+/* ✅ Focus states need contrast too */
+:focus-visible {
+ outline: 2px solid #005fcc;
+ outline-offset: 2px;
+}
+```
+
+**Don't rely on color alone:**
+```html
+
+
+
+
+
+
+
+
+
+ Please enter a valid email address
+
+
+```
+
+### Media alternatives (1.2)
+
+```html
+
+
+
+
+
+
+
+
+
+
+
+
+ Transcript
+ Full transcript text...
+
+```
+
+---
+
+## Operable
+
+### Keyboard accessible (2.1)
+
+**All functionality must be keyboard accessible:**
+```javascript
+// ❌ Only handles click
+element.addEventListener('click', handleAction);
+
+// ✅ Handles both click and keyboard
+element.addEventListener('click', handleAction);
+element.addEventListener('keydown', (e) => {
+ if (e.key === 'Enter' || e.key === ' ') {
+ e.preventDefault();
+ handleAction();
+ }
+});
+```
+
+**No keyboard traps.** Users must be able to Tab into and out of every component. Use the [modal focus trap pattern](references/A11Y-PATTERNS.md#modal-focus-trap) for dialogs—the native `<dialog>` element (opened with `showModal()`) handles this automatically.
+
+### Focus visible (2.4.7)
+
+```css
+/* ❌ Never remove focus outlines */
+*:focus { outline: none; }
+
+/* ✅ Use :focus-visible for keyboard-only focus */
+:focus {
+ outline: none;
+}
+
+:focus-visible {
+ outline: 2px solid #005fcc;
+ outline-offset: 2px;
+}
+
+/* ✅ Or custom focus styles */
+button:focus-visible {
+ box-shadow: 0 0 0 3px rgba(0, 95, 204, 0.5);
+}
+```
+
+### Focus not obscured (2.4.11) — new in 2.2
+
+When an element receives keyboard focus, it must not be entirely hidden by other author-created content such as sticky headers, footers, or overlapping panels. At Level AAA (2.4.12), no part of the focused element may be hidden.
+
+```css
+/* ✅ Account for sticky headers when scrolling to focused elements */
+:target {
+ scroll-margin-top: 80px;
+}
+
+/* ✅ Ensure focused items clear fixed/sticky bars */
+:focus {
+ scroll-margin-top: 80px;
+ scroll-margin-bottom: 60px;
+}
+```
+
+### Skip links (2.4.1)
+
+Provide a skip link so keyboard users can bypass repetitive navigation. See the [skip link pattern](references/A11Y-PATTERNS.md#skip-link) for full markup and styles.
+
+### Target size (2.5.8) — new in 2.2
+
+Interactive targets must be at least **24 × 24 CSS pixels** (AA). Exceptions: inline text links, elements where the browser controls the size, and targets where a 24px circle centered on the bounding box does not overlap another target.
+
+```css
+/* ✅ Minimum target size */
+button,
+[role="button"],
+input[type="checkbox"] + label,
+input[type="radio"] + label {
+ min-width: 24px;
+ min-height: 24px;
+}
+
+/* ✅ Comfortable target size (recommended 44×44) */
+.touch-target {
+ min-width: 44px;
+ min-height: 44px;
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+}
+```
+
+### Dragging movements (2.5.7) — new in 2.2
+
+Any action that requires dragging must have a single-pointer alternative (e.g., buttons, inputs). See the [dragging movements pattern](references/A11Y-PATTERNS.md#dragging-movements) for a sortable-list example.
+
+### Timing (2.2)
+
+```javascript
+// Allow users to extend time limits
+function showSessionWarning() {
+ const modal = createModal({
+ title: 'Session Expiring',
+ content: 'Your session will expire in 2 minutes.',
+ actions: [
+ { label: 'Extend session', action: extendSession },
+ { label: 'Log out', action: logout }
+ ],
+ timeout: 120000
+ });
+}
+```
+
+### Motion (2.3)
+
+```css
+/* Respect reduced motion preference */
+@media (prefers-reduced-motion: reduce) {
+ *,
+ *::before,
+ *::after {
+ animation-duration: 0.01ms !important;
+ animation-iteration-count: 1 !important;
+ transition-duration: 0.01ms !important;
+ scroll-behavior: auto !important;
+ }
+}
+```
+
+---
+
+## Understandable
+
+### Page language (3.1.1)
+
+```html
+
+
+
+
+
+
+
+The French word for hello is <span lang="fr">bonjour</span>.
+```
+
+### Consistent navigation (3.2.3)
+
+```html
+
+
+
+
+```
+
+### Consistent help (3.2.6) — new in 2.2
+
+If a help mechanism (contact info, chat widget, FAQ link, self-help option) is repeated across multiple pages, it must appear in the **same relative order** each time. Users who rely on consistent placement shouldn't have to hunt for help on every page.
+
+### Form labels (3.3.2)
+
+Every input needs a programmatically associated label. See the [form labels pattern](references/A11Y-PATTERNS.md#form-labels) for explicit, implicit, and instructional examples.
+
+### Error handling (3.3.1, 3.3.3)
+
+Announce errors to screen readers with `role="alert"` or `aria-live`, set `aria-invalid="true"` on invalid fields, and focus the first error on submit. See the [error handling pattern](references/A11Y-PATTERNS.md#error-handling) for full markup and JS.
+
+### Redundant entry (3.3.7) — new in 2.2
+
+Don't force users to re-enter information they already provided in the same session. Auto-populate from earlier steps, or let users select from previously entered values. Exceptions: security re-confirmation and content that has expired.
+
+```html
+
+
+ Shipping address
+
+
+ Same as billing address
+
+
+
+```
+
+### Accessible authentication (3.3.8) — new in 2.2
+
+Login flows must not rely on cognitive function tests (e.g., remembering a password, solving a puzzle) unless at least one of:
+- A copy-paste or autofill mechanism is available
+- An alternative method exists (e.g., passkey, SSO, email link)
+- The test uses object recognition or personal content (AA only; AAA removes this exception)
+
+```html
+
+
+
+
+Sign in with passkey
+Email me a login link
+```
+
+---
+
+## Robust
+
+### ARIA usage (4.1.2)
+
+**Prefer native elements:**
+```html
+
+Click me
+
+
+Click me
+
+
+Option
+
+
+ Option
+```
+
+**When ARIA is needed,** use the correct roles and states. See the [ARIA tabs pattern](references/A11Y-PATTERNS.md#aria-tabs) for a complete tablist example.
+
+### Live regions (4.1.3)
+
+Use `aria-live` regions to announce dynamic content changes without moving focus. See the [live regions pattern](references/A11Y-PATTERNS.md#live-regions-and-notifications) for markup and a `showNotification()` helper.
+
+---
+
+## Testing checklist
+
+### Automated testing
+```bash
+# Lighthouse accessibility audit
+npx lighthouse https://example.com --only-categories=accessibility
+
+# axe-core
+npm install @axe-core/cli -g
+axe https://example.com
+```
+
+### Manual testing
+
+- [ ] **Keyboard navigation:** Tab through entire page, use Enter/Space to activate
+- [ ] **Screen reader:** Test with VoiceOver (Mac), NVDA (Windows), or TalkBack (Android)
+- [ ] **Zoom:** Content usable at 200% zoom
+- [ ] **High contrast:** Test with Windows High Contrast Mode
+- [ ] **Reduced motion:** Test with `prefers-reduced-motion: reduce`
+- [ ] **Focus order:** Logical and follows visual order
+- [ ] **Target size:** Interactive elements meet 24×24px minimum
+
+See the [screen reader commands reference](references/A11Y-PATTERNS.md#screen-reader-commands) for VoiceOver and NVDA shortcuts.
+
+---
+
+## Common issues by impact
+
+### Critical (fix immediately)
+1. Missing form labels
+2. Missing image alt text
+3. Insufficient color contrast
+4. Keyboard traps
+5. No focus indicators
+
+### Serious (fix before launch)
+1. Missing page language
+2. Missing heading structure
+3. Non-descriptive link text
+4. Auto-playing media
+5. Missing skip links
+
+### Moderate (fix soon)
+1. Missing ARIA labels on icons
+2. Inconsistent navigation
+3. Missing error identification
+4. Timing without controls
+5. Missing landmark regions
+
+## References
+
+- [WCAG 2.2 Quick Reference](https://www.w3.org/WAI/WCAG22/quickref/)
+- [WAI-ARIA Authoring Practices](https://www.w3.org/WAI/ARIA/apg/)
+- [Deque axe Rules](https://dequeuniversity.com/rules/axe/)
+- [Web Quality Audit](../web-quality-audit/SKILL.md)
+- [WCAG criteria reference](references/WCAG.md)
+- [Accessibility code patterns](references/A11Y-PATTERNS.md)
diff --git a/.agents/skills/accessibility/references/A11Y-PATTERNS.md b/.agents/skills/accessibility/references/A11Y-PATTERNS.md
new file mode 100644
index 0000000..6d500ef
--- /dev/null
+++ b/.agents/skills/accessibility/references/A11Y-PATTERNS.md
@@ -0,0 +1,233 @@
+# Accessibility Code Patterns
+
+Practical, copy-paste-ready patterns for common accessibility requirements. Each pattern is self-contained and linked from the main [SKILL.md](../SKILL.md).
+
+---
+
+## Modal focus trap
+
+Trap keyboard focus inside a modal dialog so Tab/Shift+Tab cycle through its focusable elements and Escape closes it.
+
+```javascript
+function openModal(modal) {
+ const focusableElements = modal.querySelectorAll(
+ 'button, [href], input, select, textarea, [tabindex]:not([tabindex="-1"])'
+ );
+ const firstElement = focusableElements[0];
+ const lastElement = focusableElements[focusableElements.length - 1];
+
+ modal.addEventListener('keydown', (e) => {
+ if (e.key === 'Tab') {
+ if (e.shiftKey && document.activeElement === firstElement) {
+ e.preventDefault();
+ lastElement.focus();
+ } else if (!e.shiftKey && document.activeElement === lastElement) {
+ e.preventDefault();
+ firstElement.focus();
+ }
+ }
+ if (e.key === 'Escape') {
+ closeModal();
+ }
+ });
+
+ firstElement.focus();
+}
+```
+
+The native `<dialog>` element (opened with `showModal()`) handles focus trapping automatically—prefer it when browser support allows.
+
+---
+
+## Skip link
+
+Allows keyboard users to bypass repetitive navigation and jump straight to main content.
+
+```html
+
+ Skip to main content
+
+
+
+
+
+```
+
+```css
+.skip-link {
+ position: absolute;
+ top: -40px;
+ left: 0;
+ background: #000;
+ color: #fff;
+ padding: 8px 16px;
+ z-index: 100;
+}
+
+.skip-link:focus {
+ top: 0;
+}
+```
+
+---
+
+## Error handling
+
+Announce errors to screen readers and focus the first invalid field on submit.
+
+```html
+
+```
+
+```javascript
+form.addEventListener('submit', (e) => {
+ const firstError = form.querySelector('[aria-invalid="true"]');
+ if (firstError) {
+ e.preventDefault();
+ firstError.focus();
+
+ const errorSummary = document.getElementById('error-summary');
+ errorSummary.textContent =
+ `${errors.length} errors found. Please fix them and try again.`;
+ errorSummary.focus();
+ }
+});
+```
+
+---
+
+## Form labels
+
+Every input needs an associated label—either explicit (`for`/`id`) or implicit (wrapping the input in a `<label>`).
+
+```html
+
+
+
+
+Email address
+
+
+
+
+ Email address
+
+
+
+
+Password
+
+
+ Must be at least 8 characters with one number.
+
+```
+
+---
+
+## Dragging movements
+
+Any action triggered by dragging must offer a single-pointer alternative (WCAG 2.5.7).
+
+```html
+
+
+
+
+
+
+ Item 1
+ ↑
+ ↓
+
+
+ Item 2
+ ↑
+ ↓
+
+
+```
+
+Also applies to sliders, map panning, color pickers, and similar drag-based widgets—always provide an equivalent click/tap or keyboard path.
+
+---
+
+## ARIA tabs
+
+Tabs require `role="tablist"`, `role="tab"`, and `role="tabpanel"` with proper `aria-selected`, `aria-controls`, and keyboard support.
+
+```html
+
+ Description
+ Reviews
+
+
+
+
+
+
+
+```
+
+Arrow keys should move focus between tabs; the active tab receives `tabindex="0"` while inactive tabs use `tabindex="-1"`.
+
+---
+
+## Live regions and notifications
+
+Use `aria-live` to announce dynamic content changes to screen readers without moving focus.
+
+```html
+
+
+
+
+
+
+
+
+
+```
+
+```javascript
+function showNotification(message, type = 'polite') {
+ const container = document.getElementById(`${type}-announcer`);
+ container.textContent = '';
+ requestAnimationFrame(() => {
+ container.textContent = message;
+ });
+}
+```
+
+Clear the container before writing to ensure the same message triggers a new announcement.
+
+---
+
+## Screen reader commands
+
+Quick reference for the most common screen reader shortcuts.
+
+| Action | VoiceOver (Mac) | NVDA (Windows) |
+|--------|-----------------|----------------|
+| Start/Stop | ⌘ + F5 | Ctrl + Alt + N |
+| Next item | VO + → | ↓ |
+| Previous item | VO + ← | ↑ |
+| Activate | VO + Space | Enter |
+| Headings list | VO + U, then arrows | H / Shift + H |
+| Links list | VO + U | K / Shift + K |
diff --git a/.agents/skills/accessibility/references/WCAG.md b/.agents/skills/accessibility/references/WCAG.md
new file mode 100644
index 0000000..a0bd65f
--- /dev/null
+++ b/.agents/skills/accessibility/references/WCAG.md
@@ -0,0 +1,191 @@
+# WCAG 2.2 Quick Reference
+
+## Success criteria by level
+
+### Level A (minimum)
+
+| Criterion | Description |
+|-----------|-------------|
+| **1.1.1** Non-text Content | All images, icons have text alternatives |
+| **1.2.1** Audio-only/Video-only | Provide transcript or audio description |
+| **1.2.2** Captions | Video with audio has captions |
+| **1.2.3** Audio Description | Video has audio description |
+| **1.3.1** Info and Relationships | Information conveyed through presentation is available programmatically |
+| **1.3.2** Meaningful Sequence | Reading order is logical |
+| **1.3.3** Sensory Characteristics | Instructions don't rely solely on shape, color, size, location, orientation, or sound |
+| **1.4.1** Use of Color | Color is not the only visual means of conveying information |
+| **1.4.2** Audio Control | Audio playing automatically can be paused/stopped |
+| **2.1.1** Keyboard | All functionality available via keyboard |
+| **2.1.2** No Keyboard Trap | Keyboard focus can be moved away from any component |
+| **2.1.4** Character Key Shortcuts | Single-key shortcuts can be turned off or remapped |
+| **2.2.1** Timing Adjustable | Time limits can be extended |
+| **2.2.2** Pause, Stop, Hide | Moving/blinking content can be paused |
+| **2.3.1** Three Flashes | Nothing flashes more than 3 times per second |
+| **2.4.1** Bypass Blocks | Skip link or landmark navigation available |
+| **2.4.2** Page Titled | Pages have descriptive titles |
+| **2.4.3** Focus Order | Focus order preserves meaning |
+| **2.4.4** Link Purpose | Link purpose clear from link text or context |
+| **2.5.1** Pointer Gestures | Multi-point gestures have single-pointer alternatives |
+| **2.5.2** Pointer Cancellation | Down-event doesn't trigger action (use up-event or click) |
+| **2.5.3** Label in Name | Accessible name contains visible label text |
+| **2.5.4** Motion Actuation | Motion-triggered functions have alternatives |
+| **3.1.1** Language of Page | Default language specified in HTML |
+| **3.2.1** On Focus | Focus doesn't trigger unexpected changes |
+| **3.2.2** On Input | Input doesn't trigger unexpected changes |
+| **3.2.6** Consistent Help | Help mechanisms appear in the same relative order across pages |
+| **3.3.1** Error Identification | Input errors clearly described |
+| **3.3.2** Labels or Instructions | Form inputs have labels or instructions |
+| **3.3.7** Redundant Entry | Information previously entered is auto-populated or available to select |
+| **4.1.2** Name, Role, Value | UI components have accessible names and correct roles |
+
+### Level AA (standard)
+
+| Criterion | Description |
+|-----------|-------------|
+| **1.2.4** Captions (Live) | Live audio has captions |
+| **1.2.5** Audio Description | Pre-recorded video has audio description |
+| **1.3.4** Orientation | Content doesn't restrict orientation |
+| **1.3.5** Identify Input Purpose | Input purpose can be programmatically determined |
+| **1.4.3** Contrast (Minimum) | 4.5:1 for normal text, 3:1 for large text |
+| **1.4.4** Resize Text | Text can be resized to 200% without loss of functionality |
+| **1.4.5** Images of Text | Text used instead of images of text |
+| **1.4.10** Reflow | Content reflows at 320px width without horizontal scroll |
+| **1.4.11** Non-text Contrast | UI components have 3:1 contrast |
+| **1.4.12** Text Spacing | Content adapts to text spacing changes |
+| **1.4.13** Content on Hover/Focus | Additional content is dismissible, hoverable, persistent |
+| **2.4.5** Multiple Ways | Multiple ways to find pages |
+| **2.4.6** Headings and Labels | Headings and labels are descriptive |
+| **2.4.7** Focus Visible | Focus indicator is visible |
+| **2.4.11** Focus Not Obscured (Minimum) | Focused element is not entirely hidden by author-created content |
+| **2.5.7** Dragging Movements | Dragging actions have single-pointer alternatives |
+| **2.5.8** Target Size (Minimum) | Interactive targets are at least 24×24 CSS pixels (with exceptions) |
+| **3.1.2** Language of Parts | Language changes are marked |
+| **3.2.3** Consistent Navigation | Navigation is consistent across pages |
+| **3.2.4** Consistent Identification | Same functionality uses same labels |
+| **3.3.3** Error Suggestion | Error corrections suggested when known |
+| **3.3.4** Error Prevention (Legal) | Actions can be reversed or confirmed |
+| **3.3.8** Accessible Authentication (Minimum) | No cognitive function test for login unless an alternative or assistance is provided |
+| **4.1.3** Status Messages | Status messages announced to screen readers |
+
+### Level AAA (enhanced)
+
+| Criterion | Description |
+|-----------|-------------|
+| **1.4.6** Contrast (Enhanced) | 7:1 for normal text, 4.5:1 for large text |
+| **1.4.8** Visual Presentation | Foreground/background colors can be selected |
+| **1.4.9** Images of Text (No Exception) | No images of text |
+| **2.1.3** Keyboard (No Exception) | All functionality keyboard accessible |
+| **2.2.3** No Timing | No time limits |
+| **2.2.4** Interruptions | Interruptions can be postponed |
+| **2.2.5** Re-authenticating | Data preserved on re-authentication |
+| **2.2.6** Timeouts | Users warned about data loss from inactivity |
+| **2.3.2** Three Flashes | No content flashes more than 3 times |
+| **2.3.3** Animation from Interactions | Motion animation can be disabled |
+| **2.4.8** Location | User location within site is available |
+| **2.4.9** Link Purpose (Link Only) | Link purpose clear from link text alone |
+| **2.4.10** Section Headings | Sections have headings |
+| **2.4.12** Focus Not Obscured (Enhanced) | No part of the focused element is hidden by author-created content |
+| **2.4.13** Focus Appearance | Focus indicator has sufficient area, contrast, and is not obscured |
+| **3.1.3** Unusual Words | Definitions available for unusual words |
+| **3.1.4** Abbreviations | Abbreviations expanded |
+| **3.1.5** Reading Level | Alternative content for complex text |
+| **3.1.6** Pronunciation | Pronunciation available where needed |
+| **3.2.5** Change on Request | Changes initiated only by user |
+| **3.3.5** Help | Context-sensitive help available |
+| **3.3.6** Error Prevention (All) | All form submissions can be reviewed |
+| **3.3.9** Accessible Authentication (Enhanced) | No cognitive function test for login (no object or personal content recognition exceptions) |
+
+## Common ARIA patterns
+
+### Buttons
+```html
+Label
+
+×
+```
+
+### Links
+```html
+Descriptive link text
+
+
+ External site
+ (opens in new tab)
+
+```
+
+### Form fields
+```html
+Email address
+
+We'll never share your email.
+```
+
+### Error states
+```html
+Email
+
+Please enter a valid email address.
+```
+
+### Navigation
+```html
+
+
+
+```
+
+### Modals
+```html
+
+
Confirm Action
+
+
+```
+
+### Live regions
+```html
+
+Status update here
+
+
+Error message here
+
+
+Loading complete
+```
+
+## What changed from 2.1 to 2.2
+
+| Change | Criterion | Level |
+|--------|-----------|-------|
+| **Removed** | 4.1.1 Parsing | A |
+| **Added** | 2.4.11 Focus Not Obscured (Minimum) | AA |
+| **Added** | 2.4.12 Focus Not Obscured (Enhanced) | AAA |
+| **Added** | 2.4.13 Focus Appearance | AAA |
+| **Added** | 2.5.7 Dragging Movements | AA |
+| **Added** | 2.5.8 Target Size (Minimum) | AA |
+| **Added** | 3.2.6 Consistent Help | A |
+| **Added** | 3.3.7 Redundant Entry | A |
+| **Added** | 3.3.8 Accessible Authentication (Minimum) | AA |
+| **Added** | 3.3.9 Accessible Authentication (Enhanced) | AAA |
+
+## Testing tools
+
+| Tool | Type | URL |
+|------|------|-----|
+| axe DevTools | Browser extension | [deque.com/axe](https://www.deque.com/axe/) |
+| WAVE | Browser extension | [wave.webaim.org](https://wave.webaim.org/) |
+| Lighthouse | Built into Chrome | DevTools → Lighthouse |
+| NVDA | Screen reader (Windows) | [nvaccess.org](https://www.nvaccess.org/) |
+| VoiceOver | Screen reader (Mac) | Built into macOS |
+| Colour Contrast Analyser | Desktop app | [tpgi.com](https://www.tpgi.com/color-contrast-checker/) |
+
+## Sources
+
+- [WCAG 2.2 W3C Recommendation](https://www.w3.org/TR/WCAG22/)
+- [WCAG 2.2 Quick Reference](https://www.w3.org/WAI/WCAG22/quickref/)
+- [What's New in WCAG 2.2](https://www.w3.org/WAI/standards-guidelines/wcag/new-in-22/)
diff --git a/.agents/skills/bun/SKILL.md b/.agents/skills/bun/SKILL.md
new file mode 100644
index 0000000..3414710
--- /dev/null
+++ b/.agents/skills/bun/SKILL.md
@@ -0,0 +1,198 @@
+---
+name: Bun
+description: Use when building, testing, or deploying JavaScript/TypeScript applications. Reach for Bun when you need to run scripts, install packages, bundle code, or test applications — it's a drop-in replacement for Node.js with integrated package manager, test runner, and bundler.
+metadata:
+ mintlify-proj: bun
+ version: "1.0"
+---
+
+# Bun Skill Reference
+
+## Product Summary
+
+Bun is an all-in-one JavaScript/TypeScript toolkit that replaces Node.js, npm, and bundlers with a single fast binary. It includes a runtime (powered by JavaScriptCore), package manager, test runner, and bundler. Key files: `bunfig.toml` (configuration), `package.json` (scripts and dependencies), `bun.lock` (lockfile). Primary CLI commands: `bun run`, `bun install`, `bun test`, `bun build`. See https://bun.com/docs for comprehensive documentation.
+
+## When to Use
+
+- **Running scripts**: Execute `.js`, `.ts`, `.jsx`, `.tsx` files directly with `bun run` or `bun ` — no compilation step needed
+- **Package management**: Install dependencies with `bun install` (25x faster than npm) or add packages with `bun add`
+- **Testing**: Write and run Jest-compatible tests with `bun test` — TypeScript support is built in
+- **Bundling**: Bundle applications for browsers or servers with `bun build` or `Bun.build()` API
+- **HTTP servers**: Build servers with `Bun.serve()` API with native WebSocket and streaming support
+- **Monorepos**: Manage workspaces with `bun install --filter` and run scripts across packages
+- **Development**: Use watch mode (`--watch`) for live reloading during development
+- **Deployment**: Compile standalone executables with `bun build --compile` or deploy to Vercel, Railway, etc.
+
+## Quick Reference
+
+### Essential Commands
+
+| Task | Command |
+|------|---------|
+| Run a file | `bun run index.ts` or `bun index.ts` |
+| Run a script | `bun run dev` (from package.json) |
+| Install dependencies | `bun install` |
+| Add a package | `bun add react` or `bun add -d @types/node` |
+| Remove a package | `bun remove react` |
+| Run tests | `bun test` |
+| Watch tests | `bun test --watch` |
+| Build for browser | `bun build ./index.tsx --outdir ./dist` |
+| Build for server | `bun build ./index.tsx --outdir ./dist --target bun` |
+| Watch build | `bun build ./index.tsx --outdir ./dist --watch` |
+| Run with watch mode | `bun --watch run index.ts` |
+| Execute a package | `bunx cowsay "Hello"` |
+
+### Configuration Files
+
+| File | Purpose |
+|------|---------|
+| `bunfig.toml` | Bun-specific configuration (optional, zero-config by default) |
+| `package.json` | Project metadata, scripts, dependencies |
+| `bun.lock` | Lockfile (text-based, replaces package-lock.json) |
+| `tsconfig.json` | TypeScript configuration (Bun respects this) |
+
+### Key bunfig.toml Sections
+
+```toml
+[install]
+linker = "hoisted" # or "isolated" for strict dependency isolation
+dev = true # install devDependencies
+optional = true # install optionalDependencies
+peer = true # install peerDependencies
+
+[test]
+root = "."
+coverage = false
+coverageThreshold = 0.9
+
+[run]
+shell = "system" # or "bun" for Bun's shell
+bun = true # alias node to bun in scripts
+```
+
+### File Type Support
+
+Bun natively transpiles and executes:
+- `.js`, `.jsx` — JavaScript and JSX
+- `.ts`, `.tsx` — TypeScript and TSX
+- `.json`, `.jsonc`, `.toml`, `.yaml` — Data files (parsed at build time)
+- `.html` — HTML with asset bundling
+- `.css` — CSS bundling
+
+## Decision Guidance
+
+| Scenario | Use | Why |
+|----------|-----|-----|
+| **Package installation** | `bun install` vs `npm install` | Bun is 25x faster, uses global cache, supports workspaces |
+| **Linker strategy** | `--linker isolated` vs `--linker hoisted` | Isolated prevents phantom dependencies; hoisted is traditional npm behavior |
+| **Build target** | `--target browser` vs `--target bun` vs `--target node` | Browser for web apps, bun for server code, node for Node.js compatibility |
+| **Module format** | `--format esm` vs `--format cjs` | ESM is default; use CJS for CommonJS compatibility |
+| **Watch mode** | `--watch` vs manual restart | Use `--watch` for development; Bun uses OS-native file watchers (fast) |
+| **Test execution** | `--concurrent` vs sequential | Concurrent for independent tests; sequential for tests with shared state |
+| **Bundling** | `bun build` vs `Bun.build()` API | CLI for simple builds; API for programmatic control and in-memory bundling |
+
+## Workflow
+
+### 1. Initialize a Project
+```bash
+bun init my-app
+cd my-app
+```
+Choose template: Blank, React, or Library. Creates `package.json`, `tsconfig.json`, `bunfig.toml`.
+
+### 2. Install Dependencies
+```bash
+bun install
+# or add specific packages
+bun add react
+bun add -d @types/node typescript
+```
+Generates `bun.lock` lockfile. Use `--frozen-lockfile` in CI for reproducible builds.
+
+### 3. Write Code
+Create `.ts`, `.tsx`, `.js`, or `.jsx` files. Bun transpiles on the fly.
+
+### 4. Run Code
+```bash
+bun run index.ts
+# or with watch mode
+bun --watch run index.ts
+```
+
+### 5. Add Scripts to package.json
+```json
+{
+ "scripts": {
+ "dev": "bun --watch run src/index.ts",
+ "build": "bun build ./src/index.tsx --outdir ./dist",
+ "test": "bun test",
+ "start": "bun run dist/index.js"
+ }
+}
+```
+
+### 6. Run Scripts
+```bash
+bun run dev
+bun run build
+bun run test
+```
+
+### 7. Test
+```bash
+# Write tests in *.test.ts or *.spec.ts
+bun test
+bun test --watch
+bun test --coverage
+```
+
+### 8. Bundle for Production
+```bash
+bun build ./src/index.tsx --outdir ./dist --minify
+# or for a server
+bun build ./src/server.ts --outdir ./dist --target bun --minify
+```
+
+### 9. Deploy
+Commit `bun.lock` to version control. In CI, use `bun ci` (equivalent to `bun install --frozen-lockfile`).
+
+## Common Gotchas
+
+- **Watch mode flag placement**: Use `bun --watch run dev`, not `bun run dev --watch`. Flags after the script name are passed to the script itself.
+- **Lifecycle scripts**: Bun does not execute `postinstall` scripts for security. Add packages to `trustedDependencies` in `package.json` to allow them.
+- **Node.js compatibility**: Bun aims for Node.js compatibility but is not 100% complete. Check `/runtime/nodejs-compat` for current status.
+- **TypeScript errors in Bun global**: Install `@types/bun` and configure `tsconfig.json` with `"lib": ["ESNext"]` and `"module": "Preserve"`.
+- **Module resolution**: Bun supports both ESM and CommonJS. Use `import` for ESM (recommended) or `require()` for CommonJS.
+- **Bundler is not a type checker**: Use `tsc` separately for type checking and `.d.ts` generation; `bun build` only transpiles.
+- **Auto-install disabled in production**: Set `install.auto = "disable"` in `bunfig.toml` for production environments.
+- **Phantom dependencies**: Use `--linker isolated` to prevent accidental imports of transitive dependencies.
+- **Environment variables**: Bun auto-loads `.env`, `.env.local`, `.env.[NODE_ENV]`. Disable with `env = false` in `bunfig.toml`.
+- **Minification by default for bun target**: When `target: "bun"`, identifiers are minified by default; use `minify: false` to disable.
+
+## Verification Checklist
+
+Before submitting work with Bun:
+
+- [ ] Run `bun install` to ensure dependencies are locked
+- [ ] Run `bun test` to verify all tests pass
+- [ ] Run `bun run build` (or your build script) and verify output in `dist/` or configured `outdir`
+- [ ] Test the built output: `bun run dist/index.js` or `node dist/index.js` (if targeting Node.js)
+- [ ] Check `bun.lock` is committed to version control
+- [ ] Verify `bunfig.toml` has correct configuration for your environment (dev vs. production)
+- [ ] Run `bun run --filter
+
+```
+
+**Use API for**: Selective deployment on specific pages
+**Don't combine**: Zone-wide toggle + manual injection
+
+### WAF Rules for JSD
+```txt
+# NEVER use on first page visit (needs HTML page first)
+(not cf.bot_management.js_detection.passed and http.request.uri.path eq "/api/user/create" and http.request.method eq "POST" and not cf.bot_management.verified_bot)
+Action: Managed Challenge (always use Managed Challenge, not Block)
+```
+
+### Limitations
+- First request won't have JSD data (needs HTML page first)
+- Strips ETags from HTML responses
+- Not supported with CSP delivered via `<meta>` tags
+- Websocket endpoints not supported
+- Native mobile apps won't pass
+- cf_clearance cookie: 15-minute lifespan, max 4096 bytes
+
+## __cf_bm Cookie
+
+Cloudflare sets `__cf_bm` cookie to smooth bot scores across user sessions:
+
+- **Purpose:** Reduces false positives from score volatility
+- **Scope:** Per-domain, HTTP-only
+- **Lifespan:** Session duration
+- **Privacy:** No PII—only session classification
+- **Automatic:** No configuration required
+
+Bot scores for repeat visitors consider session history via this cookie.
+
+## Static Resource Protection
+
+**File Extensions**: ico, jpg, png, jpeg, gif, css, js, tif, tiff, bmp, pict, webp, svg, svgz, class, jar, txt, csv, doc, docx, xls, xlsx, pdf, ps, pls, ppt, pptx, ttf, otf, woff, woff2, eot, eps, ejs, swf, torrent, midi, mid, m3u8, m4a, mp3, ogg, ts
+**Plus**: `/.well-known/` path (all files)
+
+```txt
+# Exclude static resources from bot rules
+(cf.bot_management.score lt 30 and not cf.bot_management.static_resource)
+```
+
+**WARNING**: May block mail clients fetching static images
+
+## JA3/JA4 Fingerprinting (Enterprise)
+
+```txt
+# Block specific attack fingerprint
+(cf.bot_management.ja3_hash eq "8b8e3d5e3e8b3d5e")
+
+# Allow mobile app by fingerprint
+(cf.bot_management.ja4 eq "your_mobile_app_fingerprint")
+```
+
+Only available for HTTPS/TLS traffic. Missing for Worker-routed traffic or HTTP requests.
+
+## Verified Bot Categories
+
+```txt
+# Allow search engines only
+(cf.verified_bot_category eq "Search Engine Crawler")
+
+# Block AI crawlers
+(cf.verified_bot_category eq "AI Crawler")
+Action: Block
+
+# Or use dashboard: Security > Settings > Bot Management > Block AI Bots
+```
+
+| Category | String Value | Example |
+|----------|--------------|---------|
+| AI Crawler | `AI Crawler` | GPTBot, Claude-Web |
+| AI Assistant | `AI Assistant` | Perplexity-User, DuckAssistBot |
+| AI Search | `AI Search` | OAI-SearchBot |
+| Accessibility | `Accessibility` | Accessible Web Bot |
+| Academic Research | `Academic Research` | Library of Congress |
+| Advertising & Marketing | `Advertising & Marketing` | Google Adsbot |
+| Aggregator | `Aggregator` | Pinterest, Indeed |
+| Archiver | `Archiver` | Internet Archive, CommonCrawl |
+| Feed Fetcher | `Feed Fetcher` | RSS/Podcast updaters |
+| Monitoring & Analytics | `Monitoring & Analytics` | Uptime monitors |
+| Page Preview | `Page Preview` | Facebook/Slack link preview |
+| SEO | `Search Engine Optimization` | Google Lighthouse |
+| Security | `Security` | Vulnerability scanners |
+| Social Media Marketing | `Social Media Marketing` | Brandwatch |
+| Webhooks | `Webhooks` | Payment processors |
+| Other | `Other` | Uncategorized bots |
+
+## Best Practices
+
+- **ML Auto-Updates**: Enable on Enterprise for latest models
+- **Start with Managed Challenge**: Test before blocking
+- **Always exclude verified bots**: Use `not cf.bot_management.verified_bot`
+- **Exempt corporate proxies**: For B2B traffic via `cf.bot_management.corporate_proxy`
+- **Use static resource exception**: Improves performance, reduces overhead
diff --git a/.agents/skills/cloudflare-deploy/references/bot-management/gotchas.md b/.agents/skills/cloudflare-deploy/references/bot-management/gotchas.md
new file mode 100644
index 0000000..685bcbd
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/bot-management/gotchas.md
@@ -0,0 +1,114 @@
+# Bot Management Gotchas
+
+## Common Errors
+
+### "Bot Score = 0"
+
+**Cause:** Bot Management didn't run (internal Cloudflare request, Worker routing to zone (Orange-to-Orange), or request handled before BM (Redirect Rules, etc.))
+**Solution:** Check request flow and ensure Bot Management runs in request lifecycle
+
+### "JavaScript Detections Not Working"
+
+**Cause:** `js_detection.passed` always false or undefined due to: CSP headers don't allow `/cdn-cgi/challenge-platform/`, using on first page visit (needs HTML page first), ad blockers or disabled JS, JSD not enabled in dashboard, or using Block action (must use Managed Challenge)
+**Solution:** Add CSP header `Content-Security-Policy: script-src 'self' /cdn-cgi/challenge-platform/;` and ensure JSD is enabled with Managed Challenge action
+
+### "False Positives (Legitimate Users Blocked)"
+
+**Cause:** Bot detection incorrectly flagging legitimate users
+**Solution:** Check Bot Analytics for affected IPs/paths, identify detection source (ML, Heuristics, etc.), create exception rule like `(cf.bot_management.score lt 30 and http.request.uri.path eq "/problematic-path")` with Action: Skip (Bot Management), or allowlist by IP/ASN/country
+
+### "False Negatives (Bots Not Caught)"
+
+**Cause:** Bots bypassing detection
+**Solution:** Raise the score threshold (30 → 50) so more suspicious traffic is caught, enable JavaScript Detections, add JA3/JA4 fingerprinting rules, or use rate limiting as fallback
+
+### "Verified Bot Blocked"
+
+**Cause:** Search engine bot blocked by WAF Managed Rules (not just Bot Management)
+**Solution:** Create WAF exception for specific rule ID and verify bot via reverse DNS
+
+### "Yandex Bot Blocked During IP Update"
+
+**Cause:** Yandex updates bot IPs; new IPs unrecognized for 48h during propagation
+**Solution:**
+1. Check Security Events for specific WAF rule ID blocking Yandex
+2. Create WAF exception:
+ ```txt
+  (http.user_agent contains "YandexBot" and ip.src in {<new_yandex_ip_ranges>})
+ Action: Skip (WAF Managed Ruleset)
+ ```
+3. Monitor Bot Analytics for 48h
+4. Remove exception after propagation completes
+
+Issue resolves automatically after 48h. Contact Cloudflare Support if persists.
+
+### "JA3/JA4 Missing"
+
+**Cause:** Non-HTTPS traffic, Worker routing traffic, Orange-to-Orange traffic via Worker, or Bot Management skipped
+**Solution:** JA3/JA4 only available for HTTPS/TLS traffic; check request routing
+
+**JA3/JA4 Not User-Unique:** Same browser/library version = same fingerprint
+- Don't use for user identification
+- Use for client profiling only
+- Fingerprints change with browser updates
+
+## Bot Verification Methods
+
+Cloudflare verifies bots via:
+
+1. **Reverse DNS (IP validation):** Traditional method—bot IP resolves to expected domain
+2. **Web Bot Auth:** Modern cryptographic verification—faster propagation
+
+When `verifiedBot=true`, bot passed at least one method.
+
+**Inactive verified bots:** IPs removed after 24h of no traffic.
+
+## Detection Engine Behavior
+
+| Engine | Score | Timing | Plan | Notes |
+|--------|-------|--------|------|-------|
+| Heuristics | Always 1 | Immediate | All | Known fingerprints—overrides ML |
+| ML | 1-99 | Immediate | All | Majority of detections |
+| Anomaly Detection | Influences | After baseline | Enterprise | Optional, baseline analysis |
+| JavaScript Detections | Pass/fail | After JS | Pro+ | Headless browser detection |
+| Cloudflare Service | N/A | N/A | Enterprise | Zero Trust internal source |
+
+**Priority:** Heuristics > ML—if heuristic matches, score=1 regardless of ML.
+
+## Limits
+
+| Limit | Value | Notes |
+|-------|-------|-------|
+| Bot Score = 0 | Means not computed | Not score = 100 |
+| First request JSD data | May not be available | JSD data appears on subsequent requests |
+| Score accuracy | Not 100% guaranteed | False positives/negatives possible |
+| JSD on first HTML page visit | Not supported | Requires subsequent page load |
+| JSD requirements | JavaScript-enabled browser | Won't work with JS disabled or ad blockers |
+| JSD ETag stripping | Strips ETags from HTML responses | May affect caching behavior |
+| JSD CSP compatibility | Requires specific CSP | Not compatible with some CSP configurations |
+| JSD meta CSP tags | Not supported | Must use HTTP headers |
+| JSD WebSocket support | Not supported | WebSocket endpoints won't work with JSD |
+| JSD mobile app support | Native apps won't pass | Only works in browsers |
+| JA3/JA4 traffic type | HTTPS/TLS only | Not available for non-HTTPS traffic |
+| JA3/JA4 Worker routing | Missing for Worker-routed traffic | Check request routing |
+| JA3/JA4 uniqueness | Not unique per user | Shared by clients with same browser/library |
+| JA3/JA4 stability | Can change with updates | Browser/library updates affect fingerprints |
+| WAF custom rules (Free) | 5 | Varies by plan |
+| WAF custom rules (Pro) | 20 | Varies by plan |
+| WAF custom rules (Business) | 100 | Varies by plan |
+| WAF custom rules (Enterprise) | 1,000+ | Varies by plan |
+| Workers CPU time | Varies by plan | Applies to bot logic |
+| Bot Analytics sampling | 1-10% adaptive | High-volume zones sampled more aggressively |
+| Bot Analytics history | 30 days max | Historical data retention limit |
+| CSP requirements for JSD | Must allow `/cdn-cgi/challenge-platform/` | Required for JSD to function |
+
+### Plan Restrictions
+
+| Feature | Free | Pro/Business | Enterprise |
+|---------|------|--------------|------------|
+| Granular scores (1-99) | No | No | Yes |
+| JA3/JA4 | No | No | Yes |
+| Anomaly Detection | No | No | Yes |
+| Corporate Proxy detection | No | No | Yes |
+| Verified bot categories | Limited | Limited | Full |
+| Custom WAF rules | 5 | 20/100 | 1,000+ |
diff --git a/.agents/skills/cloudflare-deploy/references/bot-management/patterns.md b/.agents/skills/cloudflare-deploy/references/bot-management/patterns.md
new file mode 100644
index 0000000..4ca7085
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/bot-management/patterns.md
@@ -0,0 +1,182 @@
+# Bot Management Patterns
+
+## E-commerce Protection
+
+```txt
+# High security for checkout
+(cf.bot_management.score lt 50 and http.request.uri.path in {"/checkout" "/cart/add"} and not cf.bot_management.verified_bot and not cf.bot_management.corporate_proxy)
+Action: Managed Challenge
+```
+
+## API Protection
+
+```txt
+# Protect API with JS detection + score
+(http.request.uri.path matches "^/api/" and (cf.bot_management.score lt 30 or not cf.bot_management.js_detection.passed) and not cf.bot_management.verified_bot)
+Action: Block
+```
+
+## SEO-Friendly Bot Handling
+
+```txt
+# Allow search engine crawlers
+(cf.bot_management.score lt 30 and not cf.verified_bot_category in {"Search Engine Crawler"})
+Action: Managed Challenge
+```
+
+## Block AI Scrapers
+
+```txt
+# Block training crawlers only (allow AI assistants/search)
+(cf.verified_bot_category eq "AI Crawler")
+Action: Block
+
+# Block all AI-related bots (training + assistants + search)
+(cf.verified_bot_category in {"AI Crawler" "AI Assistant" "AI Search"})
+Action: Block
+
+# Allow AI Search, block AI Crawler and AI Assistant
+(cf.verified_bot_category in {"AI Crawler" "AI Assistant"})
+Action: Block
+
+# Or use dashboard: Security > Settings > Bot Management > Block AI Bots
+```
+
+## Rate Limiting by Bot Score
+
+```txt
+# Stricter limits for suspicious traffic
+(cf.bot_management.score lt 50)
+Rate: 10 requests per 10 seconds
+
+(cf.bot_management.score ge 50)
+Rate: 100 requests per 10 seconds
+```
+
+## Mobile App Allowlisting
+
+```txt
+# Identify mobile app by JA3/JA4
+(cf.bot_management.ja4 in {"fingerprint1" "fingerprint2"})
+Action: Skip (all remaining rules)
+```
+
+## Datacenter Detection
+
+```typescript
+import type { IncomingRequestCfProperties } from '@cloudflare/workers-types';
+
+// Low score + not corporate proxy = likely datacenter bot
+export default {
+  async fetch(request: Request): Promise<Response> {
+ const cf = request.cf as IncomingRequestCfProperties | undefined;
+ const botMgmt = cf?.botManagement;
+
+ if (botMgmt?.score && botMgmt.score < 30 &&
+ !botMgmt.corporateProxy && !botMgmt.verifiedBot) {
+ return new Response('Datacenter traffic blocked', { status: 403 });
+ }
+
+ return fetch(request);
+ }
+};
+```
+
+## Conditional Delay (Tarpit)
+
+```typescript
+import type { IncomingRequestCfProperties } from '@cloudflare/workers-types';
+
+// Add delay proportional to bot suspicion
+export default {
+  async fetch(request: Request): Promise<Response> {
+ const cf = request.cf as IncomingRequestCfProperties | undefined;
+ const botMgmt = cf?.botManagement;
+
+ if (botMgmt?.score && botMgmt.score < 50 && !botMgmt.verifiedBot) {
+ // Delay: 0-2 seconds for scores 50-0
+ const delayMs = Math.max(0, (50 - botMgmt.score) * 40);
+ await new Promise(r => setTimeout(r, delayMs));
+ }
+
+ return fetch(request);
+ }
+};
+```
+
+## Layered Defense
+
+```txt
+1. Bot Management (score-based)
+2. JavaScript Detections (for JS-capable clients)
+3. Rate Limiting (fallback protection)
+4. WAF Managed Rules (OWASP, etc.)
+```
+
+## Progressive Enhancement
+
+```txt
+Public content: Lenient threshold (challenge only score < 10)
+Authenticated: Moderate threshold (challenge score < 30)
+Sensitive: Strict threshold (challenge score < 50) + JSD
+```
+
+## Zero Trust for Bots
+
+```txt
+1. Default deny (all scores < 30)
+2. Allowlist verified bots
+3. Allowlist mobile apps (JA3/JA4)
+4. Allowlist corporate proxies
+5. Allowlist static resources
+```
+
+## Workers: Score + JS Detection
+
+```typescript
+import type { IncomingRequestCfProperties } from '@cloudflare/workers-types';
+
+export default {
+  async fetch(request: Request): Promise<Response> {
+ const cf = request.cf as IncomingRequestCfProperties | undefined;
+ const botMgmt = cf?.botManagement;
+ const url = new URL(request.url);
+
+ if (botMgmt?.staticResource) return fetch(request); // Skip static
+
+ // API endpoints: require JS detection + good score
+ if (url.pathname.startsWith('/api/')) {
+ const jsDetectionPassed = botMgmt?.jsDetection?.passed ?? false;
+ const score = botMgmt?.score ?? 100;
+
+ if (!jsDetectionPassed || score < 30) {
+ return new Response('Unauthorized', { status: 401 });
+ }
+ }
+
+ return fetch(request);
+ }
+};
+```
+
+## Rate Limiting by JWT Claim + Bot Score
+
+```txt
+# Enterprise: Combine bot score with JWT validation
+Rate limiting > Custom rules
+- Field: lookup_json_string(http.request.jwt.claims["{config_id}"][0], "sub")
+- Matches: user ID claim
+- Additional condition: cf.bot_management.score lt 50
+```
+
+## WAF Integration Points
+
+- **WAF Custom Rules**: Primary enforcement mechanism
+- **Rate Limiting Rules**: Bot score as dimension, stricter limits for low scores
+- **Transform Rules**: Pass score to origin via custom header
+- **Workers**: Programmatic bot logic, custom scoring algorithms
+- **Page Rules / Configuration Rules**: Zone-level overrides, path-specific settings
+
+## See Also
+
+- [gotchas.md](./gotchas.md) - Common errors, false positives/negatives, limitations
diff --git a/.agents/skills/cloudflare-deploy/references/browser-rendering/README.md b/.agents/skills/cloudflare-deploy/references/browser-rendering/README.md
new file mode 100644
index 0000000..eca7220
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/browser-rendering/README.md
@@ -0,0 +1,78 @@
+# Cloudflare Browser Rendering Skill Reference
+
+**Description**: Expert knowledge for Cloudflare Browser Rendering - control headless Chrome on Cloudflare's global network for browser automation, screenshots, PDFs, web scraping, testing, and content generation.
+
+**When to use**: Any task involving Cloudflare Browser Rendering including: taking screenshots, generating PDFs, web scraping, browser automation, testing web applications, extracting structured data, capturing page metrics, or automating browser interactions.
+
+## Decision Tree
+
+### REST API vs Workers Bindings
+
+**Use REST API when:**
+- One-off, stateless tasks (screenshot, PDF, content fetch)
+- No Workers infrastructure yet
+- Simple integrations from external services
+- Need quick prototyping without deployment
+
+**Use Workers Bindings when:**
+- Complex browser automation workflows
+- Need session reuse for performance
+- Multiple page interactions per request
+- Custom scripting and logic required
+- Building production applications
+
+### Puppeteer vs Playwright
+
+| Feature | Puppeteer | Playwright |
+|---------|-----------|------------|
+| API Style | Chrome DevTools Protocol | High-level abstractions |
+| Selectors | CSS, XPath | CSS, text, role, test-id |
+| Best for | Advanced control, CDP access | Quick automation, testing |
+| Learning curve | Steeper | Gentler |
+
+**Use Puppeteer:** Need CDP protocol access, Chrome-specific features, migration from existing Puppeteer code
+**Use Playwright:** Modern selector APIs, cross-browser patterns, faster development
+
+## Tier Limits Summary
+
+| Limit | Free Tier | Paid Tier |
+|-------|-----------|-----------|
+| Daily browser time | 10 minutes | Unlimited* |
+| Concurrent sessions | 3 | 30 |
+| Requests per minute | 6 | 180 |
+
+*Subject to fair-use policy. See [gotchas.md](gotchas.md) for details.
+
+## Reading Order
+
+**New to Browser Rendering:**
+1. [configuration.md](configuration.md) - Setup and deployment
+2. [patterns.md](patterns.md) - Common use cases with examples
+3. [api.md](api.md) - API reference
+4. [gotchas.md](gotchas.md) - Avoid common pitfalls
+
+**Specific task:**
+- **Setup/deployment** → [configuration.md](configuration.md)
+- **API reference/endpoints** → [api.md](api.md)
+- **Example code/patterns** → [patterns.md](patterns.md)
+- **Debugging/troubleshooting** → [gotchas.md](gotchas.md)
+
+**REST API users:**
+- Start with [api.md](api.md) REST API section
+- Check [gotchas.md](gotchas.md) for rate limits
+
+**Workers users:**
+- Start with [configuration.md](configuration.md)
+- Review [patterns.md](patterns.md) for session management
+- Reference [api.md](api.md) for Workers Bindings
+
+## In This Reference
+
+- **[configuration.md](configuration.md)** - Setup, deployment, wrangler config, compatibility
+- **[api.md](api.md)** - REST API endpoints + Workers Bindings (Puppeteer/Playwright)
+- **[patterns.md](patterns.md)** - Common patterns, use cases, real examples
+- **[gotchas.md](gotchas.md)** - Troubleshooting, best practices, tier limits, common errors
+
+## See Also
+
+- [Cloudflare Docs](https://developers.cloudflare.com/browser-rendering/)
diff --git a/.agents/skills/cloudflare-deploy/references/browser-rendering/api.md b/.agents/skills/cloudflare-deploy/references/browser-rendering/api.md
new file mode 100644
index 0000000..eea56b0
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/browser-rendering/api.md
@@ -0,0 +1,108 @@
+# Browser Rendering API
+
+## REST API
+
+**Base:** `https://api.cloudflare.com/client/v4/accounts/{accountId}/browser-rendering`
+**Auth:** `Authorization: Bearer <API_TOKEN>` (Browser Rendering - Edit permission)
+
+### Endpoints
+
+| Endpoint | Description | Key Options |
+|----------|-------------|-------------|
+| `/content` | Get rendered HTML | `url`, `waitUntil` |
+| `/screenshot` | Capture image | `screenshotOptions: {type, fullPage, clip}` |
+| `/pdf` | Generate PDF | `pdfOptions: {format, landscape, margin}` |
+| `/snapshot` | HTML + inlined resources | `url` |
+| `/scrape` | Extract by selectors | `selectors: ["h1", ".price"]` |
+| `/json` | AI-structured extraction | `schema: {name: "string", price: "number"}` |
+| `/links` | Get all links | `url` |
+| `/markdown` | Convert to markdown | `url` |
+
+```bash
+curl -X POST '.../browser-rendering/screenshot' \
+ -H "Authorization: Bearer $TOKEN" \
+ -d '{"url":"https://example.com","screenshotOptions":{"fullPage":true}}'
+```
+
+## Workers Binding
+
+```jsonc
+// wrangler.jsonc
+{ "browser": { "binding": "MYBROWSER" } }
+```
+
+## Puppeteer
+
+```typescript
+import puppeteer from "@cloudflare/puppeteer";
+
+const browser = await puppeteer.launch(env.MYBROWSER, { keep_alive: 600000 });
+const page = await browser.newPage();
+await page.goto('https://example.com', { waitUntil: 'networkidle0' });
+
+// Content
+const html = await page.content();
+const title = await page.title();
+
+// Screenshot/PDF
+await page.screenshot({ fullPage: true, type: 'png' });
+await page.pdf({ format: 'A4', printBackground: true });
+
+// Interaction
+await page.click('#button');
+await page.type('#input', 'text');
+await page.evaluate(() => document.querySelector('h1')?.textContent);
+
+// Session management
+const sessions = await puppeteer.sessions(env.MYBROWSER);
+const limits = await puppeteer.limits(env.MYBROWSER);
+
+await browser.close();
+```
+
+## Playwright
+
+```typescript
+import { launch, connect } from "@cloudflare/playwright";
+
+const browser = await launch(env.MYBROWSER, { keep_alive: 600000 });
+const page = await browser.newPage();
+
+await page.goto('https://example.com', { waitUntil: 'networkidle' });
+
+// Modern selectors
+await page.locator('.button').click();
+await page.getByText('Submit').click();
+await page.getByTestId('search').fill('query');
+
+// Context for isolation
+const context = await browser.newContext({
+ viewport: { width: 1920, height: 1080 },
+ userAgent: 'custom'
+});
+
+await browser.close();
+```
+
+## Session Management
+
+```typescript
+// List sessions
+await puppeteer.sessions(env.MYBROWSER);
+
+// Connect to existing
+await puppeteer.connect(env.MYBROWSER, sessionId);
+
+// Check limits
+await puppeteer.limits(env.MYBROWSER);
+// { remaining: ms, total: ms, concurrent: n }
+```
+
+## Key Options
+
+| Option | Values |
+|--------|--------|
+| `waitUntil` | `load`, `domcontentloaded`, `networkidle0`, `networkidle2` |
+| `keep_alive` | Max 600000ms (10 min) |
+| `screenshot.type` | `png`, `jpeg` |
+| `pdf.format` | `A4`, `Letter`, `Legal` |
diff --git a/.agents/skills/cloudflare-deploy/references/browser-rendering/configuration.md b/.agents/skills/cloudflare-deploy/references/browser-rendering/configuration.md
new file mode 100644
index 0000000..84bad26
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/browser-rendering/configuration.md
@@ -0,0 +1,78 @@
+# Configuration & Setup
+
+## Installation
+
+```bash
+npm install @cloudflare/puppeteer # or @cloudflare/playwright
+```
+
+**Use Cloudflare packages** - standard `puppeteer`/`playwright` won't work in Workers.
+
+## wrangler.json
+
+```json
+{
+ "name": "browser-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-01-01",
+ "compatibility_flags": ["nodejs_compat"],
+ "browser": {
+ "binding": "MYBROWSER"
+ }
+}
+```
+
+**Required:** `nodejs_compat` flag and `browser.binding`.
+
+## TypeScript
+
+```typescript
+interface Env {
+ MYBROWSER: Fetcher;
+}
+
+export default {
+  async fetch(request: Request, env: Env): Promise<Response> {
+ // ...
+ }
+} satisfies ExportedHandler<Env>;
+```
+
+## Development
+
+```bash
+wrangler dev --remote # --remote required for browser binding
+```
+
+**Local mode does NOT support Browser Rendering** - must use `--remote`.
+
+## REST API
+
+No wrangler config needed. Get API token with "Browser Rendering - Edit" permission.
+
+```bash
+curl -X POST \
+ 'https://api.cloudflare.com/client/v4/accounts/{accountId}/browser-rendering/screenshot' \
+ -H 'Authorization: Bearer TOKEN' \
+ -d '{"url": "https://example.com"}' --output screenshot.png
+```
+
+## Requirements
+
+| Requirement | Value |
+|-------------|-------|
+| Node.js compatibility | `nodejs_compat` flag |
+| Compatibility date | 2023-03-01+ |
+| Module format | ES modules only |
+| Browser | Chromium 119+ (no Firefox/Safari) |
+
+**Not supported:** WebGL, WebRTC, extensions, `file://` protocol, Service Worker syntax.
+
+## Troubleshooting
+
+| Error | Solution |
+|-------|----------|
+| `MYBROWSER is undefined` | Use `wrangler dev --remote` |
+| `nodejs_compat not enabled` | Add to `compatibility_flags` |
+| `Module not found` | `npm install @cloudflare/puppeteer` |
+| `Browser Rendering not available` | Enable in dashboard |
diff --git a/.agents/skills/cloudflare-deploy/references/browser-rendering/gotchas.md b/.agents/skills/cloudflare-deploy/references/browser-rendering/gotchas.md
new file mode 100644
index 0000000..7e34f2b
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/browser-rendering/gotchas.md
@@ -0,0 +1,88 @@
+# Browser Rendering Gotchas
+
+## Tier Limits
+
+| Limit | Free | Paid |
+|-------|------|------|
+| Daily browser time | 10 min | Unlimited* |
+| Concurrent sessions | 3 | 30 |
+| Requests/minute | 6 | 180 |
+| Session keep-alive | 10 min max | 10 min max |
+
+*Subject to fair-use policy.
+
+**Check quota:**
+```typescript
+const limits = await puppeteer.limits(env.MYBROWSER);
+// { remaining: 540000, total: 600000, concurrent: 2 }
+```
+
+## Always Close Browsers
+
+```typescript
+const browser = await puppeteer.launch(env.MYBROWSER);
+try {
+ const page = await browser.newPage();
+ await page.goto("https://example.com");
+ return new Response(await page.content());
+} finally {
+ await browser.close(); // ALWAYS in finally
+}
+```
+
+**Workers vs REST:** REST auto-closes after timeout. Workers must call `close()` or session stays open until `keep_alive` expires.
+
+## Optimize Concurrency
+
+```typescript
+// ❌ 3 sessions (hits free tier limit)
+const browser1 = await puppeteer.launch(env.MYBROWSER);
+const browser2 = await puppeteer.launch(env.MYBROWSER);
+
+// ✅ 1 session, multiple pages
+const browser = await puppeteer.launch(env.MYBROWSER);
+const page1 = await browser.newPage();
+const page2 = await browser.newPage();
+```
+
+## Common Errors
+
+| Error | Cause | Fix |
+|-------|-------|-----|
+| Session limit exceeded | Too many concurrent | Close unused browsers, use pages not browsers |
+| Page navigation timeout | Slow page or `networkidle` on busy page | Increase timeout, use `waitUntil: "load"` |
+| Session not found | Expired session | Catch error, launch new session |
+| Evaluation failed | DOM element missing | Use `?.` optional chaining |
+| Protocol error: Target closed | Page closed during operation | Await all ops before closing |
+
+## page.evaluate() Gotchas
+
+```typescript
+// ❌ Outer scope not available
+const selector = "h1";
+await page.evaluate(() => document.querySelector(selector));
+
+// ✅ Pass as argument
+await page.evaluate((sel) => document.querySelector(sel)?.textContent, selector);
+```
+
+## Performance
+
+**waitUntil options (fastest to slowest):**
+1. `domcontentloaded` - DOM ready
+2. `load` - load event (default)
+3. `networkidle0` - no network for 500ms
+
+**Block unnecessary resources:**
+```typescript
+await page.setRequestInterception(true);
+page.on("request", (req) => {
+ if (["image", "stylesheet", "font"].includes(req.resourceType())) {
+ req.abort();
+ } else {
+ req.continue();
+ }
+});
+```
+
+**Session reuse:** Cold start ~1-2s, warm connect ~100-200ms. Store sessionId in KV for reuse.
diff --git a/.agents/skills/cloudflare-deploy/references/browser-rendering/patterns.md b/.agents/skills/cloudflare-deploy/references/browser-rendering/patterns.md
new file mode 100644
index 0000000..a652c2b
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/browser-rendering/patterns.md
@@ -0,0 +1,91 @@
+# Browser Rendering Patterns
+
+## Basic Worker
+
+```typescript
+import puppeteer from "@cloudflare/puppeteer";
+
+export default {
+ async fetch(request, env) {
+ const browser = await puppeteer.launch(env.MYBROWSER);
+ try {
+ const page = await browser.newPage();
+ await page.goto("https://example.com");
+ return new Response(await page.content());
+ } finally {
+ await browser.close(); // ALWAYS in finally
+ }
+ }
+};
+```
+
+## Session Reuse
+
+Keep sessions alive for performance:
+```typescript
+let sessionId = await env.SESSION_KV.get("browser-session");
+if (sessionId) {
+ browser = await puppeteer.connect(env.MYBROWSER, sessionId);
+} else {
+ browser = await puppeteer.launch(env.MYBROWSER, { keep_alive: 600000 });
+ await env.SESSION_KV.put("browser-session", browser.sessionId(), { expirationTtl: 600 });
+}
+// Don't close browser to keep session alive
+```
+
+## Common Operations
+
+| Task | Code |
+|------|------|
+| Screenshot | `await page.screenshot({ type: "png", fullPage: true })` |
+| PDF | `await page.pdf({ format: "A4", printBackground: true })` |
+| Extract data | `await page.evaluate(() => document.querySelector('h1').textContent)` |
+| Fill form | `await page.type('#input', 'value'); await page.click('button')` |
+| Wait nav | `await Promise.all([page.waitForNavigation(), page.click('a')])` |
+
+## Parallel Scraping
+
+```typescript
+const pages = await Promise.all(urls.map(() => browser.newPage()));
+await Promise.all(pages.map((p, i) => p.goto(urls[i])));
+const titles = await Promise.all(pages.map(p => p.title()));
+```
+
+## Playwright Selectors
+
+```typescript
+import { launch } from "@cloudflare/playwright";
+const browser = await launch(env.MYBROWSER);
+await page.getByRole("button", { name: "Sign in" }).click();
+await page.getByLabel("Email").fill("user@example.com");
+await page.getByTestId("submit-button").click();
+```
+
+## Incognito Contexts
+
+Isolated sessions without multiple browsers:
+```typescript
+const ctx1 = await browser.createIncognitoBrowserContext();
+const ctx2 = await browser.createIncognitoBrowserContext();
+// Each has isolated cookies/storage
+```
+
+## Quota Check
+
+```typescript
+const limits = await puppeteer.limits(env.MYBROWSER);
+if (limits.remaining < 60000) return new Response("Quota low", { status: 429 });
+```
+
+## Error Handling
+
+```typescript
+try {
+ await page.goto(url, { timeout: 30000, waitUntil: "networkidle0" });
+} catch (e) {
+ if (e.message.includes("timeout")) return new Response("Timeout", { status: 504 });
+ if (e.message.includes("Session limit")) return new Response("Too many sessions", { status: 429 });
+} finally {
+ if (browser) await browser.close();
+}
+```
diff --git a/.agents/skills/cloudflare-deploy/references/c3/README.md b/.agents/skills/cloudflare-deploy/references/c3/README.md
new file mode 100644
index 0000000..0516fc6
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/c3/README.md
@@ -0,0 +1,111 @@
+# C3 (create-cloudflare)
+
+Official CLI for scaffolding Cloudflare Workers and Pages projects with templates, TypeScript, and instant deployment.
+
+## Quick Start
+
+```bash
+# Interactive (recommended for first-time)
+npm create cloudflare@latest my-app
+
+# Worker (API/WebSocket/Cron)
+npm create cloudflare@latest my-api -- --type=hello-world --ts
+
+# Pages (static/SSG/full-stack)
+npm create cloudflare@latest my-site -- --type=web-app --framework=astro --platform=pages
+```
+
+## Platform Decision Tree
+
+```
+What are you building?
+
+├─ API / WebSocket / Cron / Email handler
+│ └─ Workers (default) - no --platform flag needed
+│ npm create cloudflare@latest my-api -- --type=hello-world
+
+├─ Static site / SSG / Documentation
+│ └─ Pages - requires --platform=pages
+│ npm create cloudflare@latest my-site -- --type=web-app --framework=astro --platform=pages
+
+├─ Full-stack app (Next.js/Remix/SvelteKit)
+│ ├─ Need Durable Objects, Queues, or Workers-only features?
+│ │ └─ Workers (default)
+│ └─ Otherwise use Pages for git integration and branch previews
+│ └─ Add --platform=pages
+
+└─ Convert existing project
+ └─ npm create cloudflare@latest . -- --type=pre-existing --existing-script=./src/worker.ts
+```
+
+**Critical:** Pages projects require `--platform=pages` flag. Without it, C3 defaults to Workers.
+
+## Interactive Flow
+
+When run without flags, C3 prompts in this order:
+
+1. **Project name** - Directory to create (defaults to current dir with `.`)
+2. **Application type** - `hello-world`, `web-app`, `demo`, `pre-existing`, `remote-template`
+3. **Platform** - `workers` (default) or `pages` (for web apps only)
+4. **Framework** - If web-app: `next`, `remix`, `astro`, `react-router`, `solid`, `svelte`, etc.
+5. **TypeScript** - `yes` (recommended) or `no`
+6. **Git** - Initialize repository? `yes` or `no`
+7. **Deploy** - Deploy now? `yes` or `no` (requires `wrangler login`)
+
+## Installation Methods
+
+```bash
+# NPM
+npm create cloudflare@latest
+
+# Yarn
+yarn create cloudflare
+
+# PNPM
+pnpm create cloudflare@latest
+```
+
+## In This Reference
+
+| File | Purpose | Use When |
+|------|---------|----------|
+| **api.md** | Complete CLI flag reference | Scripting, CI/CD, advanced usage |
+| **configuration.md** | Generated files, bindings, types | Understanding output, customization |
+| **patterns.md** | Workflows, CI/CD, monorepos | Real-world integration |
+| **gotchas.md** | Troubleshooting failures | Deployment blocked, errors |
+
+## Reading Order
+
+| Task | Read |
+|------|------|
+| Create first project | README only |
+| Set up CI/CD | README → api → patterns |
+| Debug failed deploy | gotchas |
+| Understand generated files | configuration |
+| Full CLI reference | api |
+| Create custom template | patterns → configuration |
+| Convert existing project | README → patterns |
+
+## Post-Creation
+
+```bash
+cd my-app
+
+# Local dev with hot reload
+npm run dev
+
+# Generate TypeScript types for bindings
+npm run cf-typegen
+
+# Deploy to Cloudflare
+npm run deploy
+```
+
+## See Also
+
+- **workers/README.md** - Workers runtime, bindings, APIs
+- **workers-ai/README.md** - AI/ML models
+- **pages/README.md** - Pages-specific features
+- **wrangler/README.md** - Wrangler CLI beyond initial setup
+- **d1/README.md** - SQLite database
+- **r2/README.md** - Object storage
diff --git a/.agents/skills/cloudflare-deploy/references/c3/api.md b/.agents/skills/cloudflare-deploy/references/c3/api.md
new file mode 100644
index 0000000..29c2b0c
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/c3/api.md
@@ -0,0 +1,71 @@
+# C3 CLI Reference
+
+## Invocation
+
+```bash
+npm create cloudflare@latest [name] [-- flags] # NPM requires --
+yarn create cloudflare [name] [flags]
+pnpm create cloudflare@latest [name] [-- flags]
+```
+
+## Core Flags
+
+| Flag | Values | Description |
+|------|--------|-------------|
+| `--type` | `hello-world`, `web-app`, `demo`, `pre-existing`, `remote-template` | Application type |
+| `--platform` | `workers` (default), `pages` | Target platform |
+| `--framework` | `next`, `remix`, `astro`, `react-router`, `solid`, `svelte`, `qwik`, `vue`, `angular`, `hono` | Web framework (requires `--type=web-app`) |
+| `--lang` | `ts`, `js`, `python` | Language (for `--type=hello-world`) |
+| `--ts` / `--no-ts` | - | TypeScript for web apps |
+
+## Deployment Flags
+
+| Flag | Description |
+|------|-------------|
+| `--deploy` / `--no-deploy` | Deploy immediately (prompts interactive, skips in CI) |
+| `--git` / `--no-git` | Initialize git (default: yes) |
+| `--open` | Open browser after deploy |
+
+## Advanced Flags
+
+| Flag | Description |
+|------|-------------|
+| `--template=user/repo` | GitHub template or local path |
+| `--existing-script=./src/worker.ts` | Existing script (requires `--type=pre-existing`) |
+| `--category=ai\|database\|realtime` | Demo filter (requires `--type=demo`) |
+| `--experimental` | Enable experimental features |
+| `--wrangler-defaults` | Skip wrangler prompts |
+
+## Environment Variables
+
+```bash
+CLOUDFLARE_API_TOKEN=xxx # For deployment
+CLOUDFLARE_ACCOUNT_ID=xxx # Account ID
+CF_TELEMETRY_DISABLED=1 # Disable telemetry
+```
+
+## Exit Codes
+
+`0` success, `1` user abort, `2` error
+
+## Examples
+
+```bash
+# TypeScript Worker
+npm create cloudflare@latest my-api -- --type=hello-world --lang=ts --no-deploy
+
+# Next.js on Pages
+npm create cloudflare@latest my-app -- --type=web-app --framework=next --platform=pages --ts
+
+# Astro blog
+npm create cloudflare@latest my-blog -- --type=web-app --framework=astro --ts --deploy
+
+# CI: non-interactive
+npm create cloudflare@latest my-app -- --type=web-app --framework=next --ts --no-git --no-deploy
+
+# GitHub template
+npm create cloudflare@latest -- --template=cloudflare/templates/worker-openapi
+
+# Convert existing project
+npm create cloudflare@latest . -- --type=pre-existing --existing-script=./build/worker.js
+```
diff --git a/.agents/skills/cloudflare-deploy/references/c3/configuration.md b/.agents/skills/cloudflare-deploy/references/c3/configuration.md
new file mode 100644
index 0000000..37f9f82
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/c3/configuration.md
@@ -0,0 +1,81 @@
+# C3 Generated Configuration
+
+## Output Structure
+
+```
+my-app/
+├── src/index.ts # Worker entry point
+├── wrangler.jsonc # Cloudflare config
+├── package.json # Scripts
+├── tsconfig.json
+└── .gitignore
+```
+
+## wrangler.jsonc
+
+```jsonc
+{
+ "$schema": "https://raw.githubusercontent.com/cloudflare/workers-sdk/main/packages/wrangler/config-schema.json",
+ "name": "my-app",
+ "main": "src/index.ts",
+ "compatibility_date": "2026-01-27"
+}
+```
+
+## Binding Placeholders
+
+C3 generates **placeholder IDs** that must be replaced before deploy:
+
+```jsonc
+{
+ "kv_namespaces": [{ "binding": "MY_KV", "id": "placeholder_kv_id" }],
+ "d1_databases": [{ "binding": "DB", "database_id": "00000000-..." }]
+}
+```
+
+**Replace with real IDs:**
+```bash
+npx wrangler kv namespace create MY_KV # Returns real ID
+npx wrangler d1 create my-database # Returns real database_id
+```
+
+**Deployment error if not replaced:**
+```
+Error: Invalid KV namespace ID "placeholder_kv_id"
+```
+
+## Scripts
+
+```json
+{
+ "scripts": {
+ "dev": "wrangler dev",
+ "deploy": "wrangler deploy",
+ "cf-typegen": "wrangler types"
+ }
+}
+```
+
+## Type Generation
+
+Run after adding bindings:
+```bash
+npm run cf-typegen
+```
+
+Generates `worker-configuration.d.ts` (binding types; runtime types go to `.wrangler/types/`):
+```typescript
+interface Env {
+ MY_KV: KVNamespace;
+ DB: D1Database;
+}
+```
+
+## Post-Creation Checklist
+
+1. Review `wrangler.jsonc` - check name, compatibility_date
+2. Replace placeholder binding IDs with real resource IDs
+3. Run `npm run cf-typegen`
+4. Test: `npm run dev`
+5. Deploy: `npm run deploy`
+6. Add secrets: `npx wrangler secret put SECRET_NAME`
diff --git a/.agents/skills/cloudflare-deploy/references/c3/gotchas.md b/.agents/skills/cloudflare-deploy/references/c3/gotchas.md
new file mode 100644
index 0000000..ecd664d
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/c3/gotchas.md
@@ -0,0 +1,92 @@
+# C3 Troubleshooting
+
+## Deployment Issues
+
+### Placeholder IDs
+
+**Error:** "Invalid namespace ID"
+**Fix:** Replace placeholders in wrangler.jsonc with real IDs:
+```bash
+npx wrangler kv namespace create MY_KV # Get real ID
+```
+
+### Authentication
+
+**Error:** "Not authenticated"
+**Fix:** `npx wrangler login` or set `CLOUDFLARE_API_TOKEN`
+
+### Name Conflict
+
+**Error:** "Worker already exists"
+**Fix:** Change `name` in wrangler.jsonc
+
+## Platform Selection
+
+| Need | Platform |
+|------|----------|
+| Git integration, branch previews | `--platform=pages` |
+| Durable Objects, D1, Queues | Workers (default) |
+
+Wrong platform? Recreate with correct `--platform` flag.
+
+## TypeScript Issues
+
+**"Cannot find name 'KVNamespace'"**
+```bash
+npm run cf-typegen # Regenerate types
+# Restart TS server in editor
+```
+
+**Missing types after config change:** Re-run `npm run cf-typegen`
+
+## Package Manager
+
+**Multiple lockfiles causing issues:**
+```bash
+rm pnpm-lock.yaml # If using npm
+rm package-lock.json # If using pnpm
+```
+
+## CI/CD
+
+**CI hangs on prompts:**
+```bash
+npm create cloudflare@latest my-app -- \
+ --type=hello-world --lang=ts --no-git --no-deploy
+```
+
+**Auth in CI:**
+```yaml
+env:
+ CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+ CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+```
+
+## Framework-Specific
+
+| Framework | Issue | Fix |
+|-----------|-------|-----|
+| Next.js | create-next-app failed | `npm cache clean --force`, retry |
+| Astro | Adapter missing | Install `@astrojs/cloudflare` |
+| Remix | Module errors | Update `@remix-run/cloudflare*` |
+
+## Compatibility Date
+
+**"Feature X requires compatibility_date >= ..."**
+**Fix:** Update `compatibility_date` in wrangler.jsonc to today's date
+
+## Node.js Version
+
+**"Node.js version not supported"**
+**Fix:** Install Node.js 18+ (`nvm install 20`)
+
+## Quick Reference
+
+| Error | Cause | Fix |
+|-------|-------|-----|
+| Invalid namespace ID | Placeholder binding | Create resource, update config |
+| Not authenticated | No login | `npx wrangler login` |
+| Cannot find KVNamespace | Missing types | `npm run cf-typegen` |
+| Worker already exists | Name conflict | Change `name` |
+| CI hangs | Missing flags | Add --type, --lang, --no-deploy |
+| Template not found | Bad name | Check cloudflare/templates |
diff --git a/.agents/skills/cloudflare-deploy/references/c3/patterns.md b/.agents/skills/cloudflare-deploy/references/c3/patterns.md
new file mode 100644
index 0000000..76379e3
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/c3/patterns.md
@@ -0,0 +1,82 @@
+# C3 Usage Patterns
+
+## Quick Workflows
+
+```bash
+# TypeScript API Worker
+npm create cloudflare@latest my-api -- --type=hello-world --lang=ts --deploy
+
+# Next.js on Pages
+npm create cloudflare@latest my-app -- --type=web-app --framework=next --platform=pages --ts --deploy
+
+# Astro static site
+npm create cloudflare@latest my-blog -- --type=web-app --framework=astro --platform=pages --ts
+```
+
+## CI/CD (GitHub Actions)
+
+```yaml
+- name: Deploy
+ run: npm run deploy
+ env:
+ CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+ CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+```
+
+**Non-interactive requires:**
+```bash
+--type= # Required
+--no-git # Recommended (CI already in git)
+--no-deploy # Deploy separately with secrets
+--framework= # For web-app
+--ts / --no-ts # Required
+```
+
+## Monorepo
+
+C3 detects workspace config (`package.json` workspaces or `pnpm-workspace.yaml`).
+
+```bash
+cd packages/
+npm create cloudflare@latest my-worker -- --type=hello-world --lang=ts --no-deploy
+```
+
+## Custom Templates
+
+```bash
+# GitHub repo
+npm create cloudflare@latest -- --template=username/repo
+npm create cloudflare@latest -- --template=cloudflare/templates/worker-openapi
+
+# Local path
+npm create cloudflare@latest my-app -- --template=../my-template
+```
+
+**Template requires `c3.config.json`:**
+```json
+{
+ "name": "my-template",
+ "category": "hello-world",
+ "copies": [{ "path": "src/" }, { "path": "wrangler.jsonc" }],
+ "transforms": [{ "path": "package.json", "jsonc": { "name": "{{projectName}}" }}]
+}
+```
+
+## Existing Projects
+
+```bash
+# Add Cloudflare to existing Worker
+npm create cloudflare@latest . -- --type=pre-existing --existing-script=./dist/index.js
+
+# Add to existing framework app
+npm create cloudflare@latest . -- --type=web-app --framework=next --platform=pages --ts
+```
+
+## Post-Creation Checklist
+
+1. Review `wrangler.jsonc` - set `compatibility_date`, verify `name`
+2. Create bindings: `wrangler kv namespace create`, `wrangler d1 create`, `wrangler r2 bucket create`
+3. Generate types: `npm run cf-typegen`
+4. Test: `npm run dev`
+5. Deploy: `npm run deploy`
+6. Set secrets: `wrangler secret put SECRET_NAME`
diff --git a/.agents/skills/cloudflare-deploy/references/cache-reserve/README.md b/.agents/skills/cloudflare-deploy/references/cache-reserve/README.md
new file mode 100644
index 0000000..395347a
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cache-reserve/README.md
@@ -0,0 +1,147 @@
+# Cloudflare Cache Reserve
+
+**Persistent cache storage built on R2 for long-term content retention**
+
+## Smart Shield Integration
+
+Cache Reserve is part of **Smart Shield**, Cloudflare's comprehensive security and performance suite:
+
+- **Smart Shield Advanced tier**: Includes 2TB Cache Reserve storage
+- **Standalone purchase**: Available separately if not using Smart Shield
+- **Migration**: Existing standalone customers can migrate to Smart Shield bundles
+
+**Decision**: Already on Smart Shield Advanced? Cache Reserve is included. Otherwise evaluate standalone purchase vs Smart Shield upgrade.
+
+## Overview
+
+Cache Reserve is Cloudflare's persistent, large-scale cache storage layer built on R2. It acts as the ultimate upper-tier cache, storing cacheable content for extended periods (30+ days) to maximize cache hits, reduce origin egress fees, and shield origins from repeated requests for long-tail content.
+
+## Core Concepts
+
+### What is Cache Reserve?
+
+- **Persistent storage layer**: Built on R2, sits above tiered cache hierarchy
+- **Long-term retention**: 30-day default retention, extended on each access
+- **Automatic operation**: Works seamlessly with existing CDN, no code changes required
+- **Origin shielding**: Dramatically reduces origin egress by serving cached content longer
+- **Usage-based pricing**: Pay only for storage + read/write operations
+
+### Cache Hierarchy
+
+```
+Visitor Request
+ ↓
+Lower-Tier Cache (closest to visitor)
+ ↓ (on miss)
+Upper-Tier Cache (closest to origin)
+ ↓ (on miss)
+Cache Reserve (R2 persistent storage)
+ ↓ (on miss)
+Origin Server
+```
+
+### How It Works
+
+1. **On cache miss**: Content fetched from origin → written to Cache Reserve + edge caches simultaneously
+2. **On edge eviction**: Content may be evicted from edge cache but remains in Cache Reserve
+3. **On subsequent request**: If edge cache misses but Cache Reserve hits → content restored to edge caches
+4. **Retention**: Assets remain in Cache Reserve for 30 days since last access (configurable via TTL)
+
+## When to Use Cache Reserve
+
+```
+Need persistent caching?
+├─ High origin egress costs → Cache Reserve ✓
+├─ Long-tail content (archives, media libraries) → Cache Reserve ✓
+├─ Already using Smart Shield Advanced → Included! ✓
+├─ Video streaming with seeking (range requests) → ✗ Not supported
+├─ Dynamic/personalized content → ✗ Use edge cache only
+├─ Need per-request cache control from Workers → ✗ Use R2 directly
+└─ Frequently updated content (< 10hr lifetime) → ✗ Not eligible
+```
+
+## Asset Eligibility
+
+Cache Reserve only stores assets meeting **ALL** criteria:
+
+- Cacheable per Cloudflare's standard rules
+- Minimum 10-hour TTL (36000 seconds)
+- `Content-Length` header present
+- Original files only (not transformed images)
+
+### Eligibility Checklist
+
+Use this checklist to verify if an asset is eligible:
+
+- [ ] Zone has Cache Reserve enabled
+- [ ] Zone has Tiered Cache enabled (required)
+- [ ] Asset TTL ≥ 10 hours (36,000 seconds)
+- [ ] `Content-Length` header present on origin response
+- [ ] No `Set-Cookie` header (or uses private directive)
+- [ ] `Vary` header is NOT `*` (can be `Accept-Encoding`)
+- [ ] Not an image transformation variant (original images OK)
+- [ ] Not a range request (no HTTP 206 support)
+- [ ] Not O2O (Orange-to-Orange) proxied request
+
+**All boxes must be checked for Cache Reserve eligibility.**
+
+### Not Eligible
+
+- Assets with TTL < 10 hours
+- Responses without `Content-Length` header
+- Image transformation variants (original images are eligible)
+- Responses with `Set-Cookie` headers
+- Responses with `Vary: *` header
+- Assets from R2 public buckets on same zone
+- O2O (Orange-to-Orange) setup requests
+- **Range requests** (video seeking, partial content downloads)
+
+## Quick Start
+
+```bash
+# Enable via Dashboard
+https://dash.cloudflare.com/caching/cache-reserve
+# Click "Enable Storage Sync" or "Purchase" button
+```
+
+**Prerequisites:**
+- Paid Cache Reserve plan or Smart Shield Advanced required
+- Tiered Cache required for optimal performance
+
+## Essential Commands
+
+```bash
+# Check Cache Reserve status
+curl -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/cache/cache_reserve" \
+ -H "Authorization: Bearer $API_TOKEN"
+
+# Enable Cache Reserve
+curl -X PATCH "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/cache/cache_reserve" \
+ -H "Authorization: Bearer $API_TOKEN" \
+ -H "Content-Type: application/json" \
+ -d '{"value": "on"}'
+
+# Check asset cache status
+curl -I https://example.com/asset.jpg | grep -i cache
+```
+
+## In This Reference
+
+| Task | Files |
+|------|-------|
+| Evaluate if Cache Reserve fits your use case | README.md (this file) |
+| Enable Cache Reserve for your zone | README.md + [configuration.md](./configuration.md) |
+| Use with Workers (understand limitations) | [api.md](./api.md) |
+| Setup via SDKs or IaC (TypeScript, Python, Terraform) | [configuration.md](./configuration.md) |
+| Optimize costs and debug issues | [patterns.md](./patterns.md) + [gotchas.md](./gotchas.md) |
+| Understand eligibility and troubleshoot | [gotchas.md](./gotchas.md) → [patterns.md](./patterns.md) |
+
+**Files:**
+- [configuration.md](./configuration.md) - Setup, API, SDKs, and Cache Rules
+- [api.md](./api.md) - Purging, monitoring, Workers integration
+- [patterns.md](./patterns.md) - Best practices, cost optimization, debugging
+- [gotchas.md](./gotchas.md) - Common issues, limitations, troubleshooting
+
+## See Also
+- [r2](../r2/) - Cache Reserve built on R2 storage
+- [workers](../workers/) - Workers integration with Cache API
diff --git a/.agents/skills/cloudflare-deploy/references/cache-reserve/api.md b/.agents/skills/cloudflare-deploy/references/cache-reserve/api.md
new file mode 100644
index 0000000..18c49d8
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cache-reserve/api.md
@@ -0,0 +1,194 @@
+# Cache Reserve API
+
+## Workers Integration
+
+```
+┌────────────────────────────────────────────────────────────────┐
+│ CRITICAL: Workers Cache API ≠ Cache Reserve │
+│ │
+│ • Workers caches.default / cache.put() → edge cache ONLY │
+│ • Cache Reserve → zone-level setting, automatic, no per-req │
+│ • You CANNOT selectively write to Cache Reserve from Workers │
+│ • Cache Reserve works with standard fetch(), not cache.put() │
+└────────────────────────────────────────────────────────────────┘
+```
+
+Cache Reserve is a **zone-level configuration**, not a per-request API. It works automatically when enabled for the zone:
+
+### Standard Fetch (Recommended)
+
+```typescript
+// Cache Reserve works automatically via standard fetch
+export default {
+ async fetch(request: Request, env: Env): Promise {
+ // Standard fetch uses Cache Reserve automatically
+ return await fetch(request);
+ }
+};
+```
+
+### Cache API Limitations
+
+**IMPORTANT**: `cache.put()` is **NOT compatible** with Cache Reserve or Tiered Cache.
+
+```typescript
+// ❌ WRONG: cache.put() bypasses Cache Reserve
+const cache = caches.default;
+let response = await cache.match(request);
+if (!response) {
+ response = await fetch(request);
+ await cache.put(request, response.clone()); // Bypasses Cache Reserve!
+}
+
+// ✅ CORRECT: Use standard fetch for Cache Reserve compatibility
+return await fetch(request);
+
+// ✅ CORRECT: Use Cache API only for custom cache namespaces
+const customCache = await caches.open('my-custom-cache');
+let response = await customCache.match(request);
+if (!response) {
+ response = await fetch(request);
+ await customCache.put(request, response.clone()); // Custom cache OK
+}
+```
+
+## Purging and Cache Management
+
+### Purge by URL (Instant)
+
+```typescript
+// Purge specific URL from Cache Reserve immediately
+const purgeCacheReserveByURL = async (
+ zoneId: string,
+ apiToken: string,
+ urls: string[]
+) => {
+ const response = await fetch(
+ `https://api.cloudflare.com/client/v4/zones/${zoneId}/purge_cache`,
+ {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${apiToken}`,
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({ files: urls })
+ }
+ );
+ return await response.json();
+};
+
+// Example usage
+await purgeCacheReserveByURL('zone123', 'token456', [
+ 'https://example.com/image.jpg',
+ 'https://example.com/video.mp4'
+]);
+```
+
+### Purge by Tag/Host/Prefix (Revalidation)
+
+```typescript
+// Purge by cache tag - forces revalidation, not immediate removal
+await fetch(
+ `https://api.cloudflare.com/client/v4/zones/${zoneId}/purge_cache`,
+ {
+ method: 'POST',
+ headers: { 'Authorization': `Bearer ${apiToken}`, 'Content-Type': 'application/json' },
+ body: JSON.stringify({ tags: ['tag1', 'tag2'] })
+ }
+);
+```
+
+**Purge behavior:**
+- **By URL**: Immediate removal from Cache Reserve + edge cache
+- **By tag/host/prefix**: Revalidation only, assets remain in storage (costs continue)
+
+### Clear All Cache Reserve Data
+
+```typescript
+// Requires Cache Reserve OFF first
+await fetch(
+ `https://api.cloudflare.com/client/v4/zones/${zoneId}/cache/cache_reserve_clear`,
+ { method: 'POST', headers: { 'Authorization': `Bearer ${apiToken}` } }
+);
+
+// Check status: GET same endpoint returns { state: "In-progress" | "Completed" }
+```
+
+**Process**: Disable Cache Reserve → Call clear endpoint → Wait up to 24hr → Re-enable
+
+## Monitoring and Analytics
+
+### Dashboard Analytics
+
+Navigate to **Caching > Cache Reserve** to view:
+
+- **Egress Savings**: Total bytes served from Cache Reserve vs origin egress cost saved
+- **Requests Served**: Cache Reserve hits vs misses breakdown
+- **Storage Used**: Current GB stored in Cache Reserve (billed monthly)
+- **Operations**: Class A (writes) and Class B (reads) operation counts
+- **Cost Tracking**: Estimated monthly costs based on current usage
+
+### Logpush Integration
+
+```typescript
+// Logpush field: CacheReserveUsed (boolean) - filter for Cache Reserve hits
+// Query Cache Reserve hits in analytics
+const logpushQuery = `
+ SELECT
+ ClientRequestHost,
+ COUNT(*) as requests,
+ SUM(EdgeResponseBytes) as bytes_served,
+ COUNT(CASE WHEN CacheReserveUsed = true THEN 1 END) as cache_reserve_hits,
+ COUNT(CASE WHEN CacheReserveUsed = false THEN 1 END) as cache_reserve_misses
+ FROM http_requests
+ WHERE Timestamp >= NOW() - INTERVAL '24 hours'
+ GROUP BY ClientRequestHost
+ ORDER BY requests DESC
+`;
+
+// Filter only Cache Reserve hits
+const crHitsQuery = `
+ SELECT ClientRequestHost, COUNT(*) as requests, SUM(EdgeResponseBytes) as bytes
+ FROM http_requests
+ WHERE CacheReserveUsed = true AND Timestamp >= NOW() - INTERVAL '7 days'
+ GROUP BY ClientRequestHost
+ ORDER BY bytes DESC
+`;
+```
+
+### GraphQL Analytics
+
+```graphql
+query CacheReserveAnalytics($zoneTag: string, $since: string, $until: string) {
+ viewer {
+ zones(filter: { zoneTag: $zoneTag }) {
+ httpRequests1dGroups(
+ filter: { datetime_geq: $since, datetime_leq: $until }
+ limit: 1000
+ ) {
+ dimensions { date }
+ sum {
+ cachedBytes
+ cachedRequests
+ bytes
+ requests
+ }
+ }
+ }
+ }
+}
+```
+
+## Pricing
+
+```typescript
+// Storage: $0.015/GB-month | Class A (writes): $4.50/M | Class B (reads): $0.36/M
+// Cache miss: 1A + 1B | Cache hit: 1B | Assets >1GB: proportionally more ops
+```
+
+## See Also
+
+- [README](./README.md) - Overview and core concepts
+- [Configuration](./configuration.md) - Setup and Cache Rules
+- [Patterns](./patterns.md) - Best practices and optimization
+- [Gotchas](./gotchas.md) - Common issues and troubleshooting
diff --git a/.agents/skills/cloudflare-deploy/references/cache-reserve/configuration.md b/.agents/skills/cloudflare-deploy/references/cache-reserve/configuration.md
new file mode 100644
index 0000000..84a6616
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cache-reserve/configuration.md
@@ -0,0 +1,169 @@
+# Cache Reserve Configuration
+
+## Dashboard Setup
+
+**Minimum steps to enable:**
+
+```bash
+# Navigate to dashboard
+https://dash.cloudflare.com/caching/cache-reserve
+
+# Click "Enable Storage Sync" or "Purchase" button
+```
+
+**Prerequisites:**
+- Paid Cache Reserve plan or Smart Shield Advanced required
+- Tiered Cache **required** for Cache Reserve to function optimally
+
+## API Configuration
+
+### REST API
+
+```bash
+# Enable
+curl -X PATCH "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/cache/cache_reserve" \
+ -H "Authorization: Bearer $API_TOKEN" -H "Content-Type: application/json" \
+ -d '{"value": "on"}'
+
+# Check status
+curl -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/cache/cache_reserve" \
+ -H "Authorization: Bearer $API_TOKEN"
+```
+
+### TypeScript SDK
+
+```bash
+npm install cloudflare
+```
+
+```typescript
+import Cloudflare from 'cloudflare';
+
+const client = new Cloudflare({
+ apiToken: process.env.CLOUDFLARE_API_TOKEN,
+});
+
+// Enable Cache Reserve
+await client.cache.cacheReserve.edit({
+ zone_id: 'abc123',
+ value: 'on',
+});
+
+// Get Cache Reserve status
+const status = await client.cache.cacheReserve.get({
+ zone_id: 'abc123',
+});
+console.log(status.value); // 'on' or 'off'
+```
+
+### Python SDK
+
+```bash
+pip install cloudflare
+```
+
+```python
+from cloudflare import Cloudflare
+
+client = Cloudflare(api_token=os.environ.get("CLOUDFLARE_API_TOKEN"))
+
+# Enable Cache Reserve
+client.cache.cache_reserve.edit(
+ zone_id="abc123",
+ value="on"
+)
+
+# Get Cache Reserve status
+status = client.cache.cache_reserve.get(zone_id="abc123")
+print(status.value) # 'on' or 'off'
+```
+
+### Terraform
+
+```hcl
+terraform {
+ required_providers {
+ cloudflare = {
+ source = "cloudflare/cloudflare"
+ version = "~> 4.0"
+ }
+ }
+}
+
+provider "cloudflare" {
+ api_token = var.cloudflare_api_token
+}
+
+resource "cloudflare_zone_cache_reserve" "example" {
+ zone_id = var.zone_id
+ enabled = true
+}
+
+# Tiered Cache is required for Cache Reserve
+resource "cloudflare_tiered_cache" "example" {
+ zone_id = var.zone_id
+ cache_type = "smart"
+}
+```
+
+### Pulumi
+
+```typescript
+import * as cloudflare from "@pulumi/cloudflare";
+
+// Enable Cache Reserve
+const cacheReserve = new cloudflare.ZoneCacheReserve("example", {
+ zoneId: zoneId,
+ enabled: true,
+});
+
+// Enable Tiered Cache (required)
+const tieredCache = new cloudflare.TieredCache("example", {
+ zoneId: zoneId,
+ cacheType: "smart",
+});
+```
+
+### Required API Token Permissions
+
+- `Zone Settings Read`
+- `Zone Settings Write`
+- `Zone Read`
+- `Zone Write`
+
+## Cache Rules Integration
+
+Control Cache Reserve eligibility via Cache Rules:
+
+```typescript
+// Enable for static assets
+{
+ action: 'set_cache_settings',
+ action_parameters: {
+ cache_reserve: { eligible: true, minimum_file_ttl: 86400 },
+ edge_ttl: { mode: 'override_origin', default: 86400 },
+ cache: true
+ },
+ expression: '(http.request.uri.path matches "\\.(jpg|png|webp|pdf|zip)$")'
+}
+
+// Disable for APIs
+{
+ action: 'set_cache_settings',
+ action_parameters: { cache_reserve: { eligible: false } },
+ expression: '(http.request.uri.path matches "^/api/")'
+}
+
+// Create via API: PUT to zones/{zone_id}/rulesets/phases/http_request_cache_settings/entrypoint
+```
+
+## Wrangler Integration
+
+Cache Reserve works automatically with Workers deployed via Wrangler. No special wrangler.jsonc configuration needed - enable Cache Reserve via Dashboard or API for the zone.
+
+## See Also
+
+- [README](./README.md) - Overview and core concepts
+- [API Reference](./api.md) - Purging and monitoring APIs
+- [Patterns](./patterns.md) - Best practices and optimization
+- [Gotchas](./gotchas.md) - Common issues and troubleshooting
diff --git a/.agents/skills/cloudflare-deploy/references/cache-reserve/gotchas.md b/.agents/skills/cloudflare-deploy/references/cache-reserve/gotchas.md
new file mode 100644
index 0000000..9995cf8
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cache-reserve/gotchas.md
@@ -0,0 +1,132 @@
+# Cache Reserve Gotchas
+
+## Common Errors
+
+### "Assets Not Being Cached in Cache Reserve"
+
+**Cause:** Asset is not cacheable, TTL < 10 hours, Content-Length header missing, or blocking headers present (Set-Cookie, Vary: *)
+**Solution:** Ensure minimum TTL of 10+ hours (`Cache-Control: public, max-age=36000`), add Content-Length header, remove Set-Cookie header, and set `Vary: Accept-Encoding` (not *)
+
+### "Range Requests Not Working" (Video Seeking Fails)
+
+**Cause:** Cache Reserve does **NOT** support range requests (HTTP 206 Partial Content)
+**Solution:** Range requests bypass Cache Reserve entirely. For video streaming with seeking:
+- Use edge cache only (shorter TTLs)
+- Consider R2 with direct access for range-heavy workloads
+- Accept that seekable content won't benefit from Cache Reserve persistence
+
+### "Origin Bandwidth Higher Than Expected"
+
+**Cause:** Cache Reserve fetches **uncompressed** content from origin, even though it serves compressed to visitors
+**Solution:**
+- If origin charges by bandwidth, factor in uncompressed transfer costs
+- Cache Reserve compresses for visitors automatically (saves visitor bandwidth)
+- Compare: origin egress savings vs higher uncompressed fetch costs
+
+### "Cloudflare Images Not Caching with Cache Reserve"
+
+**Cause:** Cloudflare Images with `Vary: Accept` header (format negotiation) is incompatible with Cache Reserve
+**Solution:**
+- Cache Reserve silently skips images with Vary for format negotiation
+- Original images (non-transformed) may still be eligible
+- Use Cloudflare Images variants or edge cache for transformed images
+
+### "High Class A Operations Costs"
+
+**Cause:** Frequent cache misses, short TTLs, or frequent revalidation
+**Solution:** Increase TTL for stable content (24+ hours), enable Tiered Cache to reduce direct Cache Reserve misses, or use stale-while-revalidate
+
+### "Purge Not Working as Expected"
+
+**Cause:** Purge by tag only triggers revalidation but doesn't remove from Cache Reserve storage
+**Solution:** Use purge by URL for immediate removal, or disable Cache Reserve then clear all data for complete removal
+
+### "O2O (Orange-to-Orange) Assets Not Caching"
+
+**Cause:** Orange-to-Orange (proxied zone requesting another proxied zone on Cloudflare) bypasses Cache Reserve
+**Solution:**
+- **What is O2O**: Zone A (proxied) → Zone B (proxied), both on Cloudflare
+- **Detection**: Check `cf-cache-status` for `BYPASS` and review request path
+- **Workaround**: Use R2 or direct origin access instead of O2O proxy chains
+
+### "Cache Reserve must be OFF before clearing data"
+
+**Cause:** Attempting to clear Cache Reserve data while it's still enabled
+**Solution:** Disable Cache Reserve first, wait briefly for propagation (5s), then clear data (can take up to 24 hours)
+
+## Limits
+
+| Limit | Value | Notes |
+|-------|-------|-------|
+| Minimum TTL | 10 hours (36000 seconds) | Assets with shorter TTL not eligible |
+| Default retention | 30 days (2592000 seconds) | Configurable |
+| Maximum file size | Same as R2 limits | No practical limit |
+| Purge/clear time | Up to 24 hours | Complete propagation time |
+| Plan requirement | Paid Cache Reserve or Smart Shield | Not available on free plans |
+| Content-Length header | Required | Must be present for eligibility |
+| Set-Cookie header | Blocks caching | Must not be present (or use private directive) |
+| Vary header | Cannot be * | Can use Vary: Accept-Encoding |
+| Image transformations | Variants not eligible | Original images only |
+| Range requests | NOT supported | HTTP 206 bypasses Cache Reserve |
+| Compression | Fetches uncompressed | Serves compressed to visitors |
+| Worker control | Zone-level only | Cannot control per-request |
+| O2O requests | Bypassed | Orange-to-Orange not eligible |
+
+## Additional Resources
+
+- **Official Docs**: https://developers.cloudflare.com/cache/advanced-configuration/cache-reserve/
+- **API Reference**: https://developers.cloudflare.com/api/resources/cache/subresources/cache_reserve/
+- **Cache Rules**: https://developers.cloudflare.com/cache/how-to/cache-rules/
+- **Workers Cache API**: https://developers.cloudflare.com/workers/runtime-apis/cache/
+- **R2 Documentation**: https://developers.cloudflare.com/r2/
+- **Smart Shield**: https://developers.cloudflare.com/smart-shield/
+- **Tiered Cache**: https://developers.cloudflare.com/cache/how-to/tiered-cache/
+
+## Troubleshooting Flowchart
+
+Asset not caching in Cache Reserve?
+
+```
+1. Is Cache Reserve enabled for zone?
+ → No: Enable via Dashboard or API
+ → Yes: Continue to step 2
+
+2. Is Tiered Cache enabled?
+ → No: Enable Tiered Cache (required!)
+ → Yes: Continue to step 3
+
+3. Does asset have TTL ≥ 10 hours?
+ → No: Increase via Cache Rules (edge_ttl override)
+ → Yes: Continue to step 4
+
+4. Is Content-Length header present?
+ → No: Fix origin to include Content-Length
+ → Yes: Continue to step 5
+
+5. Is Set-Cookie header present?
+ → Yes: Remove Set-Cookie or scope appropriately
+ → No: Continue to step 6
+
+6. Is Vary header set to *?
+ → Yes: Change to specific value (e.g., Accept-Encoding)
+ → No: Continue to step 7
+
+7. Is this a range request?
+ → Yes: Range requests bypass Cache Reserve (not supported)
+ → No: Continue to step 8
+
+8. Is this an O2O (Orange-to-Orange) request?
+ → Yes: O2O bypasses Cache Reserve
+ → No: Continue to step 9
+
+9. Check Logpush CacheReserveUsed field
+ → Filter logs to see if assets ever hit Cache Reserve
+ → Verify cf-cache-status header (should be HIT after first request)
+```
+
+## See Also
+
+- [README](./README.md) - Overview and core concepts
+- [Configuration](./configuration.md) - Setup and Cache Rules
+- [API Reference](./api.md) - Purging and monitoring
+- [Patterns](./patterns.md) - Best practices and optimization
diff --git a/.agents/skills/cloudflare-deploy/references/cache-reserve/patterns.md b/.agents/skills/cloudflare-deploy/references/cache-reserve/patterns.md
new file mode 100644
index 0000000..65f9488
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cache-reserve/patterns.md
@@ -0,0 +1,197 @@
+# Cache Reserve Patterns
+
+## Best Practices
+
+### 1. Always Enable Tiered Cache
+
+```typescript
+// Cache Reserve is designed for use WITH Tiered Cache
+const configuration = {
+ tieredCache: 'enabled', // Required for optimal performance
+ cacheReserve: 'enabled', // Works best with Tiered Cache
+
+ hierarchy: [
+ 'Lower-Tier Cache (visitor)',
+ 'Upper-Tier Cache (origin region)',
+ 'Cache Reserve (persistent)',
+ 'Origin'
+ ]
+};
+```
+
+### 2. Set Appropriate Cache-Control Headers
+
+```typescript
+// Origin response headers for Cache Reserve eligibility
+const originHeaders = {
+ 'Cache-Control': 'public, max-age=86400', // 24hr (minimum 10hr)
+ 'Content-Length': '1024000', // Required
+ 'Cache-Tag': 'images,product-123', // Optional: purging
+ 'ETag': '"abc123"', // Optional: revalidation
+ // Avoid: 'Set-Cookie' and 'Vary: *' prevent caching
+};
+```
+
+### 3. Use Cache Rules for Fine-Grained Control
+
+```typescript
+// Different TTLs for different content types
+const cacheRules = [
+ {
+ description: 'Long-term cache for immutable assets',
+ expression: '(http.request.uri.path matches "^/static/.*\\.[a-f0-9]{8}\\.")',
+ action_parameters: {
+ cache_reserve: { eligible: true },
+ edge_ttl: { mode: 'override_origin', default: 2592000 }, // 30 days
+ cache: true
+ }
+ },
+ {
+ description: 'Moderate cache for regular images',
+ expression: '(http.request.uri.path matches "\\.(jpg|png|webp)$")',
+ action_parameters: {
+ cache_reserve: { eligible: true },
+ edge_ttl: { mode: 'override_origin', default: 86400 }, // 24 hours
+ cache: true
+ }
+ },
+ {
+ description: 'Exclude API from Cache Reserve',
+ expression: '(http.request.uri.path matches "^/api/")',
+ action_parameters: { cache_reserve: { eligible: false }, cache: false }
+ }
+];
+```
+
+### 4. Making Assets Cache Reserve Eligible from Workers
+
+**Note**: This modifies response headers to meet eligibility criteria but does NOT directly control Cache Reserve storage (which is zone-level automatic).
+
+```typescript
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const response = await fetch(request);
+ if (!response.ok) return response;
+
+ const headers = new Headers(response.headers);
+ headers.set('Cache-Control', 'public, max-age=36000'); // 10hr minimum
+ headers.delete('Set-Cookie'); // Blocks caching
+
+ // Ensure Content-Length present
+ if (!headers.has('Content-Length')) {
+ const blob = await response.blob();
+ headers.set('Content-Length', blob.size.toString());
+ return new Response(blob, { status: response.status, headers });
+ }
+
+ return new Response(response.body, { status: response.status, headers });
+ }
+};
+```
+
+### 5. Hostname Best Practices
+
+Use Worker's hostname for efficient caching - avoid overriding hostname unnecessarily.
+
+## Architecture Patterns
+
+### Multi-Tier Caching + Immutable Assets
+
+```typescript
+// Optimal: L1 (visitor) → L2 (region) → L3 (Cache Reserve) → Origin
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const url = new URL(request.url);
+ const isImmutable = /\.[a-f0-9]{8,}\.(js|css|jpg|png|woff2)$/.test(url.pathname);
+ const response = await fetch(request);
+
+ if (isImmutable) {
+ const headers = new Headers(response.headers);
+ headers.set('Cache-Control', 'public, max-age=31536000, immutable');
+ return new Response(response.body, { status: response.status, headers });
+ }
+ return response;
+ }
+};
+```
+
+## Cost Optimization
+
+### Cost Calculator
+
+```typescript
+interface CacheReserveEstimate {
+ avgAssetSizeGB: number;
+ uniqueAssets: number;
+ monthlyReads: number;
+ monthlyWrites: number;
+ originEgressCostPerGB: number; // e.g., AWS: $0.09/GB
+}
+
+function estimateMonthlyCost(input: CacheReserveEstimate) {
+ // Cache Reserve pricing
+ const storageCostPerGBMonth = 0.015;
+ const classAPerMillion = 4.50; // writes
+ const classBPerMillion = 0.36; // reads
+
+ // Calculate Cache Reserve costs
+ const totalStorageGB = input.avgAssetSizeGB * input.uniqueAssets;
+ const storageCost = totalStorageGB * storageCostPerGBMonth;
+ const writeCost = (input.monthlyWrites / 1_000_000) * classAPerMillion;
+ const readCost = (input.monthlyReads / 1_000_000) * classBPerMillion;
+
+ const cacheReserveCost = storageCost + writeCost + readCost;
+
+ // Calculate origin egress cost (what you'd pay without Cache Reserve)
+ const totalTrafficGB = (input.monthlyReads * input.avgAssetSizeGB);
+ const originEgressCost = totalTrafficGB * input.originEgressCostPerGB;
+
+ // Savings calculation
+ const savings = originEgressCost - cacheReserveCost;
+ const savingsPercent = ((savings / originEgressCost) * 100).toFixed(1);
+
+ return {
+ cacheReserveCost: `$${cacheReserveCost.toFixed(2)}`,
+ originEgressCost: `$${originEgressCost.toFixed(2)}`,
+ monthlySavings: `$${savings.toFixed(2)}`,
+ savingsPercent: `${savingsPercent}%`,
+ breakdown: {
+ storage: `$${storageCost.toFixed(2)}`,
+ writes: `$${writeCost.toFixed(2)}`,
+ reads: `$${readCost.toFixed(2)}`,
+ }
+ };
+}
+
+// Example: Media library
+const mediaLibrary = estimateMonthlyCost({
+ avgAssetSizeGB: 0.005, // 5MB images
+ uniqueAssets: 10_000,
+ monthlyReads: 5_000_000,
+ monthlyWrites: 50_000,
+ originEgressCostPerGB: 0.09, // AWS S3
+});
+
+console.log(mediaLibrary);
+// {
+// cacheReserveCost: "$2.78",
+// originEgressCost: "$2250.00",
+// monthlySavings: "$2247.23",
+// savingsPercent: "99.9%",
+// breakdown: { storage: "$0.75", writes: "$0.23", reads: "$1.80" }
+// }
+```
+
+### Optimization Guidelines
+
+- **Set appropriate TTLs**: 10hr minimum, 24hr+ optimal for stable content, 30d max cautiously
+- **Cache high-value stable assets**: Images, media, fonts, archives, documentation
+- **Exclude frequently changing**: APIs, user-specific content, real-time data
+- **Compression note**: Cache Reserve fetches uncompressed from origin, serves compressed to visitors - factor in origin egress costs
+
+## See Also
+
+- [README](./README.md) - Overview and core concepts
+- [Configuration](./configuration.md) - Setup and Cache Rules
+- [API Reference](./api.md) - Purging and monitoring
+- [Gotchas](./gotchas.md) - Common issues and troubleshooting
diff --git a/.agents/skills/cloudflare-deploy/references/containers/README.md b/.agents/skills/cloudflare-deploy/references/containers/README.md
new file mode 100644
index 0000000..a6c488d
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/containers/README.md
@@ -0,0 +1,85 @@
+# Cloudflare Containers Skill Reference
+
+**APPLIES TO: Cloudflare Containers ONLY - NOT general Cloudflare Workers**
+
+Use when working with Cloudflare Containers: deploying containerized apps on Workers platform, configuring container-enabled Durable Objects, managing container lifecycle, or implementing stateful/stateless container patterns.
+
+## Beta Status
+
+⚠️ Containers is currently in **beta**. API may change without notice. No SLA guarantees. Custom instance types added Jan 2026.
+
+## Core Concepts
+
+**Container as Durable Object:** Each container is a Durable Object with persistent identity. Accessed via `getByName(id)` or `getRandom()`.
+
+**Image deployment:** Images pre-fetched globally. Deployments use rolling strategy (not instant like Workers).
+
+**Lifecycle:** cold start (2-3s) → running → `sleepAfter` timeout → stopped. No autoscaling - manual load balancing via `getRandom()`.
+
+**Persistent identity, ephemeral disk:** Container ID persists, but disk resets on stop. Use Durable Object storage for persistence.
+
+## Quick Start
+
+```typescript
+import { Container } from "@cloudflare/containers";
+
+export class MyContainer extends Container {
+ defaultPort = 8080;
+ sleepAfter = "30m";
+}
+
+export default {
+ async fetch(request: Request, env: Env) {
+ const container = env.MY_CONTAINER.getByName("instance-1");
+ await container.startAndWaitForPorts();
+ return container.fetch(request);
+ }
+};
+```
+
+## Reading Order
+
+| Task | Files |
+|------|-------|
+| Setup new container project | README → configuration.md |
+| Implement container logic | README → api.md → patterns.md |
+| Choose routing pattern | patterns.md (routing section) |
+| Debug issues | gotchas.md |
+| Production hardening | gotchas.md → patterns.md (lifecycle) |
+
+## Routing Decision Tree
+
+**How should requests reach containers?**
+
+- **Same user/session → same container:** Use `getByName(sessionId)` for session affinity
+- **Stateless, spread load:** Use `getRandom()` for load balancing
+- **Job per container:** Use `getByName(jobId)` + explicit lifecycle management
+- **Single global instance:** Use `getByName("singleton")`
+
+## When to Use Containers vs Workers
+
+**Use Containers when:**
+- Need stateful, long-lived processes (sessions, WebSockets, games)
+- Running existing containerized apps (Node.js, Python, custom binaries)
+- Need filesystem access or specific system dependencies
+- Per-user/session isolation with dedicated compute
+
+**Use Workers when:**
+- Stateless HTTP handlers
+- Sub-millisecond cold starts required
+- Auto-scaling to zero critical
+- Simple request/response patterns
+
+## In This Reference
+
+- **[configuration.md](configuration.md)** - Wrangler config, instance types, Container class properties, environment variables, account limits
+- **[api.md](api.md)** - Container class API, startup methods, communication (HTTP/TCP/WebSocket), routing helpers, lifecycle hooks, scheduling, state inspection
+- **[patterns.md](patterns.md)** - Routing patterns (session affinity, load balancing, singleton), WebSocket forwarding, graceful shutdown, Workflow/Queue integration
+- **[gotchas.md](gotchas.md)** - Critical gotchas (WebSocket, startup methods), common errors with solutions, specific limits, beta caveats
+
+## See Also
+
+- [Durable Objects](../durable-objects/) - Containers extend Durable Objects
+- [Workflows](../workflows/) - Orchestrate container operations
+- [Queues](../queues/) - Trigger containers from queue messages
+- [Cloudflare Docs](https://developers.cloudflare.com/containers/)
diff --git a/.agents/skills/cloudflare-deploy/references/containers/api.md b/.agents/skills/cloudflare-deploy/references/containers/api.md
new file mode 100644
index 0000000..c41f721
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/containers/api.md
@@ -0,0 +1,187 @@
+## Container Class API
+
+```typescript
+import { Container } from "@cloudflare/containers";
+
+export class MyContainer extends Container {
+ defaultPort = 8080;
+ requiredPorts = [8080];
+ sleepAfter = "30m";
+ enableInternet = true;
+ pingEndpoint = "/health";
+ envVars = {};
+ entrypoint = [];
+
+ onStart() { /* container started */ }
+ onStop() { /* container stopping */ }
+ onError(error: Error) { /* container error */ }
+ onActivityExpired(): boolean { /* timeout, return true to stay alive */ }
+ async alarm() { /* scheduled task */ }
+}
+```
+
+## Routing
+
+**getByName(id)** - Named instance for session affinity, per-user state
+**getRandom()** - Random instance for load balancing stateless services
+
+```typescript
+const container = env.MY_CONTAINER.getByName("user-123");
+const container = env.MY_CONTAINER.getRandom();
+```
+
+## Startup Methods
+
+### start() - Basic start (8s timeout)
+
+```typescript
+await container.start();
+await container.start({ envVars: { KEY: "value" } });
+```
+
+Returns when **process starts**, NOT when ports ready. Use for fire-and-forget.
+
+### startAndWaitForPorts() - Recommended (20s timeout)
+
+```typescript
+await container.startAndWaitForPorts(); // Uses requiredPorts
+await container.startAndWaitForPorts({ ports: [8080, 9090] });
+await container.startAndWaitForPorts({
+ ports: [8080],
+ startOptions: { envVars: { KEY: "value" } }
+});
+```
+
+Returns when **ports listening**. Use before HTTP/TCP requests.
+
+**Port resolution:** explicit ports → requiredPorts → defaultPort → port 33
+
+### waitForPort() - Wait for specific port
+
+```typescript
+await container.waitForPort(8080);
+await container.waitForPort(8080, { timeout: 30000 });
+```
+
+## Communication
+
+### fetch() - HTTP with WebSocket support
+
+```typescript
+// ✅ Supports WebSocket upgrades
+const response = await container.fetch(request);
+const response = await container.fetch("http://container/api", {
+ method: "POST",
+ body: JSON.stringify({ data: "value" })
+});
+```
+
+**Use for:** All HTTP, especially WebSocket.
+
+### containerFetch() - HTTP only (no WebSocket)
+
+```typescript
+// ❌ No WebSocket support
+const response = await container.containerFetch(request);
+```
+
+**⚠️ Critical:** Use `fetch()` for WebSocket, not `containerFetch()`.
+
+### TCP Connections
+
+```typescript
+const port = this.ctx.container.getTcpPort(8080);
+const conn = port.connect();
+await conn.opened;
+
+if (request.body) await request.body.pipeTo(conn.writable);
+return new Response(conn.readable);
+```
+
+### switchPort() - Change default port
+
+```typescript
+this.switchPort(8081); // Subsequent fetch() uses this port
+```
+
+## Lifecycle Hooks
+
+### onStart()
+
+Called when container process starts (ports may not be ready). Runs in `blockConcurrencyWhile` - no concurrent requests.
+
+```typescript
+onStart() {
+ console.log("Container starting");
+}
+```
+
+### onStop()
+
+Called when SIGTERM received. 15 minutes until SIGKILL. Use for graceful shutdown.
+
+```typescript
+onStop() {
+ // Save state, close connections, flush logs
+}
+```
+
+### onError()
+
+Called when container crashes or fails to start.
+
+```typescript
+onError(error: Error) {
+ console.error("Container error:", error);
+}
+```
+
+### onActivityExpired()
+
+Called when `sleepAfter` timeout reached. Return `true` to stay alive, `false` to stop.
+
+```typescript
+onActivityExpired(): boolean {
+ if (this.hasActiveConnections()) return true; // Keep alive
+ return false; // OK to stop
+}
+```
+
+## Scheduling
+
+```typescript
+export class ScheduledContainer extends Container {
+ async fetch(request: Request) {
+ await this.schedule(Date.now() + 60000, "runTask"); // 1 minute
+ await this.schedule("2026-01-28T00:00:00Z", "runTask"); // ISO string
+ return new Response("Scheduled");
+ }
+
+ async runTask() {
+ // Named callback invoked when the schedule fires (SQLite-backed, survives restarts)
+ }
+}
+```
+
+**⚠️ Don't override `alarm()` directly - the `schedule()` helper drives `alarm()` internally; register a named callback method instead.**
+
+## State Inspection
+
+### External state check
+
+```typescript
+const state = await container.getState();
+// state.status: "starting" | "running" | "stopping" | "stopped"
+```
+
+### Internal state check
+
+```typescript
+export class MyContainer extends Container {
+ async fetch(request: Request) {
+ if (this.ctx.container.running) { ... }
+ }
+}
+```
+
+**⚠️ Use `getState()` for external checks, `ctx.container.running` for internal.**
diff --git a/.agents/skills/cloudflare-deploy/references/containers/configuration.md b/.agents/skills/cloudflare-deploy/references/containers/configuration.md
new file mode 100644
index 0000000..fd39cc4
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/containers/configuration.md
@@ -0,0 +1,188 @@
+## Wrangler Configuration
+
+### Basic Container Config
+
+```jsonc
+{
+ "name": "my-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2026-01-10",
+ "containers": [
+ {
+ "class_name": "MyContainer",
+ "image": "./Dockerfile", // Path to Dockerfile or directory with Dockerfile
+ "instance_type": "standard-1", // Predefined or custom (see below)
+ "max_instances": 10
+ }
+ ],
+ "durable_objects": {
+ "bindings": [
+ {
+ "name": "MY_CONTAINER",
+ "class_name": "MyContainer"
+ }
+ ]
+ },
+ "migrations": [
+ {
+ "tag": "v1",
+ "new_sqlite_classes": ["MyContainer"] // Must use new_sqlite_classes
+ }
+ ]
+}
+```
+
+Key config requirements:
+- `image` - Path to Dockerfile or directory containing Dockerfile
+- `class_name` - Must match Container class export name
+- `max_instances` - Max concurrent container instances
+- Must configure Durable Objects binding AND migrations
+
+### Instance Types
+
+#### Predefined Types
+
+| Type | vCPU | Memory | Disk |
+|------|------|--------|------|
+| lite | 1/16 | 256 MiB | 2 GB |
+| basic | 1/4 | 1 GiB | 4 GB |
+| standard-1 | 1/2 | 4 GiB | 8 GB |
+| standard-2 | 1 | 6 GiB | 12 GB |
+| standard-3 | 2 | 8 GiB | 16 GB |
+| standard-4 | 4 | 12 GiB | 20 GB |
+
+```jsonc
+{
+ "containers": [
+ {
+ "class_name": "MyContainer",
+ "image": "./Dockerfile",
+ "instance_type": "standard-2" // Use predefined type
+ }
+ ]
+}
+```
+
+#### Custom Types (Jan 2026 Feature)
+
+```jsonc
+{
+ "containers": [
+ {
+ "class_name": "MyContainer",
+ "image": "./Dockerfile",
+ "instance_type_custom": {
+ "vcpu": 2, // 1-4 vCPU
+ "memory_mib": 8192, // 512-12288 MiB (up to 12 GiB)
+ "disk_mib": 16384 // 2048-20480 MiB (up to 20 GB)
+ }
+ }
+ ]
+}
+```
+
+**Custom type constraints:**
+- Minimum 3 GiB memory per vCPU
+- Maximum 2 GB disk per 1 GiB memory
+- Max 4 vCPU, 12 GiB memory, 20 GB disk per container
+
+### Account Limits
+
+| Resource | Limit | Notes |
+|----------|-------|-------|
+| Total memory (all containers) | 400 GiB | Across all running containers |
+| Total vCPU (all containers) | 100 | Across all running containers |
+| Total disk (all containers) | 2 TB | Across all running containers |
+| Image storage per account | 50 GB | Stored container images |
+
+### Container Class Properties
+
+```typescript
+import { Container } from "@cloudflare/containers";
+
+export class MyContainer extends Container {
+ // Port Configuration
+ defaultPort = 8080; // Default port for fetch() calls
+ requiredPorts = [8080, 9090]; // Ports to wait for in startAndWaitForPorts()
+
+ // Lifecycle
+ sleepAfter = "30m"; // Inactivity timeout (5m, 30m, 2h, etc.)
+
+ // Network
+ enableInternet = true; // Allow outbound internet access
+
+ // Health Check
+ pingEndpoint = "/health"; // Health check endpoint path
+
+ // Environment
+ envVars = { // Environment variables passed to container
+ NODE_ENV: "production",
+ LOG_LEVEL: "info"
+ };
+
+ // Startup
+ entrypoint = ["/bin/start.sh"]; // Override image entrypoint (optional)
+}
+```
+
+**Property details:**
+
+- **`defaultPort`**: Port used when calling `container.fetch()` without explicit port. Falls back to port 33 if not set.
+
+- **`requiredPorts`**: Array of ports that must be listening before `startAndWaitForPorts()` returns. First port becomes default if `defaultPort` not set.
+
+- **`sleepAfter`**: Duration string (e.g., "5m", "30m", "2h"). Container stops after this period of inactivity. Timer resets on each request.
+
+- **`enableInternet`**: Boolean. If `true`, container can make outbound HTTP/TCP requests.
+
+- **`pingEndpoint`**: Path used for health checks. Should respond with 2xx status.
+
+- **`envVars`**: Object of environment variables. Merged with runtime-provided vars (see below).
+
+- **`entrypoint`**: Array of strings. Overrides container image's CMD/ENTRYPOINT.
+
+### Runtime Environment Variables
+
+Cloudflare automatically provides these environment variables to containers:
+
+| Variable | Description |
+|----------|-------------|
+| `CLOUDFLARE_APPLICATION_ID` | Worker application ID |
+| `CLOUDFLARE_COUNTRY_A2` | Two-letter country code of request origin |
+| `CLOUDFLARE_LOCATION` | Cloudflare data center location |
+| `CLOUDFLARE_REGION` | Region identifier |
+| `CLOUDFLARE_DURABLE_OBJECT_ID` | Container's Durable Object ID |
+
+Custom `envVars` from Container class are merged with these. Custom vars override runtime vars if names conflict.
+
+### Image Management
+
+**Distribution model:** Images pre-fetched to all global locations before deployment. Ensures fast cold starts (2-3s typical).
+
+**Rolling deploys:** Unlike Workers (instant), container deployments roll out gradually. Old versions continue running during rollout.
+
+**Ephemeral disk:** Container disk is ephemeral and resets on each stop. Use Durable Object storage (`this.ctx.storage`) for persistence.
+
+## wrangler.toml Format
+
+```toml
+name = "my-worker"
+main = "src/index.ts"
+compatibility_date = "2026-01-10"
+
+[[containers]]
+class_name = "MyContainer"
+image = "./Dockerfile"
+instance_type = "standard-2"
+max_instances = 10
+
+[[durable_objects.bindings]]
+name = "MY_CONTAINER"
+class_name = "MyContainer"
+
+[[migrations]]
+tag = "v1"
+new_sqlite_classes = ["MyContainer"]
+```
+
+Both `wrangler.jsonc` and `wrangler.toml` are supported. Use `wrangler.jsonc` for comments and better IDE support.
diff --git a/.agents/skills/cloudflare-deploy/references/containers/gotchas.md b/.agents/skills/cloudflare-deploy/references/containers/gotchas.md
new file mode 100644
index 0000000..306e8c5
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/containers/gotchas.md
@@ -0,0 +1,178 @@
+## Critical Gotchas
+
+### ⚠️ WebSocket: fetch() vs containerFetch()
+
+**Problem:** WebSocket connections fail silently
+
+**Cause:** `containerFetch()` doesn't support WebSocket upgrades
+
+**Fix:** Always use `fetch()` for WebSocket
+
+```typescript
+// ❌ WRONG
+return container.containerFetch(request);
+
+// ✅ CORRECT
+return container.fetch(request);
+```
+
+### ⚠️ startAndWaitForPorts() vs start()
+
+**Problem:** "connection refused" after `start()`
+
+**Cause:** `start()` returns when process starts, NOT when ports ready
+
+**Fix:** Use `startAndWaitForPorts()` before requests
+
+```typescript
+// ❌ WRONG
+await container.start();
+return container.fetch(request);
+
+// ✅ CORRECT
+await container.startAndWaitForPorts();
+return container.fetch(request);
+```
+
+### ⚠️ Activity Timeout on Long Operations
+
+**Problem:** Container stops during long work
+
+**Cause:** `sleepAfter` based on request activity, not internal work
+
+**Fix:** Renew timeout by touching storage
+
+```typescript
+const interval = setInterval(() => {
+ this.ctx.storage.put("keepalive", Date.now());
+}, 60000);
+
+try {
+ await this.doLongWork(data);
+} finally {
+ clearInterval(interval);
+}
+```
+
+### ⚠️ blockConcurrencyWhile for Startup
+
+**Problem:** Race conditions during initialization
+
+**Fix:** Use `blockConcurrencyWhile` for atomic initialization
+
+```typescript
+await this.ctx.blockConcurrencyWhile(async () => {
+ if (!this.initialized) {
+ await this.startAndWaitForPorts();
+ this.initialized = true;
+ }
+});
+```
+
+### ⚠️ Lifecycle Hooks Block Requests
+
+**Problem:** Container unresponsive during `onStart()`
+
+**Cause:** Hooks run in `blockConcurrencyWhile` - no concurrent requests
+
+**Fix:** Keep hooks fast, avoid long operations
+
+### ⚠️ Don't Override alarm() When Using schedule()
+
+**Problem:** Scheduled tasks don't execute
+
+**Cause:** `schedule()` uses `alarm()` internally
+
+**Fix:** Remove your custom `alarm()` override and register scheduled work through `schedule()` - the helper manages `alarm()` internally
+
+## Common Errors
+
+### "Container start timeout"
+
+**Cause:** Container took >8s (`start()`) or >20s (`startAndWaitForPorts()`)
+
+**Solutions:**
+- Optimize image (smaller base, fewer layers)
+- Check `entrypoint` correct
+- Verify app listens on correct ports
+- Increase timeout if needed
+
+### "Port not available"
+
+**Cause:** Calling `fetch()` before port ready
+
+**Solution:** Use `startAndWaitForPorts()`
+
+### "Container memory exceeded"
+
+**Cause:** Using more memory than instance type allows
+
+**Solutions:**
+- Use larger instance type (standard-2, standard-3, standard-4)
+- Optimize app memory usage
+- Use custom instance type
+
+```jsonc
+"instance_type_custom": {
+ "vcpu": 2,
+ "memory_mib": 8192
+}
+```
+
+### "Max instances reached"
+
+**Cause:** All `max_instances` slots in use
+
+**Solutions:**
+- Increase `max_instances`
+- Implement proper `sleepAfter`
+- Use `getRandom()` for distribution
+- Check for instance leaks
+
+### "No container instance available"
+
+**Cause:** Account capacity limits reached
+
+**Solutions:**
+- Check account limits
+- Review instance types across containers
+- Contact Cloudflare support
+
+## Limits
+
+| Resource | Limit | Notes |
+|----------|-------|-------|
+| Cold start | 2-3s | Image pre-fetched globally |
+| Graceful shutdown | 15 min | SIGTERM → SIGKILL |
+| `start()` timeout | 8s | Process start |
+| `startAndWaitForPorts()` timeout | 20s | Port ready |
+| Max vCPU per container | 4 | standard-4 or custom |
+| Max memory per container | 12 GiB | standard-4 or custom |
+| Max disk per container | 20 GB | Ephemeral, resets |
+| Account total memory | 400 GiB | All containers |
+| Account total vCPU | 100 | All containers |
+| Account total disk | 2 TB | All containers |
+| Image storage | 50 GB | Per account |
+| Disk persistence | None | Use DO storage |
+
+## Best Practices
+
+1. **Use `startAndWaitForPorts()` by default** - Prevents port errors
+2. **Set appropriate `sleepAfter`** - Balance resources vs cold starts
+3. **Use `fetch()` for WebSocket** - Not `containerFetch()`
+4. **Design for restarts** - Ephemeral disk, implement graceful shutdown
+5. **Monitor resources** - Stay within account limits
+6. **Keep hooks fast** - Run in `blockConcurrencyWhile`
+7. **Renew activity for long ops** - Touch storage to prevent timeout
+
+## Beta Caveats
+
+⚠️ Containers in **beta**:
+
+- **API may change** without notice
+- **No SLA** guarantees
+- **Limited regions** initially
+- **No autoscaling** - manual via `getRandom()`
+- **Rolling deploys** only (not instant like Workers)
+
+Plan for API changes, test thoroughly before production.
diff --git a/.agents/skills/cloudflare-deploy/references/containers/patterns.md b/.agents/skills/cloudflare-deploy/references/containers/patterns.md
new file mode 100644
index 0000000..9204294
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/containers/patterns.md
@@ -0,0 +1,202 @@
+## Routing Patterns
+
+### Session Affinity (Stateful)
+
+```typescript
+export class SessionBackend extends Container {
+ defaultPort = 3000;
+ sleepAfter = "30m";
+}
+
+export default {
+ async fetch(request: Request, env: Env) {
+ const sessionId = request.headers.get("X-Session-ID") || crypto.randomUUID();
+ const container = env.SESSION_BACKEND.getByName(sessionId);
+ await container.startAndWaitForPorts();
+ return container.fetch(request);
+ }
+};
+```
+
+**Use:** User sessions, WebSocket, stateful games, per-user caching.
+
+### Load Balancing (Stateless)
+
+```typescript
+export default {
+ async fetch(request: Request, env: Env) {
+ const container = env.STATELESS_API.getRandom();
+ await container.startAndWaitForPorts();
+ return container.fetch(request);
+ }
+};
+```
+
+**Use:** Stateless HTTP APIs, CPU-intensive work, read-only queries.
+
+### Singleton Pattern
+
+```typescript
+export default {
+ async fetch(request: Request, env: Env) {
+ const container = env.GLOBAL_SERVICE.getByName("singleton");
+ await container.startAndWaitForPorts();
+ return container.fetch(request);
+ }
+};
+```
+
+**Use:** Global cache, centralized coordinator, single source of truth.
+
+## WebSocket Forwarding
+
+```typescript
+export default {
+ async fetch(request: Request, env: Env) {
+ if (request.headers.get("Upgrade") === "websocket") {
+ const sessionId = request.headers.get("X-Session-ID") || crypto.randomUUID();
+ const container = env.WS_BACKEND.getByName(sessionId);
+ await container.startAndWaitForPorts();
+
+ // ⚠️ MUST use fetch(), not containerFetch()
+ return container.fetch(request);
+ }
+ return new Response("Not a WebSocket request", { status: 400 });
+ }
+};
+```
+
+**⚠️ Critical:** Always use `fetch()` for WebSocket.
+
+## Graceful Shutdown
+
+```typescript
+export class GracefulContainer extends Container {
+ private connections = new Set<WebSocket>();
+
+ onStop() {
+ // SIGTERM received, 15 minutes until SIGKILL
+ for (const ws of this.connections) {
+ ws.close(1001, "Server shutting down");
+ }
+ this.ctx.storage.put("shutdown-time", Date.now());
+ }
+
+ onActivityExpired(): boolean {
+ return this.connections.size > 0; // Keep alive if connections
+ }
+}
+```
+
+## Concurrent Request Handling
+
+```typescript
+export class SafeContainer extends Container {
+ private initialized = false;
+
+ async fetch(request: Request) {
+ await this.ctx.blockConcurrencyWhile(async () => {
+ if (!this.initialized) {
+ await this.startAndWaitForPorts();
+ this.initialized = true;
+ }
+ });
+ return super.fetch(request);
+ }
+}
+```
+
+**Use:** One-time initialization, preventing concurrent startup.
+
+## Activity Timeout Renewal
+
+```typescript
+export class LongRunningContainer extends Container {
+ sleepAfter = "5m";
+
+ async processLongJob(data: unknown) {
+ const interval = setInterval(() => {
+ this.ctx.storage.put("keepalive", Date.now());
+ }, 60000);
+
+ try {
+ await this.doLongWork(data);
+ } finally {
+ clearInterval(interval);
+ }
+ }
+}
+```
+
+**Use:** Long operations exceeding `sleepAfter`.
+
+## Multiple Port Routing
+
+```typescript
+export class MultiPortContainer extends Container {
+ requiredPorts = [8080, 8081, 9090];
+
+ async fetch(request: Request) {
+ const path = new URL(request.url).pathname;
+ if (path.startsWith("/grpc")) this.switchPort(8081);
+ else if (path.startsWith("/metrics")) this.switchPort(9090);
+ return super.fetch(request);
+ }
+}
+```
+
+**Use:** Multi-protocol services (HTTP + gRPC), separate metrics endpoints.
+
+## Workflow Integration
+
+```typescript
+import { WorkflowEntrypoint } from "cloudflare:workers";
+
+export class ProcessingWorkflow extends WorkflowEntrypoint {
+ async run(event, step) {
+ const container = this.env.PROCESSOR.getByName(event.payload.jobId);
+
+ await step.do("start", async () => {
+ await container.startAndWaitForPorts();
+ });
+
+ const result = await step.do("process", async () => {
+ return container.fetch("http://container/process", {
+ method: "POST",
+ body: JSON.stringify(event.payload.data)
+ }).then(r => r.json());
+ });
+
+ return result;
+ }
+}
+```
+
+**Use:** Orchestrating multi-step container operations, durable execution.
+
+## Queue Consumer Integration
+
+```typescript
+export default {
+ async queue(batch, env) {
+ for (const msg of batch.messages) {
+ try {
+ const container = env.PROCESSOR.getByName(msg.body.jobId);
+ await container.startAndWaitForPorts();
+
+ const response = await container.fetch("http://container/process", {
+ method: "POST",
+ body: JSON.stringify(msg.body)
+ });
+
+ response.ok ? msg.ack() : msg.retry();
+ } catch (err) {
+ console.error("Queue processing error:", err);
+ msg.retry();
+ }
+ }
+ }
+};
+```
+
+**Use:** Asynchronous job processing, batch operations, event-driven execution.
diff --git a/.agents/skills/cloudflare-deploy/references/cron-triggers/README.md b/.agents/skills/cloudflare-deploy/references/cron-triggers/README.md
new file mode 100644
index 0000000..67c00f8
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cron-triggers/README.md
@@ -0,0 +1,99 @@
+# Cloudflare Cron Triggers
+
+Schedule Workers execution using cron expressions. Runs on Cloudflare's global network during underutilized periods.
+
+## Key Features
+
+- **UTC-only execution** - All schedules run on UTC time
+- **5-field cron syntax** - Quartz scheduler extensions (L, W, #)
+- **Global propagation** - 15min deployment delay
+- **At-least-once delivery** - Rare duplicate executions possible
+- **Workflow integration** - Trigger long-running multi-step tasks
+- **Green Compute** - Optional account-level setting to run cron triggers in facilities powered by renewable energy
+
+## Cron Syntax
+
+```
+ ┌─────────── minute (0-59)
+ │ ┌───────── hour (0-23)
+ │ │ ┌─────── day of month (1-31)
+ │ │ │ ┌───── month (1-12, JAN-DEC)
+ │ │ │ │ ┌─── day of week (1-7, SUN-SAT, 1=Sunday)
+ * * * * *
+```
+
+**Special chars:** `*` (any), `,` (list), `-` (range), `/` (step), `L` (last), `W` (weekday), `#` (nth)
+
+## Common Schedules
+
+```bash
+*/5 * * * * # Every 5 minutes
+0 * * * * # Hourly
+0 2 * * * # Daily 2am UTC (off-peak)
+0 9 * * MON-FRI # Weekdays 9am UTC
+0 0 1 * * # Monthly 1st midnight UTC
+0 9 L * * # Last day of month 9am UTC
+0 10 * * MON#2 # 2nd Monday 10am UTC
+*/10 9-17 * * MON-FRI # Every 10min, 9am-5pm weekdays
+```
+
+## Quick Start
+
+**wrangler.jsonc:**
+```jsonc
+{
+ "name": "my-cron-worker",
+ "triggers": {
+ "crons": ["*/5 * * * *", "0 2 * * *"]
+ }
+}
+```
+
+**Handler:**
+```typescript
+export default {
+ async scheduled(
+ controller: ScheduledController,
+ env: Env,
+ ctx: ExecutionContext,
+ ): Promise<void> {
+ console.log("Cron:", controller.cron);
+ console.log("Time:", new Date(controller.scheduledTime));
+
+ ctx.waitUntil(asyncTask(env)); // Non-blocking
+ },
+};
+```
+
+**Test locally:**
+```bash
+npx wrangler dev
+curl "http://localhost:8787/__scheduled?cron=*/5+*+*+*+*"
+```
+
+## Limits
+
+- **Free:** 3 triggers/worker, 10ms CPU
+- **Paid:** Unlimited triggers, 50ms CPU
+- **Propagation:** 15min global deployment
+- **Timezone:** UTC only
+
+## Reading Order
+
+**New to cron triggers?** Start here:
+1. This README - Overview and quick start
+2. [configuration.md](./configuration.md) - Set up your first cron trigger
+3. [api.md](./api.md) - Understand the handler API
+4. [patterns.md](./patterns.md) - Common use cases and examples
+
+**Troubleshooting?** Jump to [gotchas.md](./gotchas.md)
+
+## In This Reference
+- [configuration.md](./configuration.md) - wrangler config, env-specific schedules, Green Compute
+- [api.md](./api.md) - ScheduledController, noRetry(), waitUntil, testing patterns
+- [patterns.md](./patterns.md) - Use cases, monitoring, queue integration, Durable Objects
+- [gotchas.md](./gotchas.md) - Timezone issues, idempotency, security, testing
+
+## See Also
+- [workflows](../workflows/) - Alternative for long-running scheduled tasks
+- [workers](../workers/) - Worker runtime documentation
diff --git a/.agents/skills/cloudflare-deploy/references/cron-triggers/api.md b/.agents/skills/cloudflare-deploy/references/cron-triggers/api.md
new file mode 100644
index 0000000..b0242d7
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cron-triggers/api.md
@@ -0,0 +1,196 @@
+# Cron Triggers API
+
+## Basic Handler
+
+```typescript
+export default {
+ async scheduled(controller: ScheduledController, env: Env, ctx: ExecutionContext): Promise<void> {
+ console.log("Cron executed:", new Date(controller.scheduledTime));
+ },
+};
+```
+
+**JavaScript:** Same signature without types
+**Python:** `class Default(WorkerEntrypoint): async def scheduled(self, controller, env, ctx)`
+
+## ScheduledController
+
+```typescript
+interface ScheduledController {
+ scheduledTime: number; // Unix ms when scheduled to run
+ cron: string; // Expression that triggered (e.g., "*/5 * * * *")
+ type: string; // Always "scheduled"
+ noRetry(): void; // Prevent automatic retry on failure
+}
+```
+
+**Prevent retry on failure:**
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ try {
+ await riskyOperation(env);
+ } catch (error) {
+ // Don't retry - failure is expected/acceptable
+ controller.noRetry();
+ console.error("Operation failed, not retrying:", error);
+ }
+ },
+};
+```
+
+**When to use noRetry():**
+- External API failures outside your control (avoid hammering failed services)
+- Rate limit errors (retry would fail again immediately)
+- Duplicate execution detected (idempotency check failed)
+- Non-critical operations where skip is acceptable (analytics, caching)
+- Validation errors that won't resolve on retry
+
+## Handler Parameters
+
+**`controller: ScheduledController`**
+- Access cron expression and scheduled time
+
+**`env: Env`**
+- All bindings: KV, R2, D1, secrets, service bindings
+
+**`ctx: ExecutionContext`**
+- `ctx.waitUntil(promise)` - Extend execution for async tasks (logging, cleanup, external APIs)
+- First `waitUntil` failure recorded in Cron Events
+
+## Multiple Schedules
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ switch (controller.cron) {
+ case "*/3 * * * *": ctx.waitUntil(updateRecentData(env)); break;
+ case "0 * * * *": ctx.waitUntil(processHourlyAggregation(env)); break;
+ case "0 2 * * *": ctx.waitUntil(performDailyMaintenance(env)); break;
+ default: console.warn(`Unhandled: ${controller.cron}`);
+ }
+ },
+};
+```
+
+## ctx.waitUntil Usage
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const data = await fetchCriticalData(); // Critical path
+
+ // Non-blocking background tasks
+ ctx.waitUntil(Promise.all([
+ logToAnalytics(data),
+ cleanupOldRecords(env.DB),
+ notifyWebhook(env.WEBHOOK_URL, data),
+ ]));
+ },
+};
+```
+
+## Workflow Integration
+
+```typescript
+import { WorkflowEntrypoint } from "cloudflare:workers";
+
+export class DataProcessingWorkflow extends WorkflowEntrypoint {
+ async run(event, step) {
+ const data = await step.do("fetch-data", () => fetchLargeDataset());
+ const processed = await step.do("process-data", () => processDataset(data));
+ await step.do("store-results", () => storeResults(processed));
+ }
+}
+
+export default {
+ async scheduled(controller, env, ctx) {
+ const instance = await env.MY_WORKFLOW.create({
+ params: { scheduledTime: controller.scheduledTime, cron: controller.cron },
+ });
+ console.log(`Started workflow: ${instance.id}`);
+ },
+};
+```
+
+## Testing Handler
+
+**Local development (/__scheduled endpoint):**
+```bash
+# Start dev server
+npx wrangler dev
+
+# Trigger any cron
+curl "http://localhost:8787/__scheduled?cron=*/5+*+*+*+*"
+
+# Trigger specific cron with custom time
+curl "http://localhost:8787/__scheduled?cron=0+2+*+*+*&scheduledTime=1704067200000"
+```
+
+**Query parameters:**
+- `cron` - Required. URL-encoded cron expression (use `+` for spaces)
+- `scheduledTime` - Optional. Unix timestamp in milliseconds (defaults to current time)
+
+**Production note:** The `/__scheduled` endpoint is only handled by the local dev server; in production, requests to that path are routed to your `fetch` handler like any other request and cannot invoke the scheduled handler. Blocking the path is optional defense-in-depth - see [gotchas.md](./gotchas.md#security-concerns)
+
+**Unit testing (Vitest):**
+```typescript
+// test/scheduled.test.ts
+import { describe, it, expect } from "vitest";
+import { env } from "cloudflare:test";
+import worker from "../src/index";
+
+describe("Scheduled Handler", () => {
+ it("processes scheduled event", async () => {
+ const controller = { scheduledTime: Date.now(), cron: "*/5 * * * *", type: "scheduled" as const, noRetry: () => {} };
+ const ctx = { waitUntil: (p: Promise<unknown>) => p, passThroughOnException: () => {} };
+ await worker.scheduled(controller, env, ctx);
+ expect(await env.MY_KV.get("last_run")).toBeDefined();
+ });
+
+ it("handles multiple crons", async () => {
+ const ctx = { waitUntil: () => {}, passThroughOnException: () => {} };
+ await worker.scheduled({ scheduledTime: Date.now(), cron: "*/5 * * * *", type: "scheduled", noRetry: () => {} }, env, ctx);
+ expect(await env.MY_KV.get("last_type")).toBe("frequent");
+ });
+});
+```
+
+## Error Handling
+
+**Automatic retries:**
+- Failed cron executions are retried automatically unless `noRetry()` is called
+- Retry happens after a delay (typically minutes)
+- Only first `waitUntil()` failure is recorded in Cron Events
+
+**Best practices:**
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ try {
+ await criticalOperation(env);
+ } catch (error) {
+ // Log error details
+ console.error("Cron failed:", {
+ cron: controller.cron,
+ scheduledTime: controller.scheduledTime,
+ error: error.message,
+ stack: error.stack,
+ });
+
+ // Decide: retry or skip
+ if (error.message.includes("rate limit")) {
+ controller.noRetry(); // Skip retry for rate limits
+ }
+ // Otherwise allow automatic retry
+ throw error;
+ }
+ },
+};
+```
+
+## See Also
+
+- [README.md](./README.md) - Overview
+- [patterns.md](./patterns.md) - Use cases, examples
+- [gotchas.md](./gotchas.md) - Common errors, testing issues
diff --git a/.agents/skills/cloudflare-deploy/references/cron-triggers/configuration.md b/.agents/skills/cloudflare-deploy/references/cron-triggers/configuration.md
new file mode 100644
index 0000000..b584369
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cron-triggers/configuration.md
@@ -0,0 +1,180 @@
+# Cron Triggers Configuration
+
+## wrangler.jsonc
+
+```jsonc
+{
+ "$schema": "./node_modules/wrangler/config-schema.json",
+ "name": "my-cron-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-01-01", // Use current date for new projects
+
+ "triggers": {
+ "crons": [
+ "*/5 * * * *", // Every 5 minutes
+ "0 */2 * * *", // Every 2 hours
+ "0 9 * * MON-FRI", // Weekdays at 9am UTC
+ "0 2 1 * *" // Monthly on 1st at 2am UTC
+ ]
+ }
+}
+```
+
+## Green Compute (Beta)
+
+Run cron triggers in data centers powered by renewable energy. Green Compute is an **account-wide dashboard setting**, not a per-Worker `wrangler.jsonc` option.
+
+**Enable:** Cloudflare dashboard → Workers & Pages → Account details → Compute Setting → **Green Compute**
+
+**How it works:**
+- Applies to all Workers with Cron Triggers on the account
+- Scheduled executions run in facilities powered by renewable energy
+- Schedules still fire at their configured times — enabling Green Compute does not delay execution
+
+**Not the same as Smart Placement:** `"placement": { "mode": "smart" }` is a separate feature that runs a Worker closer to its backend services to reduce latency; it is unrelated to Green Compute:
+
+```jsonc
+{
+  "name": "my-worker",
+  "placement": {
+    "mode": "smart" // Smart Placement (latency optimization), NOT Green Compute
+  }
+}
+```
+
+**Good fits for Green Compute:**
+- Nightly data processing and ETL pipelines
+- Weekly/monthly report generation
+- Database backups and maintenance
+- Analytics aggregation
+- ML model training
+
+**Considerations:**
+- Account-wide: affects every Worker with Cron Triggers on the account
+- Applies to scheduled executions only — `fetch` traffic is unaffected
+- No wrangler configuration is required or available for this setting
+- Feature has been in beta — verify current behavior in the Cloudflare dashboard docs
+- Time-sensitive schedules are safe: execution timing is unchanged
+
+See: [Green Compute announcement](https://blog.cloudflare.com/announcing-green-compute/)
+
+## Environment-Specific Schedules
+
+```jsonc
+{
+ "name": "my-cron-worker",
+ "triggers": {
+ "crons": ["0 */6 * * *"] // Prod: every 6 hours
+ },
+ "env": {
+ "staging": {
+ "triggers": {
+ "crons": ["*/15 * * * *"] // Staging: every 15min
+ }
+ },
+ "dev": {
+ "triggers": {
+ "crons": ["*/5 * * * *"] // Dev: every 5min
+ }
+ }
+ }
+}
+```
+
+## Schedule Format
+
+**Structure:** `minute hour day-of-month month day-of-week`
+
+**Special chars:** `*` (any), `,` (list), `-` (range), `/` (step), `L` (last), `W` (weekday), `#` (nth)
+
+## Managing Triggers
+
+**Remove all:** `"triggers": { "crons": [] }`
+**Preserve existing:** Omit `"triggers"` field entirely
+
+## Deployment
+
+```bash
+# Deploy with config crons
+npx wrangler deploy
+
+# Deploy specific environment
+npx wrangler deploy --env production
+
+# View deployments
+npx wrangler deployments list
+```
+
+**⚠️ Changes take up to 15 minutes to propagate globally**
+
+## API Management
+
+**Get triggers:**
+```bash
+curl "https://api.cloudflare.com/client/v4/accounts/{account_id}/workers/scripts/{script_name}/schedules" \
+ -H "Authorization: Bearer {api_token}"
+```
+
+**Update triggers:**
+```bash
+curl -X PUT "https://api.cloudflare.com/client/v4/accounts/{account_id}/workers/scripts/{script_name}/schedules" \
+ -H "Authorization: Bearer {api_token}" \
+ -H "Content-Type: application/json" \
+ -d '{"crons": ["*/5 * * * *", "0 2 * * *"]}'
+```
+
+**Delete all:**
+```bash
+curl -X PUT "https://api.cloudflare.com/client/v4/accounts/{account_id}/workers/scripts/{script_name}/schedules" \
+ -H "Authorization: Bearer {api_token}" \
+ -H "Content-Type: application/json" \
+ -d '{"crons": []}'
+```
+
+## Combining Multiple Workers
+
+For complex schedules, use multiple workers:
+
+```jsonc
+// worker-frequent.jsonc
+{
+ "name": "data-sync-frequent",
+ "triggers": { "crons": ["*/5 * * * *"] }
+}
+
+// worker-daily.jsonc
+{
+ "name": "reports-daily",
+ "triggers": { "crons": ["0 2 * * *"] },
+ "placement": { "mode": "smart" }
+}
+
+// worker-weekly.jsonc
+{
+ "name": "cleanup-weekly",
+ "triggers": { "crons": ["0 3 * * SUN"] }
+}
+```
+
+**Benefits:**
+- Separate CPU limits per worker
+- Independent error isolation
+- Different Green Compute policies
+- Easier to maintain and debug
+
+## Validation
+
+**Test cron syntax:**
+- [crontab.guru](https://crontab.guru/) - Interactive validator
+- Wrangler validates on deploy but won't catch logic errors
+
+**Common mistakes:**
+- `0 0 * * *` runs daily at midnight UTC, not your local timezone
+- `*/60 * * * *` is invalid (use `0 * * * *` for hourly)
+- `0 2 31 * *` only runs on months with 31 days
+
+## See Also
+
+- [README.md](./README.md) - Overview, quick start
+- [api.md](./api.md) - Handler implementation
+- [patterns.md](./patterns.md) - Multi-cron routing examples
diff --git a/.agents/skills/cloudflare-deploy/references/cron-triggers/gotchas.md b/.agents/skills/cloudflare-deploy/references/cron-triggers/gotchas.md
new file mode 100644
index 0000000..5906c3a
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cron-triggers/gotchas.md
@@ -0,0 +1,199 @@
+# Cron Triggers Gotchas
+
+## Common Errors
+
+### "Timezone Issues"
+
+**Problem:** Cron runs at wrong time relative to local timezone
+**Cause:** All crons execute in UTC, no local timezone support
+**Solution:** Convert local time to UTC manually
+
+**Conversion formula:** `utcHour = (localHour - utcOffset + 24) % 24`
+
+**Examples:**
+- 9am PST (UTC-8) → `(9 - (-8) + 24) % 24 = 17` → `0 17 * * *`
+- 2am EST (UTC-5) → `(2 - (-5) + 24) % 24 = 7` → `0 7 * * *`
+- 6pm JST (UTC+9) → `(18 - 9 + 24) % 24 = 33 % 24 = 9` → `0 9 * * *`
+
+**Daylight Saving Time:** Adjust manually when DST changes, or schedule at times unaffected by DST (e.g., 2am-4am local time usually safe)
+
+### "Cron Not Executing"
+
+**Cause:** Missing `scheduled()` export, invalid syntax, propagation delay (<15min), or plan limits
+**Solution:** Verify export exists, validate at crontab.guru, wait 15+ min after deploy, check plan limits
+
+### "Duplicate Executions"
+
+**Cause:** At-least-once delivery
+**Solution:** Track execution IDs in KV - see idempotency pattern below
+
+### "Execution Failures"
+
+**Cause:** CPU exceeded, unhandled exceptions, network timeouts, binding errors
+**Solution:** Use try-catch, AbortController timeouts, `ctx.waitUntil()` for long ops, or Workflows for heavy tasks
+
+### "Local Testing Not Working"
+
+**Problem:** `/__scheduled` endpoint returns 404 or doesn't trigger handler
+**Cause:** Missing `scheduled()` export, wrangler not running, or incorrect endpoint format
+**Solution:**
+
+1. Verify `scheduled()` is exported:
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ console.log("Cron triggered");
+ },
+};
+```
+
+2. Start dev server:
+```bash
+npx wrangler dev
+```
+
+3. Use correct endpoint format (URL-encode spaces as `+`):
+```bash
+# Correct
+curl "http://localhost:8787/__scheduled?cron=*/5+*+*+*+*"
+
+# Wrong (will fail)
+curl "http://localhost:8787/__scheduled?cron=*/5 * * * *"
+```
+
+4. Update Wrangler if outdated:
+```bash
+npm install -g wrangler@latest
+```
+
+### "waitUntil() Tasks Not Completing"
+
+**Problem:** Background tasks in `ctx.waitUntil()` fail silently or don't execute
+**Cause:** Promises rejected without error handling, or handler returns before promise settles
+**Solution:** Always await or handle errors in waitUntil promises:
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ // BAD: Silent failures
+ ctx.waitUntil(riskyOperation());
+
+ // GOOD: Explicit error handling
+ ctx.waitUntil(
+ riskyOperation().catch(err => {
+ console.error("Background task failed:", err);
+ return logError(err, env);
+ })
+ );
+ },
+};
+```
+
+### "Idempotency Issues"
+
+**Problem:** At-least-once delivery causes duplicate side effects (double charges, duplicate emails)
+**Cause:** No deduplication mechanism
+**Solution:** Use KV to track execution IDs:
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const executionId = `${controller.cron}-${controller.scheduledTime}`;
+ const existing = await env.EXECUTIONS.get(executionId);
+
+ if (existing) {
+ console.log("Already executed, skipping");
+ controller.noRetry();
+ return;
+ }
+
+ await env.EXECUTIONS.put(executionId, "1", { expirationTtl: 86400 }); // 24h TTL
+ await performIdempotentOperation(env);
+ },
+};
+```
+
+### "Security Concerns"
+
+**Problem:** Requests to `/__scheduled` in production reach your `fetch` handler like any other path (the scheduled handler itself can only be invoked by Cloudflare's scheduler or the local dev server)
+**Cause:** The testing endpoint is a local-dev convenience; deployed Workers receive it as an ordinary HTTP request
+**Solution:** Return 404 for the path as defense-in-depth:
+
+```typescript
+export default {
+ async fetch(request, env, ctx) {
+ const url = new URL(request.url);
+
+ // Block __scheduled in production
+ if (url.pathname === "/__scheduled" && env.ENVIRONMENT === "production") {
+ return new Response("Not Found", { status: 404 });
+ }
+
+ return handleRequest(request, env, ctx);
+ },
+
+ async scheduled(controller, env, ctx) {
+ // Your cron logic
+ },
+};
+```
+
+**Also:** Use `env.API_KEY` for secrets (never hardcode)
+
+**Alternative:** Add middleware to verify request origin:
+```typescript
+export default {
+ async fetch(request, env, ctx) {
+ const url = new URL(request.url);
+
+ if (url.pathname === "/__scheduled") {
+ // Check Cloudflare headers to verify internal request
+ const cfRay = request.headers.get("cf-ray");
+ if (!cfRay && env.ENVIRONMENT === "production") {
+ return new Response("Not Found", { status: 404 });
+ }
+ }
+
+ return handleRequest(request, env, ctx);
+ },
+
+ async scheduled(controller, env, ctx) {
+ // Your cron logic
+ },
+};
+```
+
+## Limits & Quotas
+
+| Limit | Free | Paid | Notes |
+|-------|------|------|-------|
+| Triggers per Worker | 3 | Unlimited | Maximum cron schedules per Worker |
+| CPU time | 10ms | 50ms | May need `ctx.waitUntil()` or Workflows |
+| Execution guarantee | At-least-once | At-least-once | Duplicates possible - use idempotency |
+| Propagation delay | Up to 15 minutes | Up to 15 minutes | Time for changes to take effect globally |
+| Min interval | 1 minute | 1 minute | Cannot schedule more frequently |
+| Cron accuracy | ±1 minute | ±1 minute | Execution may drift slightly |
+
+## Testing Best Practices
+
+**Unit tests:**
+- Mock `ScheduledController`, `ExecutionContext`, and bindings
+- Test each cron expression separately
+- Verify `noRetry()` is called when expected
+- Use Vitest with `@cloudflare/vitest-pool-workers` for realistic env
+
+**Integration tests:**
+- Test via `/__scheduled` endpoint in dev environment
+- Verify idempotency logic with duplicate `scheduledTime` values
+- Test error handling and retry behavior
+
+**Production:** Start with long intervals (`*/30 * * * *`), monitor Cron Events for 24h, set up alerts before reducing interval
+
+## Resources
+
+- [Cron Triggers Docs](https://developers.cloudflare.com/workers/configuration/cron-triggers/)
+- [Scheduled Handler API](https://developers.cloudflare.com/workers/runtime-apis/handlers/scheduled/)
+- [Cloudflare Workflows](https://developers.cloudflare.com/workflows/)
+- [Workers Limits](https://developers.cloudflare.com/workers/platform/limits/)
+- [Crontab Guru](https://crontab.guru/) - Validator
+- [Vitest Pool Workers](https://github.com/cloudflare/workers-sdk/tree/main/fixtures/vitest-pool-workers-examples)
diff --git a/.agents/skills/cloudflare-deploy/references/cron-triggers/patterns.md b/.agents/skills/cloudflare-deploy/references/cron-triggers/patterns.md
new file mode 100644
index 0000000..a1f1823
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/cron-triggers/patterns.md
@@ -0,0 +1,190 @@
+# Cron Triggers Patterns
+
+## API Data Sync
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const response = await fetch("https://api.example.com/data", {headers: { "Authorization": `Bearer ${env.API_KEY}` }});
+ if (!response.ok) throw new Error(`API error: ${response.status}`);
+ ctx.waitUntil(env.MY_KV.put("cached_data", JSON.stringify(await response.json()), {expirationTtl: 3600}));
+ },
+};
+```
+
+## Database Cleanup
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const result = await env.DB.prepare(`DELETE FROM sessions WHERE expires_at < datetime('now')`).run();
+ console.log(`Deleted ${result.meta.changes} expired sessions`);
+ ctx.waitUntil(env.DB.prepare("VACUUM").run());
+ },
+};
+```
+
+## Report Generation
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const startOfWeek = new Date(); startOfWeek.setDate(startOfWeek.getDate() - 7);
+ const { results } = await env.DB.prepare(`SELECT date, revenue, orders FROM daily_stats WHERE date >= ? ORDER BY date`).bind(startOfWeek.toISOString()).all();
+ const report = {period: "weekly", totalRevenue: results.reduce((sum, d) => sum + d.revenue, 0), totalOrders: results.reduce((sum, d) => sum + d.orders, 0), dailyBreakdown: results};
+ const reportKey = `reports/weekly-${Date.now()}.json`;
+ await env.REPORTS_BUCKET.put(reportKey, JSON.stringify(report));
+ ctx.waitUntil(env.SEND_EMAIL.fetch("https://example.com/send", {method: "POST", body: JSON.stringify({to: "team@example.com", subject: "Weekly Report", reportUrl: `https://reports.example.com/${reportKey}`})}));
+ },
+};
+```
+
+## Health Checks
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const services = [{name: "API", url: "https://api.example.com/health"}, {name: "CDN", url: "https://cdn.example.com/health"}];
+ const checks = await Promise.all(services.map(async (service) => {
+ const start = Date.now();
+ try {
+ const response = await fetch(service.url, { signal: AbortSignal.timeout(5000) });
+ return {name: service.name, status: response.ok ? "up" : "down", responseTime: Date.now() - start};
+ } catch (error) {
+ return {name: service.name, status: "down", responseTime: Date.now() - start, error: error.message};
+ }
+ }));
+ ctx.waitUntil(env.STATUS_KV.put("health_status", JSON.stringify(checks)));
+ const failures = checks.filter(c => c.status === "down");
+ if (failures.length > 0) ctx.waitUntil(fetch(env.ALERT_WEBHOOK, {method: "POST", body: JSON.stringify({text: `${failures.length} service(s) down: ${failures.map(f => f.name).join(", ")}`})}));
+ },
+};
+```
+
+## Batch Processing (Rate-Limited)
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const queueData = await env.QUEUE_KV.get("pending_items", "json");
+ if (!queueData || queueData.length === 0) return;
+ const batch = queueData.slice(0, 100);
+ const results = await Promise.allSettled(batch.map(item => fetch("https://api.example.com/process", {method: "POST", headers: {"Authorization": `Bearer ${env.API_KEY}`, "Content-Type": "application/json"}, body: JSON.stringify(item)})));
+ console.log(`Processed ${results.filter(r => r.status === "fulfilled").length}/${batch.length} items`);
+ ctx.waitUntil(env.QUEUE_KV.put("pending_items", JSON.stringify(queueData.slice(100))));
+ },
+};
+```
+
+## Queue Integration
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const batch = await env.MY_QUEUE.receive({ batchSize: 100 }); // NOTE(review): queue bindings expose send()/sendBatch() only — pulling messages requires a pull-based consumer over the HTTP API; verify before using
+ const results = await Promise.allSettled(batch.messages.map(async (msg) => {
+ await processMessage(msg.body, env);
+ await msg.ack();
+ }));
+ console.log(`Processed ${results.filter(r => r.status === "fulfilled").length}/${batch.messages.length}`);
+ },
+};
+```
+
+## Monitoring & Observability
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const startTime = Date.now();
+ const meta = { cron: controller.cron, scheduledTime: controller.scheduledTime };
+ console.log("[START]", meta);
+ try {
+ const result = await performTask(env);
+ console.log("[SUCCESS]", { ...meta, duration: Date.now() - startTime, count: result.count });
+ ctx.waitUntil(env.METRICS.put(`cron:${controller.scheduledTime}`, JSON.stringify({ ...meta, status: "success" }), { expirationTtl: 2592000 }));
+ } catch (error) {
+ console.error("[ERROR]", { ...meta, duration: Date.now() - startTime, error: error.message });
+ ctx.waitUntil(fetch(env.ALERT_WEBHOOK, { method: "POST", body: JSON.stringify({ text: `Cron failed: ${controller.cron}`, error: error.message }) }));
+ throw error;
+ }
+ },
+};
+```
+
+**View logs:** `npx wrangler tail` or Dashboard → Workers & Pages → Worker → Logs
+
+## Durable Objects Coordination
+
+```typescript
+export default {
+ async scheduled(controller, env, ctx) {
+ const stub = env.COORDINATOR.get(env.COORDINATOR.idFromName("cron-lock"));
+ const acquired = await stub.tryAcquireLock(controller.scheduledTime);
+ if (!acquired) {
+ controller.noRetry();
+ return;
+ }
+ try {
+ await performTask(env);
+ } finally {
+ await stub.releaseLock();
+ }
+ },
+};
+```
+
+## Python Handler
+
+```python
+from workers import WorkerEntrypoint
+
+class Default(WorkerEntrypoint):
+ async def scheduled(self, controller, env, ctx):
+ data = await env.MY_KV.get("key")
+ ctx.waitUntil(env.DB.execute("DELETE FROM logs WHERE created_at < datetime('now', '-7 days')"))
+```
+
+## Testing Patterns
+
+**Local testing with /__scheduled:**
+```bash
+# Start dev server
+npx wrangler dev
+
+# Test specific cron
+curl "http://localhost:8787/__scheduled?cron=*/5+*+*+*+*"
+
+# Test with specific time
+curl "http://localhost:8787/__scheduled?cron=0+2+*+*+*&scheduledTime=1704067200000"
+```
+
+**Unit tests:**
+```typescript
+// test/scheduled.test.ts
+import { describe, it, expect, vi } from "vitest";
+import { env } from "cloudflare:test";
+import worker from "../src/index";
+
+describe("Scheduled Handler", () => {
+ it("executes cron", async () => {
+ const controller = { scheduledTime: Date.now(), cron: "*/5 * * * *", type: "scheduled" as const, noRetry: vi.fn() };
+ const ctx = { waitUntil: vi.fn(), passThroughOnException: vi.fn() };
+ await worker.scheduled(controller, env, ctx);
+ expect(await env.MY_KV.get("last_run")).toBeDefined();
+ });
+
+ it("calls noRetry on duplicate", async () => {
+ const controller = { scheduledTime: 1704067200000, cron: "0 2 * * *", type: "scheduled" as const, noRetry: vi.fn() };
+ await env.EXECUTIONS.put("0 2 * * *-1704067200000", "1");
+ await worker.scheduled(controller, env, { waitUntil: vi.fn(), passThroughOnException: vi.fn() });
+ expect(controller.noRetry).toHaveBeenCalled();
+ });
+});
+```
+
+## See Also
+
+- [README.md](./README.md) - Overview
+- [api.md](./api.md) - Handler implementation
+- [gotchas.md](./gotchas.md) - Troubleshooting
diff --git a/.agents/skills/cloudflare-deploy/references/d1/README.md b/.agents/skills/cloudflare-deploy/references/d1/README.md
new file mode 100644
index 0000000..e40d44c
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/d1/README.md
@@ -0,0 +1,133 @@
+# Cloudflare D1 Database
+
+Expert guidance for Cloudflare D1, a serverless SQLite database designed for horizontal scale-out across multiple databases.
+
+## Overview
+
+D1 is Cloudflare's managed, serverless database with:
+- SQLite SQL semantics and compatibility
+- Built-in disaster recovery via Time Travel (point-in-time recovery: 30 days on paid plans, 7 days on free)
+- Horizontal scale-out architecture (10 GB per database)
+- Worker and HTTP API access
+- Pricing based on query and storage costs only
+
+**Architecture Philosophy**: D1 is optimized for per-user, per-tenant, or per-entity database patterns rather than single large databases.
+
+## Quick Start
+
+```bash
+# Create database
+wrangler d1 create <DATABASE_NAME>
+
+# Execute migration
+wrangler d1 migrations apply <DATABASE_NAME> --remote
+
+# Local development
+wrangler dev
+```
+
+## Core Query Methods
+
+```typescript
+// .all() - Returns all rows; .first() - First row or null; .first(col) - Single column value
+// .run() - INSERT/UPDATE/DELETE; .raw() - Array of arrays (efficient)
+const { results, success, meta } = await env.DB.prepare('SELECT * FROM users WHERE active = ?').bind(true).all();
+const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
+```
+
+## Batch Operations
+
+```typescript
+// Multiple queries in single round trip (atomic transaction)
+const results = await env.DB.batch([
+ env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(1),
+ env.DB.prepare('SELECT * FROM posts WHERE author_id = ?').bind(1),
+ env.DB.prepare('UPDATE users SET last_access = ? WHERE id = ?').bind(Date.now(), 1)
+]);
+```
+
+## Sessions API (Read Replication, Paid Plans)
+
+```typescript
+// Sessions provide sequential consistency across queries when read replication is enabled.
+// "first-primary" routes the first query to the primary; "first-unconstrained" allows any replica.
+const session = env.DB.withSession("first-primary");
+const user = await session.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
+await session.prepare('UPDATE users SET last_seen = ? WHERE id = ?').bind(Date.now(), userId).run();
+
+// Persist session.getBookmark() and pass it to withSession() in a later
+// request to continue reading your own writes
+```
+
+## Read Replication (Paid Plans)
+
+```typescript
+// With read replication enabled, session reads are served from the nearest
+// replica automatically — there is no separate replica binding.
+const session = env.DB.withSession("first-unconstrained");
+const user = await session.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
+await session.prepare('UPDATE users SET last_login = ? WHERE id = ?').bind(Date.now(), userId).run(); // writes always go to the primary
+```
+
+## Platform Limits
+
+| Limit | Free Tier | Paid Plans |
+|-------|-----------|------------|
+| Database size | 500 MB | 10 GB per database |
+| Row size | 1 MB max | 1 MB max |
+| Query timeout | 30 seconds | 30 seconds |
+| Batch size | 1,000 statements | 10,000 statements |
+| Time Travel retention | 7 days | 30 days |
+| Read replicas | Not available | Yes (paid add-on) |
+
+**Pricing**: Included with the Workers Paid plan ($5/month for the plan, not per database) — usage beyond included quotas bills at $0.001 per million rows read, $1.00 per million rows written, and $0.75/GB-month storage
+
+## CLI Commands
+
+```bash
+# Database management
+wrangler d1 create <DATABASE_NAME>
+wrangler d1 list
+wrangler d1 delete <DATABASE_NAME>
+
+# Migrations
+wrangler d1 migrations create <DATABASE_NAME> <MIGRATION_NAME> # Create new migration file
+wrangler d1 migrations apply <DATABASE_NAME> --remote # Apply pending migrations
+wrangler d1 migrations apply <DATABASE_NAME> --local # Apply locally
+wrangler d1 migrations list <DATABASE_NAME> --remote # Show applied migrations
+
+# Direct SQL execution
+wrangler d1 execute <DATABASE_NAME> --remote --command="SELECT * FROM users"
+wrangler d1 execute <DATABASE_NAME> --local --file=./schema.sql
+
+# Backups & Import/Export
+wrangler d1 export <DATABASE_NAME> --remote --output=./backup.sql # Full export with schema
+wrangler d1 export <DATABASE_NAME> --remote --no-schema --output=./data.sql # Data only
+wrangler d1 time-travel restore <DATABASE_NAME> --timestamp="2024-01-15T14:30:00Z" # Point-in-time recovery
+
+# Development
+wrangler dev --persist-to=./.wrangler/state
+```
+
+## Reading Order
+
+**Start here**: Quick Start above → configuration.md (setup) → api.md (queries)
+
+**Common tasks**:
+- First time setup: configuration.md → Run migrations
+- Adding queries: api.md → Prepared statements
+- Pagination/caching: patterns.md
+- Production optimization: Read Replication + Sessions API (this file)
+- Debugging: gotchas.md
+
+## In This Reference
+
+- [configuration.md](./configuration.md) - wrangler.jsonc setup, migrations, TypeScript types, ORMs, local dev
+- [api.md](./api.md) - Query methods (.all/.first/.run/.raw), batch, sessions, read replicas, error handling
+- [patterns.md](./patterns.md) - Pagination, bulk operations, caching, multi-tenant, sessions, analytics
+- [gotchas.md](./gotchas.md) - SQL injection, limits by plan tier, performance, common errors
+
+## See Also
+
+- [workers](../workers/) - Worker runtime and fetch handler patterns
+- [hyperdrive](../hyperdrive/) - Connection pooling for external databases
diff --git a/.agents/skills/cloudflare-deploy/references/d1/api.md b/.agents/skills/cloudflare-deploy/references/d1/api.md
new file mode 100644
index 0000000..b3c26de
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/d1/api.md
@@ -0,0 +1,196 @@
+# D1 API Reference
+
+## Prepared Statements (Required for Security)
+
+```typescript
+// ❌ NEVER: Direct string interpolation (SQL injection risk)
+const result = await env.DB.prepare(`SELECT * FROM users WHERE id = ${userId}`).all();
+
+// ✅ CORRECT: Prepared statements with bind()
+const result = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).all();
+
+// Multiple parameters
+const result = await env.DB.prepare('SELECT * FROM users WHERE email = ? AND active = ?').bind(email, true).all();
+```
+
+## Query Execution Methods
+
+```typescript
+// .all() - Returns all rows
+const { results, success, meta } = await env.DB.prepare('SELECT * FROM users WHERE active = ?').bind(true).all();
+// results: Array of row objects; success: boolean
+// meta: { duration: number, rows_read: number, rows_written: number }
+
+// .first() - Returns first row or null
+const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
+
+// .first(columnName) - Returns single column value
+const email = await env.DB.prepare('SELECT email FROM users WHERE id = ?').bind(userId).first('email');
+// Returns string | number | null
+
+// .run() - For INSERT/UPDATE/DELETE (no row data returned)
+const result = await env.DB.prepare('UPDATE users SET last_login = ? WHERE id = ?').bind(Date.now(), userId).run();
+// result.meta: { duration, rows_read, rows_written, last_row_id, changes }
+
+// .raw() - Returns array of arrays (efficient for large datasets)
+const rawResults = await env.DB.prepare('SELECT id, name FROM users').raw();
+// [[1, 'Alice'], [2, 'Bob']]
+```
+
+## Batch Operations
+
+```typescript
+// Execute multiple queries in single round trip (atomic transaction)
+const results = await env.DB.batch([
+ env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(1),
+ env.DB.prepare('SELECT * FROM posts WHERE author_id = ?').bind(1),
+ env.DB.prepare('UPDATE users SET last_access = ? WHERE id = ?').bind(Date.now(), 1)
+]);
+// results is array: [result1, result2, result3]
+
+// Batch with same prepared statement, different params
+const userIds = [1, 2, 3];
+const stmt = env.DB.prepare('SELECT * FROM users WHERE id = ?');
+const results = await env.DB.batch(userIds.map(id => stmt.bind(id)));
+```
+
+## Transactions (via batch)
+
+```typescript
+// D1 executes batch() as atomic transaction - all succeed or all fail
+const results = await env.DB.batch([
+ env.DB.prepare('INSERT INTO accounts (id, balance) VALUES (?, ?)').bind(1, 100),
+ env.DB.prepare('INSERT INTO accounts (id, balance) VALUES (?, ?)').bind(2, 200),
+ env.DB.prepare('UPDATE accounts SET balance = balance - ? WHERE id = ?').bind(50, 1),
+ env.DB.prepare('UPDATE accounts SET balance = balance + ? WHERE id = ?').bind(50, 2)
+]);
+```
+
+## Sessions API (Paid Plans)
+
+Long-running sessions for operations exceeding the 30s query timeout (up to 15 min). NOTE(review): verify against current D1 docs — Cloudflare's documented `withSession()` API is primarily for read-replication consistency (bookmarks); confirm the `{ timeout }` option and `close()` semantics shown below before relying on them.
+
+```typescript
+const session = env.DB.withSession({ timeout: 600 }); // 10 min (1-900s)
+try {
+ await session.prepare('CREATE INDEX idx_large ON big_table(column)').run();
+ await session.prepare('ANALYZE').run();
+} finally {
+ session.close(); // CRITICAL: always close to prevent leaks
+}
+```
+
+**Use cases**: Migrations, ANALYZE, large index creation, bulk transformations
+
+## Read Replication (Paid Plans)
+
+Routes read queries to the nearest replica for lower latency; writes always go to the primary. NOTE(review): confirm the separate `DB_REPLICA` binding pattern against current D1 docs — official read replication is enabled per-database and reads are routed via the Sessions API rather than a distinct binding.
+
+```typescript
+interface Env {
+ DB: D1Database; // Primary (writes)
+ DB_REPLICA: D1Database; // Replica (reads)
+}
+
+// Reads: use replica
+const user = await env.DB_REPLICA.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
+
+// Writes: use primary
+await env.DB.prepare('UPDATE users SET last_login = ? WHERE id = ?').bind(Date.now(), userId).run();
+
+// Read-after-write: use primary for consistency (replication lag <100ms-2s)
+await env.DB.prepare('INSERT INTO posts (title) VALUES (?)').bind(title).run();
+const post = await env.DB.prepare('SELECT * FROM posts WHERE title = ?').bind(title).first(); // Primary
+```
+
+## Error Handling
+
+```typescript
+async function getUser(userId: number, env: Env): Promise<Response> {
+ try {
+ const result = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).all();
+ if (!result.success) return new Response('Database error', { status: 500 });
+ if (result.results.length === 0) return new Response('User not found', { status: 404 });
+ return Response.json(result.results[0]);
+ } catch (error) {
+ return new Response('Internal error', { status: 500 });
+ }
+}
+
+// Constraint violations
+try {
+ await env.DB.prepare('INSERT INTO users (email, name) VALUES (?, ?)').bind(email, name).run();
+} catch (error) {
+ if (error.message?.includes('UNIQUE constraint failed')) return new Response('Email exists', { status: 409 });
+ throw error;
+}
+```
+
+## REST API (HTTP) Access
+
+Access D1 from external services (non-Worker contexts) using Cloudflare API.
+
+```typescript
+// Single query
+const response = await fetch(
+ `https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/d1/database/${DATABASE_ID}/query`,
+ {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${CLOUDFLARE_API_TOKEN}`,
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ sql: 'SELECT * FROM users WHERE id = ?',
+ params: [userId]
+ })
+ }
+);
+
+const { result, success, errors } = await response.json();
+// result: [{ results: [...], success: true, meta: {...} }]
+
+// Batch queries via HTTP
+const response = await fetch(
+ `https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/d1/database/${DATABASE_ID}/query`,
+ {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${CLOUDFLARE_API_TOKEN}`,
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify([
+ { sql: 'SELECT * FROM users WHERE id = ?', params: [1] },
+ { sql: 'SELECT * FROM posts WHERE author_id = ?', params: [1] }
+ ])
+ }
+);
+```
+
+**Use cases**: Server-side scripts, CI/CD migrations, administrative tools, non-Worker integrations
+
+## Testing & Debugging
+
+```typescript
+// Vitest with unstable_dev
+import { unstable_dev } from 'wrangler';
+describe('D1', () => {
+  let worker: Awaited<ReturnType<typeof unstable_dev>>;
+ beforeAll(async () => { worker = await unstable_dev('src/index.ts'); });
+ afterAll(async () => { await worker.stop(); });
+ it('queries users', async () => { expect((await worker.fetch('/users')).status).toBe(200); });
+});
+
+// Debug query performance
+const result = await env.DB.prepare('SELECT * FROM users').all();
+console.log('Duration:', result.meta.duration, 'ms');
+
+// Query plan analysis
+const plan = await env.DB.prepare('EXPLAIN QUERY PLAN SELECT * FROM users WHERE email = ?').bind(email).all();
+```
+
+```bash
+# Inspect local database
+sqlite3 .wrangler/state/v3/d1/<DATABASE_ID>.sqlite
+# In the sqlite3 shell: .tables / .schema users / PRAGMA table_info(users);
+```
diff --git a/.agents/skills/cloudflare-deploy/references/d1/configuration.md b/.agents/skills/cloudflare-deploy/references/d1/configuration.md
new file mode 100644
index 0000000..8a073fc
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/d1/configuration.md
@@ -0,0 +1,188 @@
+# D1 Configuration
+
+## wrangler.jsonc Setup
+
+```jsonc
+{
+ "name": "your-worker-name",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-01-01", // Use current date for new projects
+ "d1_databases": [
+ {
+ "binding": "DB", // Env variable name
+ "database_name": "your-db-name", // Human-readable name
+ "database_id": "your-database-id", // UUID from dashboard/CLI
+ "migrations_dir": "migrations" // Optional: default is "migrations"
+ },
+ // Read replica (paid plans only)
+ {
+ "binding": "DB_REPLICA",
+ "database_name": "your-db-name",
+ "database_id": "your-database-id" // Same ID, different binding
+ },
+ // Multiple databases
+ {
+ "binding": "ANALYTICS_DB",
+ "database_name": "analytics-db",
+ "database_id": "yyy-yyy-yyy"
+ }
+ ]
+}
+```
+
+## TypeScript Types
+
+```typescript
+interface Env { DB: D1Database; ANALYTICS_DB?: D1Database; }
+
+export default {
+  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ const result = await env.DB.prepare('SELECT * FROM users').all();
+ return Response.json(result.results);
+ }
+}
+```
+
+## Migrations
+
+File structure: `migrations/0001_initial_schema.sql`, `0002_add_posts.sql`, etc.
+
+### Example Migration
+
+```sql
+-- migrations/0001_initial_schema.sql
+CREATE TABLE IF NOT EXISTS users (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ email TEXT UNIQUE NOT NULL,
+ name TEXT NOT NULL,
+ created_at TEXT DEFAULT CURRENT_TIMESTAMP,
+ updated_at TEXT DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE INDEX idx_users_email ON users(email);
+
+CREATE TABLE IF NOT EXISTS posts (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ user_id INTEGER NOT NULL,
+ title TEXT NOT NULL,
+ content TEXT,
+ published BOOLEAN DEFAULT 0,
+ created_at TEXT DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_posts_user_id ON posts(user_id);
+CREATE INDEX idx_posts_published ON posts(published);
+```
+
+### Running Migrations
+
+```bash
+# Create new migration file
+wrangler d1 migrations create <DATABASE_NAME> add_users_table
+# Creates: migrations/0001_add_users_table.sql
+
+# Apply migrations
+wrangler d1 migrations apply <DATABASE_NAME> --local # Apply to local DB
+wrangler d1 migrations apply <DATABASE_NAME> --remote # Apply to production DB
+
+# List applied migrations
+wrangler d1 migrations list <DATABASE_NAME> --remote
+
+# Direct SQL execution (bypasses migration tracking)
+wrangler d1 execute <DATABASE_NAME> --remote --command="SELECT * FROM users"
+wrangler d1 execute <DATABASE_NAME> --local --file=./schema.sql
+```
+
+**Migration tracking**: Wrangler creates `d1_migrations` table automatically to track applied migrations
+
+## Indexing Strategy
+
+```sql
+-- Index frequently queried columns
+CREATE INDEX idx_users_email ON users(email);
+
+-- Composite indexes for multi-column queries
+CREATE INDEX idx_posts_user_published ON posts(user_id, published);
+
+-- Covering indexes (include queried columns)
+CREATE INDEX idx_users_email_name ON users(email, name);
+
+-- Partial indexes for filtered queries
+CREATE INDEX idx_active_users ON users(email) WHERE active = 1;
+
+-- Check if query uses index
+EXPLAIN QUERY PLAN SELECT * FROM users WHERE email = ?;
+```
+
+## Drizzle ORM
+
+```typescript
+// drizzle.config.ts
+export default {
+ schema: './src/schema.ts', out: './migrations', dialect: 'sqlite', driver: 'd1-http',
+ dbCredentials: { accountId: process.env.CLOUDFLARE_ACCOUNT_ID!, databaseId: process.env.D1_DATABASE_ID!, token: process.env.CLOUDFLARE_API_TOKEN! }
+} satisfies Config;
+
+// schema.ts
+import { sqliteTable, text, integer } from 'drizzle-orm/sqlite-core';
+export const users = sqliteTable('users', {
+ id: integer('id').primaryKey({ autoIncrement: true }),
+ email: text('email').notNull().unique(),
+ name: text('name').notNull()
+});
+
+// worker.ts
+import { drizzle } from 'drizzle-orm/d1';
+import { users } from './schema';
+export default {
+ async fetch(request: Request, env: Env) {
+ const db = drizzle(env.DB);
+ return Response.json(await db.select().from(users));
+ }
+}
+```
+
+## Import & Export
+
+```bash
+# Export full database (schema + data)
+wrangler d1 export <DATABASE_NAME> --remote --output=./backup.sql
+
+# Export data only (no schema)
+wrangler d1 export <DATABASE_NAME> --remote --no-schema --output=./data-only.sql
+
+# Export with foreign key constraints preserved
+# (Default: foreign keys are disabled during export for import compatibility)
+
+# Import SQL file
+wrangler d1 execute <DATABASE_NAME> --remote --file=./backup.sql
+
+# Limitations
+# - BLOB data may not export correctly (use R2 for binary files)
+# - Very large exports (>1GB) may timeout (split into chunks)
+# - Import is NOT atomic (use batch() for transactional imports in Workers)
+```
+
+## Plan Tiers
+
+| Feature | Free | Paid |
+|---------|------|------|
+| Database size | 500 MB | 10 GB |
+| Batch size | 1,000 statements | 10,000 statements |
+| Time Travel | 7 days | 30 days |
+| Read replicas | ❌ | ✅ |
+| Sessions API | ❌ | ✅ (up to 15 min) |
+| Pricing | Free | $5/mo + usage |
+
+**Usage pricing** (paid plans): $0.001 per 1K reads + $1 per 1M writes + $0.75/GB storage/month
+
+## Local Development
+
+```bash
+wrangler dev --persist-to=./.wrangler/state # Persist across restarts
+# Local DB: .wrangler/state/v3/d1/<DATABASE_ID>.sqlite
+sqlite3 .wrangler/state/v3/d1/<DATABASE_ID>.sqlite # Inspect
+
+# Local dev uses free tier limits by default
+```
diff --git a/.agents/skills/cloudflare-deploy/references/d1/gotchas.md b/.agents/skills/cloudflare-deploy/references/d1/gotchas.md
new file mode 100644
index 0000000..9f9a95a
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/d1/gotchas.md
@@ -0,0 +1,98 @@
+# D1 Gotchas & Troubleshooting
+
+## Common Errors
+
+### "SQL Injection Vulnerability"
+
+**Cause:** Using string interpolation instead of prepared statements with bind()
+**Solution:** ALWAYS use prepared statements: `env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).all()` instead of string interpolation which allows attackers to inject malicious SQL
+
+### "no such table"
+
+**Cause:** Table doesn't exist because migrations haven't been run, or using wrong database binding
+**Solution:** Run migrations using `wrangler d1 migrations apply <DATABASE_NAME> --remote` and verify binding name in wrangler.jsonc matches code
+
+### "UNIQUE constraint failed"
+
+**Cause:** Attempting to insert duplicate value in column with UNIQUE constraint
+**Solution:** Catch error and return 409 Conflict status code
+
+### "Query Timeout (30s exceeded)"
+
+**Cause:** Query execution exceeds 30 second timeout limit
+**Solution:** Break into smaller queries, add indexes to speed up queries, or reduce dataset size
+
+### "N+1 Query Problem"
+
+**Cause:** Making multiple individual queries in a loop instead of single optimized query
+**Solution:** Use JOIN to fetch related data in single query or use `batch()` method for multiple queries
+
+### "Missing Indexes"
+
+**Cause:** Queries performing full table scans without indexes
+**Solution:** Use `EXPLAIN QUERY PLAN` to check if index is used, then create index with `CREATE INDEX idx_users_email ON users(email)`
+
+### "Boolean Type Issues"
+
+**Cause:** SQLite uses INTEGER (0/1) not native boolean type
+**Solution:** Bind 1 or 0 instead of true/false when working with boolean values
+
+### "Date/Time Type Issues"
+
+**Cause:** SQLite doesn't have native DATE/TIME types
+**Solution:** Use TEXT (ISO 8601 format) or INTEGER (unix timestamp) for date/time values
+
+## Plan Tier Limits
+
+| Limit | Free Tier | Paid Plans | Notes |
+|-------|-----------|------------|-------|
+| Database size | 500 MB | 10 GB | Design for multiple DBs per tenant on paid |
+| Row size | 1 MB | 1 MB | Store large files in R2, not D1 |
+| Query timeout | 30s | 30s (900s with sessions) | Use sessions API for migrations |
+| Batch size | 1,000 statements | 10,000 statements | Split large batches accordingly |
+| Time Travel | 7 days | 30 days | Point-in-time recovery window |
+| Read replicas | ❌ Not available | ✅ Available | Paid add-on for lower latency |
+| Sessions API | ❌ Not available | ✅ Up to 15 min | For migrations and heavy operations |
+| Concurrent requests | 10,000/min | Higher | Contact support for custom limits |
+
+## Production Gotchas
+
+### "Batch size exceeded"
+
+**Cause:** Attempting to send >1,000 statements on free tier or >10,000 on paid
+**Solution:** Chunk batches: `for (let i = 0; i < stmts.length; i += MAX_BATCH) await env.DB.batch(stmts.slice(i, i + MAX_BATCH))`
+
+### "Session not closed / resource leak"
+
+**Cause:** Forgot to call `session.close()` after using sessions API
+**Solution:** Always use try/finally block: `try { await session.prepare(...) } finally { session.close() }`
+
+### "Replication lag causing stale reads"
+
+**Cause:** Reading from replica immediately after write - replication lag can be 100ms-2s
+**Solution:** Use primary for read-after-write: `await env.DB.prepare(...)` not `env.DB_REPLICA`
+
+### "Migration applied to local but not remote"
+
+**Cause:** Forgot `--remote` flag when applying migrations
+**Solution:** Always run `wrangler d1 migrations apply <DATABASE_NAME> --remote` for production
+
+### "Foreign key constraint failed"
+
+**Cause:** Inserting row with FK to non-existent parent, or deleting parent before children
+**Solution:** Enable FK enforcement: `PRAGMA foreign_keys = ON;` and use ON DELETE CASCADE in schema
+
+### "BLOB data corrupted on export"
+
+**Cause:** D1 export may not handle BLOB correctly
+**Solution:** Store binary files in R2, only store R2 URLs/keys in D1
+
+### "Database size approaching limit"
+
+**Cause:** Storing too much data in single database
+**Solution:** Horizontal scale-out: create per-tenant/per-user databases, archive old data, or upgrade to paid plan
+
+### "Local dev vs production behavior differs"
+
+**Cause:** Local uses SQLite file, production uses distributed D1 - different performance/limits
+**Solution:** Always test migrations on remote with `--remote` flag before production rollout
diff --git a/.agents/skills/cloudflare-deploy/references/d1/patterns.md b/.agents/skills/cloudflare-deploy/references/d1/patterns.md
new file mode 100644
index 0000000..f01c7bd
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/d1/patterns.md
@@ -0,0 +1,189 @@
+# D1 Patterns & Best Practices
+
+## Pagination
+
+```typescript
+async function getUsers({ page, pageSize }: { page: number; pageSize: number }, env: Env) {
+ const offset = (page - 1) * pageSize;
+ const [countResult, dataResult] = await env.DB.batch([
+ env.DB.prepare('SELECT COUNT(*) as total FROM users'),
+ env.DB.prepare('SELECT * FROM users ORDER BY created_at DESC LIMIT ? OFFSET ?').bind(pageSize, offset)
+ ]);
+ return { data: dataResult.results, total: countResult.results[0].total, page, pageSize, totalPages: Math.ceil(countResult.results[0].total / pageSize) };
+}
+```
+
+## Conditional Queries
+
+```typescript
+async function searchUsers(filters: { name?: string; email?: string; active?: boolean }, env: Env) {
+ const conditions: string[] = [], params: (string | number | boolean | null)[] = [];
+ if (filters.name) { conditions.push('name LIKE ?'); params.push(`%${filters.name}%`); }
+ if (filters.email) { conditions.push('email = ?'); params.push(filters.email); }
+ if (filters.active !== undefined) { conditions.push('active = ?'); params.push(filters.active ? 1 : 0); }
+ const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
+ return await env.DB.prepare(`SELECT * FROM users ${whereClause}`).bind(...params).all();
+}
+```
+
+## Bulk Insert
+
+```typescript
+async function bulkInsertUsers(users: Array<{ name: string; email: string }>, env: Env) {
+ const stmt = env.DB.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
+ const batch = users.map(user => stmt.bind(user.name, user.email));
+ return await env.DB.batch(batch);
+}
+```
+
+## Caching with KV
+
+```typescript
+async function getCachedUser(userId: number, env: { DB: D1Database; CACHE: KVNamespace }) {
+ const cacheKey = `user:${userId}`;
+ const cached = await env.CACHE?.get(cacheKey, 'json');
+ if (cached) return cached;
+ const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
+ if (user) await env.CACHE?.put(cacheKey, JSON.stringify(user), { expirationTtl: 300 });
+ return user;
+}
+```
+
+## Query Optimization
+
+```typescript
+// ✅ Use indexes in WHERE clauses
+const users = await env.DB.prepare('SELECT * FROM users WHERE email = ?').bind(email).all();
+
+// ✅ Limit result sets
+const recentPosts = await env.DB.prepare('SELECT * FROM posts ORDER BY created_at DESC LIMIT 100').all();
+
+// ✅ Use batch() for multiple independent queries
+const [user, posts, comments] = await env.DB.batch([
+ env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId),
+ env.DB.prepare('SELECT * FROM posts WHERE user_id = ?').bind(userId),
+ env.DB.prepare('SELECT * FROM comments WHERE user_id = ?').bind(userId)
+]);
+
+// ❌ Avoid N+1 queries
+for (const post of posts) {
+ const author = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(post.user_id).first(); // Bad: multiple round trips
+}
+
+// ✅ Use JOINs instead
+const postsWithAuthors = await env.DB.prepare(`
+ SELECT posts.*, users.name as author_name
+ FROM posts
+ JOIN users ON posts.user_id = users.id
+`).all();
+```
+
+## Multi-Tenant SaaS
+
+```typescript
+// Each tenant gets own database
+export default {
+ async fetch(request: Request, env: { [key: `TENANT_${string}`]: D1Database }) {
+ const tenantId = request.headers.get('X-Tenant-ID');
+ const data = await env[`TENANT_${tenantId}`].prepare('SELECT * FROM records').all();
+ return Response.json(data.results);
+ }
+}
+```
+
+## Session Storage
+
+```typescript
+async function createSession(userId: number, token: string, env: Env) {
+ const expiresAt = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString();
+ return await env.DB.prepare('INSERT INTO sessions (user_id, token, expires_at) VALUES (?, ?, ?)').bind(userId, token, expiresAt).run();
+}
+
+async function validateSession(token: string, env: Env) {
+ return await env.DB.prepare('SELECT s.*, u.email FROM sessions s JOIN users u ON s.user_id = u.id WHERE s.token = ? AND s.expires_at > CURRENT_TIMESTAMP').bind(token).first();
+}
+```
+
+## Analytics/Events
+
+```typescript
+async function logEvent(event: { type: string; userId?: number; metadata: object }, env: Env) {
+ return await env.DB.prepare('INSERT INTO events (type, user_id, metadata) VALUES (?, ?, ?)').bind(event.type, event.userId || null, JSON.stringify(event.metadata)).run();
+}
+
+async function getEventStats(startDate: string, endDate: string, env: Env) {
+ return await env.DB.prepare('SELECT type, COUNT(*) as count FROM events WHERE timestamp BETWEEN ? AND ? GROUP BY type ORDER BY count DESC').bind(startDate, endDate).all();
+}
+```
+
+## Read Replication Pattern (Paid Plans)
+
+```typescript
+interface Env { DB: D1Database; DB_REPLICA: D1Database; }
+
+export default {
+ async fetch(request: Request, env: Env) {
+ if (request.method === 'GET') {
+ // Reads: use replica for lower latency
+ const users = await env.DB_REPLICA.prepare('SELECT * FROM users WHERE active = 1').all();
+ return Response.json(users.results);
+ }
+
+ if (request.method === 'POST') {
+ const { name, email } = await request.json();
+ const result = await env.DB.prepare('INSERT INTO users (name, email) VALUES (?, ?)').bind(name, email).run();
+
+ // Read-after-write: use primary for consistency (replication lag <100ms-2s)
+ const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(result.meta.last_row_id).first();
+ return Response.json(user, { status: 201 });
+ }
+ }
+}
+```
+
+**Use replicas for**: Analytics dashboards, search results, public queries (eventual consistency OK)
+**Use primary for**: Read-after-write, financial transactions, authentication (consistency required)
+
+## Sessions API Pattern (Paid Plans)
+
+```typescript
+// Migration with long-running session (up to 15 min)
+async function runMigration(env: Env) {
+ const session = env.DB.withSession({ timeout: 600 }); // 10 min
+ try {
+ await session.prepare('CREATE INDEX idx_users_email ON users(email)').run();
+ await session.prepare('CREATE INDEX idx_posts_user ON posts(user_id)').run();
+ await session.prepare('ANALYZE').run();
+ } finally {
+ session.close(); // Always close to prevent leaks
+ }
+}
+
+// Bulk transformation with batching
+async function transformLargeDataset(env: Env) {
+ const session = env.DB.withSession({ timeout: 900 }); // 15 min max
+ try {
+ const BATCH_SIZE = 1000;
+ let offset = 0;
+ while (true) {
+ const rows = await session.prepare('SELECT id, data FROM legacy LIMIT ? OFFSET ?').bind(BATCH_SIZE, offset).all();
+ if (rows.results.length === 0) break;
+ const updates = rows.results.map(row =>
+ session.prepare('UPDATE legacy SET new_data = ? WHERE id = ?').bind(transform(row.data), row.id)
+ );
+ await session.batch(updates);
+ offset += BATCH_SIZE;
+ }
+ } finally { session.close(); }
+}
+```
+
+## Time Travel & Backups
+
+```bash
+wrangler d1 time-travel restore <DATABASE_NAME> --timestamp="2024-01-15T14:30:00Z" # Point-in-time
+wrangler d1 time-travel info <DATABASE_NAME> # List restore points (7 days free, 30 days paid)
+wrangler d1 export <DATABASE_NAME> --remote --output=./backup.sql # Full export
+wrangler d1 export <DATABASE_NAME> --remote --no-schema --output=./data.sql # Data only
+wrangler d1 execute <DATABASE_NAME> --remote --file=./backup.sql # Import
+```
diff --git a/.agents/skills/cloudflare-deploy/references/ddos/README.md b/.agents/skills/cloudflare-deploy/references/ddos/README.md
new file mode 100644
index 0000000..117dd21
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/ddos/README.md
@@ -0,0 +1,41 @@
+# Cloudflare DDoS Protection
+
+Autonomous, always-on protection against DDoS attacks across L3/4 and L7.
+
+## Protection Types
+
+- **HTTP DDoS (L7)**: Protects HTTP/HTTPS traffic, phase `ddos_l7`, zone/account level
+- **Network DDoS (L3/4)**: UDP/SYN/DNS floods, phase `ddos_l4`, account level only
+- **Adaptive DDoS**: Learns 7-day baseline, detects deviations, 4 profile types (Origins, User-Agents, Locations, Protocols)
+
+## Plan Availability
+
+| Feature | Free | Pro | Business | Enterprise | Enterprise Advanced |
+|---------|------|-----|----------|------------|---------------------|
+| HTTP DDoS (L7) | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Network DDoS (L3/4) | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Override rules | 1 | 1 | 1 | 1 | 10 |
+| Custom expressions | ✗ | ✗ | ✗ | ✗ | ✓ |
+| Log action | ✗ | ✗ | ✗ | ✗ | ✓ |
+| Adaptive DDoS | ✗ | ✗ | ✗ | ✓ | ✓ |
+| Alert filters | Basic | Basic | Basic | Advanced | Advanced |
+
+## Actions & Sensitivity
+
+- **Actions**: `block`, `managed_challenge`, `challenge`, `log` (Enterprise Advanced only)
+- **Sensitivity**: `default` (high), `medium`, `low`, `eoff` (essentially off)
+- **Override**: By category/tag or individual rule ID
+- **Scope**: Zone-level overrides take precedence over account-level
+
+## Reading Order
+
+| File | Purpose | Start Here If... |
+|------|---------|------------------|
+| [configuration.md](./configuration.md) | Dashboard setup, rule structure, adaptive profiles | You're setting up DDoS protection for the first time |
+| [api.md](./api.md) | API endpoints, SDK usage, ruleset ID discovery | You're automating configuration or need programmatic access |
+| [patterns.md](./patterns.md) | Protection strategies, defense-in-depth, dynamic response | You need implementation patterns or layered security |
+| [gotchas.md](./gotchas.md) | False positives, tuning, error handling | You're troubleshooting or optimizing existing protection |
+
+## See Also
+- [waf](../waf/) - Application-layer security rules
+- [bot-management](../bot-management/) - Bot detection and mitigation
diff --git a/.agents/skills/cloudflare-deploy/references/ddos/api.md b/.agents/skills/cloudflare-deploy/references/ddos/api.md
new file mode 100644
index 0000000..b96284a
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/ddos/api.md
@@ -0,0 +1,164 @@
+# DDoS API
+
+## Endpoints
+
+### HTTP DDoS (L7)
+
+```typescript
+// Zone-level
+PUT /zones/{zoneId}/rulesets/phases/ddos_l7/entrypoint
+GET /zones/{zoneId}/rulesets/phases/ddos_l7/entrypoint
+
+// Account-level (Enterprise Advanced)
+PUT /accounts/{accountId}/rulesets/phases/ddos_l7/entrypoint
+GET /accounts/{accountId}/rulesets/phases/ddos_l7/entrypoint
+```
+
+### Network DDoS (L3/4)
+
+```typescript
+// Account-level only
+PUT /accounts/{accountId}/rulesets/phases/ddos_l4/entrypoint
+GET /accounts/{accountId}/rulesets/phases/ddos_l4/entrypoint
+```
+
+## TypeScript SDK
+
+**SDK Version**: Requires `cloudflare` >= 3.0.0 for ruleset phase methods.
+
+```typescript
+import Cloudflare from "cloudflare";
+
+const client = new Cloudflare({ apiToken: process.env.CLOUDFLARE_API_TOKEN });
+
+// STEP 1: Discover managed ruleset ID (required for overrides)
+const allRulesets = await client.rulesets.list({ zone_id: zoneId });
+const ddosRuleset = allRulesets.result.find(
+ (r) => r.kind === "managed" && r.phase === "ddos_l7"
+);
+if (!ddosRuleset) throw new Error("DDoS managed ruleset not found");
+const managedRulesetId = ddosRuleset.id;
+
+// STEP 2: Get current HTTP DDoS configuration
+const entrypointRuleset = await client.zones.rulesets.phases.entrypoint.get("ddos_l7", {
+ zone_id: zoneId,
+});
+
+// STEP 3: Update HTTP DDoS ruleset with overrides
+await client.zones.rulesets.phases.entrypoint.update("ddos_l7", {
+ zone_id: zoneId,
+ rules: [
+ {
+ action: "execute",
+ expression: "true",
+ action_parameters: {
+ id: managedRulesetId, // From discovery step
+ overrides: {
+ sensitivity_level: "medium",
+ action: "managed_challenge",
+ },
+ },
+ },
+ ],
+});
+
+// Network DDoS (account level, L3/4)
+const l4Rulesets = await client.rulesets.list({ account_id: accountId });
+const l4DdosRuleset = l4Rulesets.result.find(
+ (r) => r.kind === "managed" && r.phase === "ddos_l4"
+);
+const l4Ruleset = await client.accounts.rulesets.phases.entrypoint.get("ddos_l4", {
+ account_id: accountId,
+});
+```
+
+## Alert Configuration
+
+```typescript
+interface DDoSAlertConfig {
+ name: string;
+ enabled: boolean;
+ alert_type: "http_ddos_attack_alert" | "layer_3_4_ddos_attack_alert"
+ | "advanced_http_ddos_attack_alert" | "advanced_layer_3_4_ddos_attack_alert";
+ filters?: {
+ zones?: string[];
+ hostnames?: string[];
+ requests_per_second?: number;
+ packets_per_second?: number;
+ megabits_per_second?: number;
+ ip_prefixes?: string[]; // CIDR
+ ip_addresses?: string[];
+ protocols?: string[];
+ };
+ mechanisms: {
+ email?: Array<{ id: string }>;
+ webhooks?: Array<{ id: string }>;
+ pagerduty?: Array<{ id: string }>;
+ };
+}
+
+// Create alert
+await fetch(
+ `https://api.cloudflare.com/client/v4/accounts/${accountId}/alerting/v3/policies`,
+ {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiToken}`,
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify(alertConfig),
+ }
+);
+```
+
+## Typed Override Examples
+
+```typescript
+// Override by category
+interface CategoryOverride {
+ action: "execute";
+ expression: string;
+ action_parameters: {
+ id: string;
+ overrides: {
+ categories?: Array<{
+ category: "http-flood" | "http-anomaly" | "udp-flood" | "syn-flood";
+ sensitivity_level?: "default" | "medium" | "low" | "eoff";
+ action?: "block" | "managed_challenge" | "challenge" | "log";
+ }>;
+ };
+ };
+}
+
+// Override by rule ID
+interface RuleOverride {
+ action: "execute";
+ expression: string;
+ action_parameters: {
+ id: string;
+ overrides: {
+ rules?: Array<{
+ id: string;
+ action?: "block" | "managed_challenge" | "challenge" | "log";
+ sensitivity_level?: "default" | "medium" | "low" | "eoff";
+ }>;
+ };
+ };
+}
+
+// Example: Override specific adaptive rule
+const adaptiveOverride: RuleOverride = {
+ action: "execute",
+ expression: "true",
+ action_parameters: {
+ id: managedRulesetId,
+ overrides: {
+ rules: [
+ { id: "...adaptive-origins-rule-id...", sensitivity_level: "low" },
+ ],
+ },
+ },
+};
+```
+
+See [patterns.md](./patterns.md) for complete implementation patterns.
diff --git a/.agents/skills/cloudflare-deploy/references/ddos/configuration.md b/.agents/skills/cloudflare-deploy/references/ddos/configuration.md
new file mode 100644
index 0000000..14c6e32
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/ddos/configuration.md
@@ -0,0 +1,93 @@
+# DDoS Configuration
+
+## Dashboard Setup
+
+1. Navigate to Security > DDoS
+2. Select HTTP DDoS or Network-layer DDoS
+3. Configure sensitivity & action per ruleset/category/rule
+4. Apply overrides with optional expressions (Enterprise Advanced)
+5. Enable Adaptive DDoS toggle (Enterprise/Enterprise Advanced, requires 7 days traffic history)
+
+## Rule Structure
+
+```typescript
+interface DDoSOverride {
+ description: string;
+ rules: Array<{
+ action: "execute";
+ expression: string; // Custom expression (Enterprise Advanced) or "true" for all
+ action_parameters: {
+ id: string; // Managed ruleset ID (discover via api.md)
+ overrides: {
+ sensitivity_level?: "default" | "medium" | "low" | "eoff";
+ action?: "block" | "managed_challenge" | "challenge" | "log"; // log = Enterprise Advanced only
+ categories?: Array<{
+ category: string; // e.g., "http-flood", "udp-flood"
+ sensitivity_level?: string;
+ }>;
+ rules?: Array<{
+ id: string;
+ action?: string;
+ sensitivity_level?: string;
+ }>;
+ };
+ };
+ }>;
+}
+```
+
+## Expression Availability
+
+| Plan | Custom Expressions | Example |
+|------|-------------------|---------|
+| Free/Pro/Business | ✗ | Use `"true"` only |
+| Enterprise | ✗ | Use `"true"` only |
+| Enterprise Advanced | ✓ | `ip.src in {...}`, `http.request.uri.path matches "..."` |
+
+## Sensitivity Mapping
+
+| UI | API | Threshold |
+|----|-----|-----------|
+| High | `default` | Most aggressive |
+| Medium | `medium` | Balanced |
+| Low | `low` | Less aggressive |
+| Essentially Off | `eoff` | Minimal mitigation |
+
+## Common Categories
+
+- `http-flood`, `http-anomaly` (L7)
+- `udp-flood`, `syn-flood`, `dns-flood` (L3/4)
+
+## Override Precedence
+
+Multiple override layers apply in this order (higher precedence wins):
+
+```
+Zone-level > Account-level
+Individual Rule > Category > Global sensitivity/action
+```
+
+**Example**: Zone rule for `/api/*` overrides account-level global settings.
+
+## Adaptive DDoS Profiles
+
+**Availability**: Enterprise, Enterprise Advanced
+**Learning period**: 7 days of traffic history required
+
+| Profile Type | Description | Detects |
+|--------------|-------------|---------|
+| **Origins** | Traffic patterns per origin server | Anomalous requests to specific origins |
+| **User-Agents** | Traffic patterns per User-Agent | Malicious/anomalous user agent strings |
+| **Locations** | Traffic patterns per geo-location | Attacks from specific countries/regions |
+| **Protocols** | Traffic patterns per protocol (L3/4) | Protocol-specific flood attacks |
+
+Configure by targeting specific adaptive rule IDs via API (see api.md#typed-override-examples).
+
+## Alerting
+
+Configure via Notifications:
+- Alert types: `http_ddos_attack_alert`, `layer_3_4_ddos_attack_alert`, `advanced_*` variants
+- Filters: zones, hostnames, RPS/PPS/Mbps thresholds, IPs, protocols
+- Mechanisms: email, webhooks, PagerDuty
+
+See [api.md](./api.md#alert-configuration) for API examples.
diff --git a/.agents/skills/cloudflare-deploy/references/ddos/gotchas.md b/.agents/skills/cloudflare-deploy/references/ddos/gotchas.md
new file mode 100644
index 0000000..f2a97d1
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/ddos/gotchas.md
@@ -0,0 +1,107 @@
+# DDoS Gotchas
+
+## Common Errors
+
+### "False positives blocking legitimate traffic"
+
+**Cause**: Sensitivity too high, wrong action, or missing exceptions
+**Solution**:
+1. Lower sensitivity for specific rule/category
+2. Use `log` action first to validate (Enterprise Advanced)
+3. Add exception with custom expression (e.g., allowlist IPs)
+4. Query flagged requests via GraphQL Analytics API to identify patterns
+
+### "Attacks getting through"
+
+**Cause**: Sensitivity too low or wrong action
+**Solution**: Increase to `default` sensitivity and use `block` action:
+```typescript
+const config = {
+ rules: [{
+ expression: "true",
+ action: "execute",
+ action_parameters: { id: managedRulesetId, overrides: { sensitivity_level: "default", action: "block" } },
+ }],
+};
+```
+
+### "Adaptive rules not working"
+
+**Cause**: Insufficient traffic history (needs 7 days)
+**Solution**: Wait for baseline to establish, check dashboard for adaptive rule status
+
+### "Zone override ignored"
+
+**Cause**: Account overrides conflict with zone overrides
+**Solution**: Configure at zone level OR remove zone overrides to use account-level
+
+### "Log action not available"
+
+**Cause**: Not on Enterprise Advanced DDoS plan
+**Solution**: Use `managed_challenge` with low sensitivity for testing
+
+### "Rule limit exceeded"
+
+**Cause**: Too many override rules (Free/Pro/Business: 1, Enterprise Advanced: 10)
+**Solution**: Combine conditions in single expression using `and`/`or`
+
+### "Cannot override rule"
+
+**Cause**: Rule is read-only
+**Solution**: Check API response for read-only indicator, use different rule
+
+### "Cannot disable DDoS protection"
+
+**Cause**: DDoS managed rulesets cannot be fully disabled (always-on protection)
+**Solution**: Set `sensitivity_level: "eoff"` for minimal mitigation
+
+### "Expression not allowed"
+
+**Cause**: Custom expressions require Enterprise Advanced plan
+**Solution**: Use `expression: "true"` for all traffic, or upgrade plan
+
+### "Managed ruleset not found"
+
+**Cause**: Zone/account doesn't have DDoS managed ruleset, or incorrect phase
+**Solution**: Verify ruleset exists via `client.rulesets.list()`, check phase name (`ddos_l7` or `ddos_l4`)
+
+## API Error Codes
+
+| Error Code | Message | Cause | Solution |
+|------------|---------|-------|----------|
+| 10000 | Authentication error | Invalid/missing API token | Check token has DDoS permissions |
+| 81000 | Ruleset validation failed | Invalid rule structure | Verify `action_parameters.id` is managed ruleset ID |
+| 81020 | Expression not allowed | Custom expressions on wrong plan | Use `"true"` or upgrade to Enterprise Advanced |
+| 81021 | Rule limit exceeded | Too many override rules | Reduce rules or upgrade (Enterprise Advanced: 10) |
+| 81022 | Invalid sensitivity level | Wrong sensitivity value | Use: `default`, `medium`, `low`, `eoff` |
+| 81023 | Invalid action | Wrong action for plan | Enterprise Advanced only: `log` action |
+
+## Limits
+
+| Resource/Limit | Free/Pro/Business | Enterprise | Enterprise Advanced |
+|----------------|-------------------|------------|---------------------|
+| Override rules per zone | 1 | 1 | 10 |
+| Custom expressions | ✗ | ✗ | ✓ |
+| Log action | ✗ | ✗ | ✓ |
+| Adaptive DDoS | ✗ | ✓ | ✓ |
+| Traffic history required | - | 7 days | 7 days |
+
+## Tuning Strategy
+
+1. Start with `log` action + `medium` sensitivity (Enterprise Advanced only; on other plans start with `managed_challenge`)
+2. Monitor for 24-48 hours
+3. Identify false positives, add exceptions
+4. Gradually increase to `default` sensitivity
+5. Change action from `log` → `managed_challenge` → `block`
+6. Document all adjustments
+
+## Best Practices
+
+- Test during low-traffic periods
+- Use zone-level for per-site tuning
+- Reference IP lists for easier management
+- Set appropriate alert thresholds (avoid noise)
+- Combine with WAF for layered defense
+- Avoid over-tuning (keep config simple)
+
+See [patterns.md](./patterns.md) for progressive rollout examples.
diff --git a/.agents/skills/cloudflare-deploy/references/ddos/patterns.md b/.agents/skills/cloudflare-deploy/references/ddos/patterns.md
new file mode 100644
index 0000000..a46ef2f
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/ddos/patterns.md
@@ -0,0 +1,174 @@
+# DDoS Protection Patterns
+
+## Allowlist Trusted IPs
+
+```typescript
+const config = {
+ description: "Allowlist trusted IPs",
+ rules: [{
+ expression: "ip.src in { 203.0.113.0/24 192.0.2.1 }",
+ action: "execute",
+ action_parameters: {
+ id: managedRulesetId,
+ overrides: { sensitivity_level: "eoff" },
+ },
+ }],
+};
+
+await client.accounts.rulesets.phases.entrypoint.update("ddos_l7", {
+ account_id: accountId,
+ ...config,
+});
+```
+
+## Route-specific Sensitivity
+
+```typescript
+const config = {
+ description: "Route-specific protection",
+ rules: [
+ {
+ expression: "not http.request.uri.path matches \"^/api/\"",
+ action: "execute",
+ action_parameters: {
+ id: managedRulesetId,
+ overrides: { sensitivity_level: "default", action: "block" },
+ },
+ },
+ {
+ expression: "http.request.uri.path matches \"^/api/\"",
+ action: "execute",
+ action_parameters: {
+ id: managedRulesetId,
+ overrides: { sensitivity_level: "low", action: "managed_challenge" },
+ },
+ },
+ ],
+};
+```
+
+## Progressive Enhancement
+
+```typescript
+enum ProtectionLevel { MONITORING = "monitoring", LOW = "low", MEDIUM = "medium", HIGH = "high" }
+
+const levelConfig = {
+ [ProtectionLevel.MONITORING]: { action: "log", sensitivity: "eoff" },
+ [ProtectionLevel.LOW]: { action: "managed_challenge", sensitivity: "low" },
+ [ProtectionLevel.MEDIUM]: { action: "managed_challenge", sensitivity: "medium" },
+ [ProtectionLevel.HIGH]: { action: "block", sensitivity: "default" },
+} as const;
+
+async function setProtectionLevel(zoneId: string, level: ProtectionLevel, rulesetId: string, client: Cloudflare) {
+ const settings = levelConfig[level];
+ return client.zones.rulesets.phases.entrypoint.update("ddos_l7", {
+ zone_id: zoneId,
+ rules: [{
+ expression: "true",
+ action: "execute",
+ action_parameters: { id: rulesetId, overrides: { action: settings.action, sensitivity_level: settings.sensitivity } },
+ }],
+ });
+}
+```
+
+## Dynamic Response to Attacks
+
+```typescript
+interface Env { CLOUDFLARE_API_TOKEN: string; ZONE_ID: string; KV: KVNamespace; }
+
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ if (request.url.includes("/attack-detected")) {
+ const attackData = await request.json();
+ await env.KV.put(`attack:${Date.now()}`, JSON.stringify(attackData), { expirationTtl: 86400 });
+ const recentAttacks = await getRecentAttacks(env.KV);
+ if (recentAttacks.length > 5) {
+ await setProtectionLevel(env.ZONE_ID, ProtectionLevel.HIGH, managedRulesetId, client);
+ return new Response("Protection increased");
+ }
+ }
+ return new Response("OK");
+ },
+ async scheduled(event: ScheduledEvent, env: Env): Promise<void> {
+ const recentAttacks = await getRecentAttacks(env.KV);
+ if (recentAttacks.length === 0) await setProtectionLevel(env.ZONE_ID, ProtectionLevel.MEDIUM, managedRulesetId, client);
+ },
+};
+```
+
+## Multi-rule Tiered Protection (Enterprise Advanced)
+
+```typescript
+const config = {
+ description: "Multi-tier DDoS protection",
+ rules: [
+ {
+ expression: "not ip.src in $known_ips and not cf.bot_management.score gt 30",
+ action: "execute",
+ action_parameters: { id: managedRulesetId, overrides: { sensitivity_level: "default", action: "block" } },
+ },
+ {
+ expression: "cf.bot_management.verified_bot",
+ action: "execute",
+ action_parameters: { id: managedRulesetId, overrides: { sensitivity_level: "medium", action: "managed_challenge" } },
+ },
+ {
+ expression: "ip.src in $trusted_ips",
+ action: "execute",
+ action_parameters: { id: managedRulesetId, overrides: { sensitivity_level: "low" } },
+ },
+ ],
+};
+```
+
+## Defense in Depth
+
+Layered security stack: DDoS + WAF + Rate Limiting + Bot Management.
+
+```typescript
+// Layer 1: DDoS (volumetric attacks)
+await client.zones.rulesets.phases.entrypoint.update("ddos_l7", {
+ zone_id: zoneId,
+ rules: [{ expression: "true", action: "execute", action_parameters: { id: ddosRulesetId, overrides: { sensitivity_level: "medium" } } }],
+});
+
+// Layer 2: WAF (exploit protection)
+await client.zones.rulesets.phases.entrypoint.update("http_request_firewall_managed", {
+ zone_id: zoneId,
+ rules: [{ expression: "true", action: "execute", action_parameters: { id: wafRulesetId } }],
+});
+
+// Layer 3: Rate Limiting (abuse prevention)
+await client.zones.rulesets.phases.entrypoint.update("http_ratelimit", {
+ zone_id: zoneId,
+ rules: [{ expression: "http.request.uri.path eq \"/api/login\"", action: "block", ratelimit: { characteristics: ["ip.src"], period: 60, requests_per_period: 5 } }],
+});
+
+// Layer 4: Bot Management (automation detection)
+await client.zones.rulesets.phases.entrypoint.update("http_request_sbfm", {
+ zone_id: zoneId,
+ rules: [{ expression: "cf.bot_management.score lt 30", action: "managed_challenge" }],
+});
+```
+
+## Cache Strategy for DDoS Mitigation
+
+Exclude query strings from cache key to counter randomized query parameter attacks.
+
+```typescript
+const cacheRule = {
+ expression: "http.request.uri.path matches \"^/api/\"",
+ action: "set_cache_settings",
+ action_parameters: {
+ cache: true,
+ cache_key: { ignore_query_strings_order: true, custom_key: { query_string: { exclude: { all: true } } } },
+ },
+};
+
+await client.zones.rulesets.phases.entrypoint.update("http_request_cache_settings", { zone_id: zoneId, rules: [cacheRule] });
+```
+
+**Rationale**: Attackers randomize query strings (`?random=123456`) to bypass cache. Excluding query params ensures cache hits absorb attack traffic.
+
+See [configuration.md](./configuration.md) for rule structure details.
diff --git a/.agents/skills/cloudflare-deploy/references/do-storage/README.md b/.agents/skills/cloudflare-deploy/references/do-storage/README.md
new file mode 100644
index 0000000..426d2c4
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/do-storage/README.md
@@ -0,0 +1,75 @@
+# Cloudflare Durable Objects Storage
+
+Persistent storage API for Durable Objects with SQLite and KV backends, PITR, and automatic concurrency control.
+
+## Overview
+
+DO Storage provides:
+- SQLite-backed (recommended) or KV-backed
+- SQL API + synchronous/async KV APIs
+- Automatic input/output gates (race-free)
+- 30-day point-in-time recovery (PITR)
+- Transactions and alarms
+
+**Use cases:** Stateful coordination, real-time collaboration, counters, sessions, rate limiters
+
+**Billing:** Charged by request, GB-month storage, and rowsRead/rowsWritten for SQL operations
+
+## Quick Start
+
+```typescript
+export class Counter extends DurableObject {
+ sql: SqlStorage;
+
+ constructor(ctx: DurableObjectState, env: Env) {
+ super(ctx, env);
+ this.sql = ctx.storage.sql;
+ this.sql.exec('CREATE TABLE IF NOT EXISTS data(key TEXT PRIMARY KEY, value INTEGER)');
+ }
+
+ async increment(): Promise<number> {
+ const result = this.sql.exec(
+ 'INSERT INTO data VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value = value + 1 RETURNING value',
+ 'counter', 1
+ ).one();
+ return result?.value || 1;
+ }
+}
+```
+
+## Storage Backends
+
+| Backend | Create Method | APIs | PITR |
+|---------|---------------|------|------|
+| SQLite (recommended) | `new_sqlite_classes` | SQL + sync KV + async KV | ✅ |
+| KV (legacy) | `new_classes` | async KV only | ❌ |
+
+## Core APIs
+
+- **SQL API** (`ctx.storage.sql`): Full SQLite with extensions (FTS5, JSON, math)
+- **Sync KV** (`ctx.storage.kv`): Synchronous key-value (SQLite only)
+- **Async KV** (`ctx.storage`): Asynchronous key-value (both backends)
+- **Transactions** (`transactionSync()`, `transaction()`)
+- **PITR** (`getBookmarkForTime()`, `onNextSessionRestoreBookmark()`)
+- **Alarms** (`setAlarm()`, `alarm()` handler)
+
+## Reading Order
+
+**New to DO storage:** configuration.md → api.md → patterns.md → gotchas.md
+**Building features:** patterns.md → api.md → gotchas.md
+**Debugging issues:** gotchas.md → api.md
+**Writing tests:** testing.md
+
+## In This Reference
+
+- [configuration.md](./configuration.md) - wrangler.jsonc migrations, SQLite vs KV setup, RPC binding
+- [api.md](./api.md) - SQL exec/cursors, KV methods, storage options, transactions, alarms, PITR
+- [patterns.md](./patterns.md) - Schema migrations, caching, rate limiting, batch processing, parent-child coordination
+- [gotchas.md](./gotchas.md) - Concurrency gates, INTEGER precision, transaction rules, SQL limits
+- [testing.md](./testing.md) - vitest-pool-workers setup, testing DOs with SQL/alarms/PITR
+
+## See Also
+
+- [durable-objects](../durable-objects/) - DO fundamentals and coordination patterns
+- [workers](../workers/) - Worker runtime for DO stubs
+- [d1](../d1/) - Shared database alternative to per-DO storage
diff --git a/.agents/skills/cloudflare-deploy/references/do-storage/api.md b/.agents/skills/cloudflare-deploy/references/do-storage/api.md
new file mode 100644
index 0000000..e659598
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/do-storage/api.md
@@ -0,0 +1,102 @@
+# DO Storage API Reference
+
+## SQL API
+
+```typescript
+const cursor = this.sql.exec('SELECT * FROM users WHERE email = ?', email);
+for (let row of cursor) {} // Objects: { id, name, email }
+cursor.toArray(); cursor.one(); // Single row (throws if != 1)
+for (let row of cursor.raw()) {} // Arrays: [1, "Alice", "..."]
+
+// Manual iteration
+const iter = cursor[Symbol.iterator]();
+const first = iter.next(); // { value: {...}, done: false }
+
+cursor.columnNames; // ["id", "name", "email"]
+cursor.rowsRead; cursor.rowsWritten; // Billing
+
+type User = { id: number; name: string; email: string };
+const user = this.sql.exec<User>('...', userId).one();
+```
+
+## Sync KV API (SQLite only)
+
+```typescript
+this.ctx.storage.kv.get("counter"); // undefined if missing
+this.ctx.storage.kv.put("counter", 42);
+this.ctx.storage.kv.put("user", { name: "Alice", age: 30 });
+this.ctx.storage.kv.delete("counter"); // true if existed
+
+for (let [key, value] of this.ctx.storage.kv.list()) {}
+
+// List options: start, prefix, reverse, limit
+this.ctx.storage.kv.list({ start: "user:", prefix: "user:", reverse: true, limit: 100 });
+```
+
+## Async KV API (Both backends)
+
+```typescript
+await this.ctx.storage.get("key"); // Single
+await this.ctx.storage.get(["key1", "key2"]); // Multiple (max 128)
+await this.ctx.storage.put("key", value); // Single
+await this.ctx.storage.put({ "key1": "v1", "key2": { nested: true } }); // Multiple (max 128)
+await this.ctx.storage.delete("key");
+await this.ctx.storage.delete(["key1", "key2"]);
+await this.ctx.storage.list({ prefix: "user:", limit: 100 });
+
+// Options: allowConcurrency, noCache, allowUnconfirmed
+await this.ctx.storage.get("key", { allowConcurrency: true, noCache: true });
+await this.ctx.storage.put("key", value, { allowUnconfirmed: true, noCache: true });
+```
+
+### Storage Options
+
+| Option | Methods | Effect | Use Case |
+|--------|---------|--------|----------|
+| `allowConcurrency` | get, list | Skip input gate; allow concurrent requests during read | Read-heavy metrics that don't need strict consistency |
+| `noCache` | get, put, list | Skip in-memory cache; always read from disk | Rarely-accessed data or testing storage directly |
+| `allowUnconfirmed` | put, delete | Return before write confirms (still protected by output gate) | Non-critical writes where latency matters more than confirmation |
+
+## Transactions
+
+```typescript
+// Sync (SQL/sync KV only)
+this.ctx.storage.transactionSync(() => {
+ this.sql.exec('UPDATE accounts SET balance = balance - ? WHERE id = ?', 100, 1);
+ this.sql.exec('UPDATE accounts SET balance = balance + ? WHERE id = ?', 100, 2);
+ return "result";
+});
+
+// Async
+await this.ctx.storage.transaction(async () => {
+ const value = await this.ctx.storage.get("counter");
+ await this.ctx.storage.put("counter", value + 1);
+ if (value > 100) this.ctx.storage.rollback(); // Explicit rollback
+});
+```
+
+## Point-in-Time Recovery
+
+```typescript
+await this.ctx.storage.getCurrentBookmark();
+await this.ctx.storage.getBookmarkForTime(Date.now() - 2 * 24 * 60 * 60 * 1000);
+await this.ctx.storage.onNextSessionRestoreBookmark(bookmark);
+this.ctx.abort(); // Restart to apply; bookmarks lexically comparable (earlier < later)
+```
+
+## Alarms
+
+```typescript
+await this.ctx.storage.setAlarm(Date.now() + 60000); // Timestamp or Date
+await this.ctx.storage.getAlarm();
+await this.ctx.storage.deleteAlarm();
+
+async alarm() { await this.doScheduledWork(); }
+```
+
+## Misc
+
+```typescript
+await this.ctx.storage.deleteAll(); // Atomic for SQLite; alarm NOT included
+this.ctx.storage.sql.databaseSize; // Bytes
+```
diff --git a/.agents/skills/cloudflare-deploy/references/do-storage/configuration.md b/.agents/skills/cloudflare-deploy/references/do-storage/configuration.md
new file mode 100644
index 0000000..18b41bb
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/do-storage/configuration.md
@@ -0,0 +1,112 @@
+# DO Storage Configuration
+
+## SQLite-backed (Recommended)
+
+**wrangler.jsonc:**
+```jsonc
+{
+ "migrations": [
+ {
+ "tag": "v1",
+ "new_sqlite_classes": ["Counter", "Session", "RateLimiter"]
+ }
+ ]
+}
+```
+
+**Migration lifecycle:** Migrations run once per deployment. Existing DO instances get new storage backend on next invocation. Renaming/removing classes requires `renamed_classes` or `deleted_classes` entries.
+
+## KV-backed (Legacy)
+
+**wrangler.jsonc:**
+```jsonc
+{
+ "migrations": [
+ {
+ "tag": "v1",
+ "new_classes": ["OldCounter"]
+ }
+ ]
+}
+```
+
+## TypeScript Setup
+
+```typescript
+export class MyDurableObject extends DurableObject {
+ sql: SqlStorage;
+
+ constructor(ctx: DurableObjectState, env: Env) {
+ super(ctx, env);
+ this.sql = ctx.storage.sql;
+
+ // Initialize schema
+ this.sql.exec(`
+ CREATE TABLE IF NOT EXISTS users(
+ id INTEGER PRIMARY KEY,
+ name TEXT NOT NULL,
+ email TEXT UNIQUE
+ );
+ `);
+ }
+}
+
+// Binding
+interface Env {
+ MY_DO: DurableObjectNamespace;
+}
+
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const id = env.MY_DO.idFromName('singleton');
+ const stub = env.MY_DO.get(id);
+
+ // Modern RPC: call methods directly (recommended)
+ const result = await stub.someMethod();
+ return Response.json(result);
+
+ // Legacy: forward request (still works)
+ // return stub.fetch(request);
+ }
+}
+```
+
+## CPU Limits
+
+```jsonc
+{
+ "limits": {
+ "cpu_ms": 300000 // 5 minutes (default 30s)
+ }
+}
+```
+
+## Location Control
+
+```typescript
+// Jurisdiction (GDPR/FedRAMP)
+const euNamespace = env.MY_DO.jurisdiction("eu");
+const id = euNamespace.newUniqueId();
+const stub = euNamespace.get(id);
+
+// Location hint (best effort)
+const stub = env.MY_DO.get(id, { locationHint: "enam" });
+// Hints: wnam, enam, sam, weur, eeur, apac, oc, afr, me
+```
+
+## Initialization
+
+```typescript
+export class Counter extends DurableObject {
+ value: number;
+
+ constructor(ctx: DurableObjectState, env: Env) {
+ super(ctx, env);
+
+ // Block concurrent requests during init
+ ctx.blockConcurrencyWhile(async () => {
+ this.value = (await ctx.storage.get("value")) || 0;
+ });
+ }
+}
+```
diff --git a/.agents/skills/cloudflare-deploy/references/do-storage/gotchas.md b/.agents/skills/cloudflare-deploy/references/do-storage/gotchas.md
new file mode 100644
index 0000000..8898f08
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/do-storage/gotchas.md
@@ -0,0 +1,150 @@
+# DO Storage Gotchas & Troubleshooting
+
+## Concurrency Model (CRITICAL)
+
+Durable Objects use **input/output gates** to prevent race conditions:
+
+### Input Gates
+Block new requests during storage reads from CURRENT request:
+
+```typescript
+// SAFE: Input gate active during await
+async increment() {
+ const val = await this.ctx.storage.get("counter"); // Input gate blocks other requests
+ await this.ctx.storage.put("counter", val + 1);
+ return val;
+}
+```
+
+### Output Gates
+Hold response until ALL writes from current request confirm:
+
+```typescript
+// SAFE: Output gate waits for put() to confirm before returning response
+async increment() {
+ const val = await this.ctx.storage.get("counter");
+ this.ctx.storage.put("counter", val + 1); // No await
+ return new Response(String(val)); // Response delayed until write confirms
+}
+```
+
+### Write Coalescing
+Multiple writes to same key = atomic (last write wins):
+
+```typescript
+// SAFE: All three writes coalesce atomically
+this.ctx.storage.put("key", 1);
+this.ctx.storage.put("key", 2);
+this.ctx.storage.put("key", 3); // Final value: 3
+```
+
+### Breaking Gates (DANGER)
+
+**fetch() breaks input/output gates** → allows request interleaving:
+
+```typescript
+// UNSAFE: fetch() allows another request to interleave
+async unsafe() {
+ const val = await this.ctx.storage.get("counter");
+ await fetch("https://api.example.com"); // Gate broken!
+ await this.ctx.storage.put("counter", val + 1); // Race condition possible
+}
+```
+
+**Solution:** Use `blockConcurrencyWhile()` or `transaction()`:
+
+```typescript
+// SAFE: Block concurrent requests explicitly
+async safe() {
+ return await this.ctx.blockConcurrencyWhile(async () => {
+ const val = await this.ctx.storage.get("counter");
+ await fetch("https://api.example.com");
+ await this.ctx.storage.put("counter", val + 1);
+ return val;
+ });
+}
+```
+
+### allowConcurrency Option
+
+Opt out of input gate for reads that don't need protection:
+
+```typescript
+// Allow concurrent reads (no consistency guarantee)
+const val = await this.ctx.storage.get("metrics", { allowConcurrency: true });
+```
+
+## Common Errors
+
+### "Race Condition in Concurrent Calls"
+
+**Cause:** Multiple concurrent storage operations initiated from same event (e.g., `Promise.all()`) are not protected by input gate
+**Solution:** Avoid concurrent storage operations within single event; input gate only serializes requests from different events, not operations within same event
+
+### "Direct SQL Transaction Statements"
+
+**Cause:** Using `BEGIN TRANSACTION` directly instead of transaction methods
+**Solution:** Use `this.ctx.storage.transactionSync()` for sync operations or `this.ctx.storage.transaction()` for async operations
+
+### "Async in transactionSync"
+
+**Cause:** Using async operations inside `transactionSync()` callback
+**Solution:** Use async `transaction()` method instead of `transactionSync()` when async operations needed
+
+### "TypeScript Type Mismatch at Runtime"
+
+**Cause:** Query doesn't return all fields specified in TypeScript type
+**Solution:** Ensure SQL query selects all columns that match the TypeScript type definition
+
+### "Silent Data Corruption with Large IDs"
+
+**Cause:** JavaScript numbers have 53-bit precision; SQLite INTEGER is 64-bit
+**Symptom:** IDs > 9007199254740991 (Number.MAX_SAFE_INTEGER) silently truncate/corrupt
+**Solution:** Store large IDs as TEXT:
+
+```typescript
+// BAD: Snowflake/Twitter IDs will corrupt
+this.sql.exec("CREATE TABLE events(id INTEGER PRIMARY KEY)");
+this.sql.exec("INSERT INTO events VALUES (?)", 1234567890123456789n); // Corrupts!
+
+// GOOD: Store as TEXT
+this.sql.exec("CREATE TABLE events(id TEXT PRIMARY KEY)");
+this.sql.exec("INSERT INTO events VALUES (?)", "1234567890123456789");
+```
+
+### "Alarm Not Deleted with deleteAll()"
+
+**Cause:** `deleteAll()` doesn't delete alarms automatically
+**Solution:** Call `deleteAlarm()` explicitly before `deleteAll()` to remove alarm
+
+### "Slow Performance"
+
+**Cause:** Using async KV API instead of sync API
+**Solution:** Use sync KV API (`ctx.storage.kv`) for better performance with simple key-value operations
+
+### "High Billing from Storage Operations"
+
+**Cause:** Excessive `rowsRead`/`rowsWritten` or unused objects not cleaned up
+**Solution:** Monitor `rowsRead`/`rowsWritten` metrics and ensure unused objects call `deleteAll()`
+
+### "Durable Object Overloaded"
+
+**Cause:** Single DO exceeding ~1K req/sec soft limit
+**Solution:** Shard across multiple DOs with random IDs or other distribution strategy
+
+## Limits
+
+| Limit | Value | Notes |
+|-------|-------|-------|
+| Max columns per table | 100 | SQL limitation |
+| Max string/BLOB per row | 2 MB | SQL limitation |
+| Max row size | 2 MB | SQL limitation |
+| Max SQL statement size | 100 KB | SQL limitation |
+| Max SQL parameters | 100 | SQL limitation |
+| Max LIKE/GLOB pattern | 50 B | SQL limitation |
+| SQLite storage per object | 10 GB | SQLite-backed storage |
+| SQLite key+value size | 2 MB | SQLite-backed storage |
+| KV storage per object | Unlimited | KV-style storage |
+| KV key size | 2 KiB | KV-style storage |
+| KV value size | 128 KiB | KV-style storage |
+| Request throughput | ~1K req/sec | Soft limit per DO |
diff --git a/.agents/skills/cloudflare-deploy/references/do-storage/patterns.md b/.agents/skills/cloudflare-deploy/references/do-storage/patterns.md
new file mode 100644
index 0000000..2885915
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/do-storage/patterns.md
@@ -0,0 +1,182 @@
+# DO Storage Patterns & Best Practices
+
+## Schema Migration
+
+```typescript
+export class MyDurableObject extends DurableObject {
+ constructor(ctx: DurableObjectState, env: Env) {
+ super(ctx, env);
+ this.sql = ctx.storage.sql;
+
+ // Use SQLite's built-in user_version pragma
+ const ver = this.sql.exec("PRAGMA user_version").one()?.user_version || 0;
+
+ if (ver === 0) {
+ this.sql.exec(`CREATE TABLE users(id INTEGER PRIMARY KEY, name TEXT)`);
+ this.sql.exec("PRAGMA user_version = 1");
+ }
+ if (ver === 1) {
+ this.sql.exec(`ALTER TABLE users ADD COLUMN email TEXT`);
+ this.sql.exec("PRAGMA user_version = 2");
+ }
+ }
+}
+```
+
+## In-Memory Caching
+
+```typescript
+export class UserCache extends DurableObject {
+ cache = new Map<string, User>();
+ async getUser(id: string): Promise<User | undefined> {
+ if (this.cache.has(id)) {
+ const cached = this.cache.get(id);
+ if (cached) return cached;
+ }
+ const user = await this.ctx.storage.get<User>(`user:${id}`);
+ if (user) this.cache.set(id, user);
+ return user;
+ }
+ async updateUser(id: string, data: Partial<User>) {
+ const updated = { ...await this.getUser(id), ...data };
+ this.cache.set(id, updated);
+ await this.ctx.storage.put(`user:${id}`, updated);
+ return updated;
+ }
+}
+```
+
+## Rate Limiting
+
+```typescript
+export class RateLimiter extends DurableObject {
+ async checkLimit(key: string, limit: number, window: number): Promise<boolean> {
+ const now = Date.now();
+ this.sql.exec('DELETE FROM requests WHERE key = ? AND timestamp < ?', key, now - window);
+ const count = this.sql.exec('SELECT COUNT(*) as count FROM requests WHERE key = ?', key).one().count;
+ if (count >= limit) return false;
+ this.sql.exec('INSERT INTO requests (key, timestamp) VALUES (?, ?)', key, now);
+ return true;
+ }
+}
+```
+
+## Batch Processing with Alarms
+
+```typescript
+export class BatchProcessor extends DurableObject {
+ pending: string[] = [];
+ async addItem(item: string) {
+ this.pending.push(item);
+ if (!await this.ctx.storage.getAlarm()) await this.ctx.storage.setAlarm(Date.now() + 5000);
+ }
+ async alarm() {
+ const items = [...this.pending];
+ this.pending = [];
+ this.sql.exec(`INSERT INTO processed_items (item, timestamp) VALUES ${items.map(() => "(?, ?)").join(", ")}`, ...items.flatMap(item => [item, Date.now()]));
+ }
+}
+```
+
+## Initialization Pattern
+
+```typescript
+export class Counter extends DurableObject {
+ value: number;
+ constructor(ctx: DurableObjectState, env: Env) {
+ super(ctx, env);
+ ctx.blockConcurrencyWhile(async () => { this.value = (await ctx.storage.get("value")) || 0; });
+ }
+ async increment() {
+ this.value++;
+ this.ctx.storage.put("value", this.value); // Don't await (output gate protects)
+ return this.value;
+ }
+}
+```
+
+## Safe Counter / Optimized Write
+
+```typescript
+// Input gate blocks other requests
+async getUniqueNumber(): Promise<number> {
+ let val = await this.ctx.storage.get("counter");
+ await this.ctx.storage.put("counter", val + 1);
+ return val;
+}
+
+// No await on write - output gate delays response until write confirms
+async increment(): Promise<Response> {
+ let val = await this.ctx.storage.get("counter");
+ this.ctx.storage.put("counter", val + 1);
+ return new Response(String(val));
+}
+```
+
+## Parent-Child Coordination
+
+Hierarchical DO pattern where parent manages child DOs:
+
+```typescript
+// Parent DO coordinates children
+export class Workspace extends DurableObject {
+ async createDocument(name: string): Promise<string> {
+ const docId = crypto.randomUUID();
+ const childId = this.env.DOCUMENT.idFromName(`${this.ctx.id.toString()}:${docId}`);
+ const childStub = this.env.DOCUMENT.get(childId);
+ await childStub.initialize(name);
+
+ // Track child in parent storage
+ this.sql.exec('INSERT INTO documents (id, name, created) VALUES (?, ?, ?)',
+ docId, name, Date.now());
+ return docId;
+ }
+
+ async listDocuments(): Promise<string[]> {
+ return this.sql.exec('SELECT id FROM documents').toArray().map(r => r.id);
+ }
+}
+
+// Child DO
+export class Document extends DurableObject {
+ async initialize(name: string) {
+ this.sql.exec('CREATE TABLE IF NOT EXISTS content(key TEXT PRIMARY KEY, value TEXT)');
+ this.sql.exec('INSERT INTO content VALUES (?, ?)', 'name', name);
+ }
+}
+```
+
+## Write Coalescing Pattern
+
+Multiple writes to same key coalesce atomically (last write wins):
+
+```typescript
+async updateMetrics(userId: string, actions: Action[]) {
+ // All writes coalesce - no await needed
+ for (const action of actions) {
+ this.ctx.storage.put(`user:${userId}:lastAction`, action.type);
+ this.ctx.storage.put(`user:${userId}:count`,
+ await this.ctx.storage.get(`user:${userId}:count`) + 1);
+ }
+ // Output gate ensures all writes confirm before response
+ return new Response("OK");
+}
+
+// Atomic batch with SQL
+async batchUpdate(items: Item[]) {
+ this.sql.exec('BEGIN');
+ for (const item of items) {
+ this.sql.exec('INSERT OR REPLACE INTO items VALUES (?, ?)', item.id, item.value);
+ }
+ this.sql.exec('COMMIT');
+}
+```
+
+## Cleanup
+
+```typescript
+async cleanup() {
+ await this.ctx.storage.deleteAlarm(); // Separate from deleteAll
+ await this.ctx.storage.deleteAll();
+}
+```
diff --git a/.agents/skills/cloudflare-deploy/references/do-storage/testing.md b/.agents/skills/cloudflare-deploy/references/do-storage/testing.md
new file mode 100644
index 0000000..d348d87
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/do-storage/testing.md
@@ -0,0 +1,183 @@
+# DO Storage Testing
+
+Testing Durable Objects with storage using `vitest-pool-workers`.
+
+## Setup
+
+**vitest.config.ts:**
+```typescript
+import { defineWorkersConfig } from "@cloudflare/vitest-pool-workers/config";
+
+export default defineWorkersConfig({
+ test: {
+ poolOptions: {
+ workers: { wrangler: { configPath: "./wrangler.toml" } }
+ }
+ }
+});
+```
+
+**package.json:** Add `@cloudflare/vitest-pool-workers` and `vitest` to devDependencies
+
+## Basic Testing
+
+```typescript
+import { env, runInDurableObject } from "cloudflare:test";
+import { describe, it, expect } from "vitest";
+
+ describe("Counter DO", () => {
+ it("increments counter", async () => {
+ const id = env.COUNTER.idFromName("test");
+ const stub = env.COUNTER.get(id);
+ const result = await runInDurableObject(stub, async (instance, state) => {
+ const val1 = await instance.increment();
+ const val2 = await instance.increment();
+ return { val1, val2 };
+ });
+ expect(result.val1).toBe(1);
+ expect(result.val2).toBe(2);
+ });
+ });
+```
+
+## Testing SQL Storage
+
+```typescript
+ it("creates and queries users", async () => {
+ const id = env.USER_MANAGER.idFromName("test");
+ const stub = env.USER_MANAGER.get(id);
+ await runInDurableObject(stub, async (instance, state) => {
+ await instance.createUser("alice@example.com", "Alice");
+ const user = await instance.getUser("alice@example.com");
+ expect(user).toEqual({ email: "alice@example.com", name: "Alice" });
+ });
+ });
+
+ it("handles schema migrations", async () => {
+ const id = env.USER_MANAGER.idFromName("migration-test");
+ const stub = env.USER_MANAGER.get(id);
+ await runInDurableObject(stub, async (instance, state) => {
+ const version = state.storage.sql.exec(
+ "SELECT value FROM _meta WHERE key = 'schema_version'"
+ ).one()?.value;
+ expect(version).toBe("1");
+ });
+ });
+```
+
+## Testing Alarms
+
+```typescript
+import { runDurableObjectAlarm } from "cloudflare:test";
+
+ it("processes batch on alarm", async () => {
+ const id = env.BATCH_PROCESSOR.idFromName("test");
+ const stub = env.BATCH_PROCESSOR.get(id);
+
+ // Add items
+ await runInDurableObject(stub, async (instance) => {
+ await instance.addItem("item1");
+ await instance.addItem("item2");
+ });
+
+ // Trigger alarm (resolves true if an alarm was scheduled and ran)
+ await runDurableObjectAlarm(stub);
+
+ // Verify processed
+ await runInDurableObject(stub, async (instance, state) => {
+ const count = state.storage.sql.exec(
+ "SELECT COUNT(*) as count FROM processed_items"
+ ).one().count;
+ expect(count).toBe(2);
+ });
+ });
+```
+
+## Testing Concurrency
+
+```typescript
+ it("handles concurrent increments safely", async () => {
+ const id = env.COUNTER.idFromName("concurrent-test");
+ const stub = env.COUNTER.get(id);
+
+ // Parallel increments
+ const results = await Promise.all([
+ runInDurableObject(stub, (i) => i.increment()),
+ runInDurableObject(stub, (i) => i.increment()),
+ runInDurableObject(stub, (i) => i.increment())
+ ]);
+
+ // All should get unique values
+ expect(new Set(results).size).toBe(3);
+ expect(Math.max(...results)).toBe(3);
+ });
+```
+
+## Test Isolation
+
+```typescript
+// Per-test unique IDs
+let testId: string;
+beforeEach(() => { testId = crypto.randomUUID(); });
+
+it("isolated test", async () => {
+ const id = env.MY_DO.idFromName(testId);
+ // Uses unique DO instance
+});
+
+// Cleanup pattern
+ it("with cleanup", async () => {
+ const id = env.MY_DO.idFromName("cleanup-test");
+ const stub = env.MY_DO.get(id);
+ try {
+ await runInDurableObject(stub, async (instance) => {});
+ } finally {
+ await runInDurableObject(stub, async (instance, state) => {
+ await state.storage.deleteAll();
+ });
+ }
+ });
+```
+
+## Testing PITR
+
+```typescript
+ it("restores from bookmark", async () => {
+ const id = env.MY_DO.idFromName("pitr-test");
+ const stub = env.MY_DO.get(id);
+
+ // Create checkpoint
+ const bookmark = await runInDurableObject(stub, async (instance, state) => {
+ await state.storage.put("value", 1);
+ return await state.storage.getCurrentBookmark();
+ });
+
+ // Modify and restore
+ await runInDurableObject(stub, async (instance, state) => {
+ await state.storage.put("value", 2);
+ await state.storage.onNextSessionRestoreBookmark(bookmark);
+ state.abort();
+ });
+
+ // Verify restored
+ await runInDurableObject(stub, async (instance, state) => {
+ const value = await state.storage.get("value");
+ expect(value).toBe(1);
+ });
+ });
+```
+
+## Testing Transactions
+
+```typescript
+ it("rolls back on error", async () => {
+ const id = env.BANK.idFromName("transaction-test");
+ const stub = env.BANK.get(id);
+
+ await runInDurableObject(stub, async (instance, state) => {
+ await state.storage.put("balance", 100);
+
+ await expect(
+ state.storage.transaction(async () => {
+ await state.storage.put("balance", 50);
+ throw new Error("Cancel");
+ })
+ ).rejects.toThrow("Cancel");
+
+ const balance = await state.storage.get("balance");
+ expect(balance).toBe(100); // Rolled back
+ });
+ });
+```
diff --git a/.agents/skills/cloudflare-deploy/references/durable-objects/README.md b/.agents/skills/cloudflare-deploy/references/durable-objects/README.md
new file mode 100644
index 0000000..8e96558
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/durable-objects/README.md
@@ -0,0 +1,185 @@
+# Cloudflare Durable Objects
+
+Expert guidance for building stateful applications with Cloudflare Durable Objects.
+
+## Reading Order
+
+1. **First time?** Read this overview + Quick Start
+2. **Setting up?** See [Configuration](./configuration.md)
+3. **Building features?** Use decision trees below → [Patterns](./patterns.md)
+4. **Debugging issues?** Check [Gotchas](./gotchas.md)
+5. **Deep dive?** [API](./api.md) and [DO Storage](../do-storage/README.md)
+
+## Overview
+
+Durable Objects combine compute with storage in globally-unique, strongly-consistent packages:
+- **Globally unique instances**: Each DO has unique ID for multi-client coordination
+- **Co-located storage**: Fast, strongly-consistent storage with compute
+- **Automatic placement**: Objects spawn near first request location
+- **Stateful serverless**: In-memory state + persistent storage
+- **Single-threaded**: Serial request processing (no race conditions)
+
+## Rules of Durable Objects
+
+Critical rules preventing most production issues:
+
+1. **One alarm per DO** - Schedule multiple events via queue pattern
+2. **~1K req/s per DO max** - Shard for higher throughput
+3. **Constructor runs every wake** - Keep initialization light; use lazy loading
+4. **Hibernation clears memory** - In-memory state lost; persist critical data
+5. **Use `ctx.waitUntil()` for cleanup** - Ensures completion after response sent
+6. **No setTimeout for persistence** - Use `setAlarm()` for reliable scheduling
+
+## Core Concepts
+
+### Class Structure
+All DOs extend `DurableObject` base class with constructor receiving `DurableObjectState` (storage, WebSockets, alarms) and `Env` (bindings).
+
+### Lifecycle States
+
+```
+[Not Created] → [Active] ⇄ [Hibernated] → [Evicted]
+ ↓
+ [Destroyed]
+```
+
+- **Not Created**: DO ID exists but instance never spawned
+- **Active**: Processing requests, in-memory state valid, billed per GB-hour
+- **Hibernated**: WebSocket connections open but zero compute, zero cost
+- **Evicted**: Removed from memory; next request triggers cold start
+- **Destroyed**: Data deleted via migration or manual deletion
+
+### Accessing from Workers
+Workers use bindings to get stubs, then call RPC methods directly (recommended) or use fetch handler (legacy).
+
+**RPC vs fetch() decision:**
+```
+├─ New project + compat ≥2024-04-03 → RPC (type-safe, simpler)
+├─ Need HTTP semantics (headers, status) → fetch()
+├─ Proxying requests to DO → fetch()
+└─ Legacy compatibility → fetch()
+```
+
+See [Patterns: RPC vs fetch()](./patterns.md) for examples.
+
+### ID Generation
+- `idFromName()`: Deterministic, named coordination (rate limiting, locks)
+- `newUniqueId()`: Random IDs for sharding high-throughput workloads
+- `idFromString()`: Derive from existing IDs
+- Jurisdiction option: Data locality compliance
+
+### Storage Options
+
+**Which storage API?**
+```
+├─ Structured data, relations, transactions → SQLite (recommended)
+├─ Simple KV on SQLite DO → ctx.storage.kv (sync KV)
+└─ Legacy KV-only DO → ctx.storage (async KV)
+```
+
+- **SQLite** (recommended): Structured data, transactions, 10GB/DO
+- **Synchronous KV API**: Simple key-value on SQLite objects
+- **Asynchronous KV API**: Legacy/advanced use cases
+
+See [DO Storage](../do-storage/README.md) for deep dive.
+
+### Special Features
+- **Alarms**: Schedule future execution per-DO (1 per DO - use queue pattern for multiple)
+- **WebSocket Hibernation**: Zero-cost idle connections (memory cleared on hibernation)
+- **Point-in-Time Recovery**: Restore to any point in 30 days (SQLite only)
+
+## Quick Start
+
+```typescript
+import { DurableObject } from "cloudflare:workers";
+
+export class Counter extends DurableObject {
+ async increment(): Promise<number> {
+ const result = this.ctx.storage.sql.exec(
+ `INSERT INTO counters (id, value) VALUES (1, 1)
+ ON CONFLICT(id) DO UPDATE SET value = value + 1
+ RETURNING value`
+ ).one();
+ return result.value;
+ }
+}
+
+// Worker access
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const id = env.COUNTER.idFromName("global");
+ const stub = env.COUNTER.get(id);
+ const count = await stub.increment();
+ return new Response(`Count: ${count}`);
+ }
+};
+```
+
+## Decision Trees
+
+### What do you need?
+
+```
+├─ Coordinate requests (rate limit, lock, session)
+│ → idFromName(identifier) → [Patterns: Rate Limiting/Locks](./patterns.md)
+│
+├─ High throughput (>1K req/s)
+│ → Sharding with newUniqueId() or hash → [Patterns: Sharding](./patterns.md)
+│
+├─ Real-time updates (WebSocket, chat, collab)
+│ → WebSocket hibernation + room pattern → [Patterns: Real-time](./patterns.md)
+│
+├─ Background work (cleanup, notifications, scheduled tasks)
+│ → Alarms + queue pattern (1 alarm/DO) → [Patterns: Multiple Events](./patterns.md)
+│
+└─ User sessions with expiration
+ → Session pattern + alarm cleanup → [Patterns: Session Management](./patterns.md)
+```
+
+### Which access pattern?
+
+```
+├─ New project + typed methods → RPC (compat ≥2024-04-03)
+├─ Need HTTP semantics → fetch()
+├─ Proxying to DO → fetch()
+└─ Legacy compat → fetch()
+```
+
+See [Patterns: RPC vs fetch()](./patterns.md) for examples.
+
+### Which storage?
+
+```
+├─ Structured data, SQL queries, transactions → SQLite (recommended)
+├─ Simple KV on SQLite DO → ctx.storage.kv (sync API)
+└─ Legacy KV-only DO → ctx.storage (async API)
+```
+
+See [DO Storage](../do-storage/README.md) for complete guide.
+
+## Essential Commands
+
+```bash
+npx wrangler dev # Local dev with DOs
+npx wrangler dev --remote # Test against prod DOs
+npx wrangler deploy # Deploy + auto-apply migrations
+```
+
+## Resources
+
+**Docs**: https://developers.cloudflare.com/durable-objects/
+**API Reference**: https://developers.cloudflare.com/durable-objects/api/
+**Examples**: https://developers.cloudflare.com/durable-objects/examples/
+
+## In This Reference
+
+- **[Configuration](./configuration.md)** - wrangler.jsonc setup, migrations, bindings, environments
+- **[API](./api.md)** - Class structure, ctx methods, alarms, WebSocket hibernation
+- **[Patterns](./patterns.md)** - Sharding, rate limiting, locks, real-time, sessions
+- **[Gotchas](./gotchas.md)** - Limits, hibernation caveats, common errors
+
+## See Also
+
+- **[DO Storage](../do-storage/README.md)** - SQLite, KV, transactions (detailed storage guide)
+- **[Workers](../workers/README.md)** - Core Workers runtime features
+- **[WebSockets](../websockets/README.md)** - WebSocket APIs and patterns
diff --git a/.agents/skills/cloudflare-deploy/references/durable-objects/api.md b/.agents/skills/cloudflare-deploy/references/durable-objects/api.md
new file mode 100644
index 0000000..89c7e4d
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/durable-objects/api.md
@@ -0,0 +1,187 @@
+# Durable Objects API
+
+## Class Structure
+
+```typescript
+import { DurableObject } from "cloudflare:workers";
+
+export class MyDO extends DurableObject {
+ constructor(ctx: DurableObjectState, env: Env) {
+ super(ctx, env);
+ // Runs on EVERY wake - keep light!
+ }
+
+ // RPC methods (called directly from worker)
+ async myMethod(arg: string): Promise { return arg; }
+
+ // fetch handler (legacy/HTTP semantics)
+ async fetch(req: Request): Promise<Response> { /* ... */ }
+
+ // Lifecycle handlers
+ async alarm() { /* alarm fired */ }
+ async webSocketMessage(ws: WebSocket, msg: string | ArrayBuffer) { /* ... */ }
+ async webSocketClose(ws: WebSocket, code: number, reason: string, wasClean: boolean) { /* ... */ }
+ async webSocketError(ws: WebSocket, error: unknown) { /* ... */ }
+}
+```
+
+## DurableObjectState Context Methods
+
+### Concurrency Control
+
+```typescript
+// Complete work after response sent (e.g., cleanup, logging)
+ this.ctx.waitUntil(promise: Promise<any>): void
+
+// Critical section - blocks all other requests until complete
+await this.ctx.blockConcurrencyWhile(async () => {
+ // No other requests processed during this block
+ // Use for initialization or critical operations
+})
+```
+
+**When to use:**
+- `waitUntil()`: Background cleanup, logging, non-critical work after response
+- `blockConcurrencyWhile()`: First-time init, schema migration, critical state setup
+
+### Lifecycle
+
+```typescript
+this.ctx.id // DurableObjectId of this instance
+this.ctx.abort() // Force eviction (use after PITR restore to reload state)
+```
+
+### Storage Access
+
+```typescript
+this.ctx.storage.sql // SQLite API (recommended)
+this.ctx.storage.kv // Sync KV API (SQLite DOs only)
+this.ctx.storage // Async KV API (legacy/KV-only DOs)
+```
+
+See **[DO Storage](../do-storage/README.md)** for complete storage API reference.
+
+### WebSocket Management
+
+```typescript
+this.ctx.acceptWebSocket(ws: WebSocket, tags?: string[]) // Enable hibernation
+this.ctx.getWebSockets(tag?: string): WebSocket[] // Get by tag or all
+this.ctx.getTags(ws: WebSocket): string[] // Get tags for connection
+```
+
+### Alarms
+
+```typescript
+await this.ctx.storage.setAlarm(timestamp: number | Date) // Schedule (overwrites existing)
+await this.ctx.storage.getAlarm(): number | null // Get next alarm time
+await this.ctx.storage.deleteAlarm(): void // Cancel alarm
+```
+
+**Limit:** 1 alarm per DO. Use queue pattern for multiple events (see [Patterns](./patterns.md)).
+
+## Storage APIs
+
+For detailed storage documentation including SQLite queries, KV operations, transactions, and Point-in-Time Recovery, see **[DO Storage](../do-storage/README.md)**.
+
+Quick reference:
+
+```typescript
+// SQLite (recommended)
+this.ctx.storage.sql.exec("SELECT * FROM users WHERE id = ?", userId).one()
+
+// Sync KV (SQLite DOs only)
+this.ctx.storage.kv.get("key")
+
+// Async KV (legacy)
+await this.ctx.storage.get("key")
+```
+
+## Alarms
+
+Schedule future work that survives eviction:
+
+```typescript
+// Set alarm (overwrites any existing alarm)
+await this.ctx.storage.setAlarm(Date.now() + 3600000) // 1 hour from now
+await this.ctx.storage.setAlarm(new Date("2026-02-01")) // Absolute time
+
+// Check next alarm
+const nextRun = await this.ctx.storage.getAlarm() // null if none
+
+// Cancel alarm
+await this.ctx.storage.deleteAlarm()
+
+// Handler called when alarm fires
+async alarm() {
+ // Runs once alarm triggers
+ // DO wakes from hibernation if needed
+ // Use for cleanup, notifications, scheduled tasks
+}
+```
+
+**Limitations:**
+- 1 alarm per DO maximum
+- Overwrites previous alarm when set
+- Use queue pattern for multiple scheduled events (see [Patterns](./patterns.md))
+
+**Reliability:**
+- Alarms survive DO eviction/restart
+- Cloudflare retries failed alarms automatically
+- Not guaranteed exactly-once (handle idempotently)
+
+## WebSocket Hibernation
+
+Hibernation allows DOs with open WebSocket connections to consume zero compute/memory until message arrives.
+
+```typescript
+ async fetch(req: Request): Promise<Response> {
+ const [client, server] = Object.values(new WebSocketPair());
+ this.ctx.acceptWebSocket(server, ["room:123"]); // Tags for filtering
+ server.serializeAttachment({ userId: "abc" }); // Persisted metadata
+ return new Response(null, { status: 101, webSocket: client });
+}
+
+// Called when message arrives (DO wakes from hibernation)
+async webSocketMessage(ws: WebSocket, msg: string | ArrayBuffer) {
+ const data = ws.deserializeAttachment(); // Retrieve metadata
+ for (const c of this.ctx.getWebSockets("room:123")) c.send(msg);
+}
+
+// Called on close (optional handler)
+async webSocketClose(ws: WebSocket, code: number, reason: string, wasClean: boolean) {
+ // Cleanup logic, remove from lists, etc.
+}
+
+// Called on error (optional handler)
+async webSocketError(ws: WebSocket, error: unknown) {
+ console.error("WebSocket error:", error);
+ // Handle error, close connection, etc.
+}
+```
+
+**Key concepts:**
+- **Auto-hibernation:** DO hibernates when no active requests/alarms
+- **Zero cost:** Hibernated DOs incur no charges while preserving connections
+- **Memory cleared:** All in-memory state lost on hibernation
+- **Attachment persistence:** Use `serializeAttachment()` for per-connection metadata that survives hibernation
+- **Tags for filtering:** Group connections by room/channel/user for targeted broadcasts
+
+**Handler lifecycle:**
+- `webSocketMessage`: DO wakes, processes message, may hibernate after
+- `webSocketClose`: Called when client closes (optional - implement for cleanup)
+- `webSocketError`: Called on connection error (optional - implement for error handling)
+
+**Metadata persistence:**
+```typescript
+// Store connection metadata (survives hibernation)
+ws.serializeAttachment({ userId: "abc", room: "lobby" })
+
+// Retrieve after hibernation
+const { userId, room } = ws.deserializeAttachment()
+```
+
+## See Also
+
+- **[DO Storage](../do-storage/README.md)** - Complete storage API reference
+- **[Patterns](./patterns.md)** - Real-world usage patterns
+- **[Gotchas](./gotchas.md)** - Hibernation caveats and limits
diff --git a/.agents/skills/cloudflare-deploy/references/durable-objects/configuration.md b/.agents/skills/cloudflare-deploy/references/durable-objects/configuration.md
new file mode 100644
index 0000000..651599a
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/durable-objects/configuration.md
@@ -0,0 +1,160 @@
+# Durable Objects Configuration
+
+## Basic Setup
+
+```jsonc
+{
+ "name": "my-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-01-01", // Use latest; ≥2024-04-03 for RPC
+ "durable_objects": {
+ "bindings": [
+ {
+ "name": "MY_DO", // Env binding name
+ "class_name": "MyDO" // Class exported from this worker
+ },
+ {
+ "name": "EXTERNAL", // Access DO from another worker
+ "class_name": "ExternalDO",
+ "script_name": "other-worker"
+ }
+ ]
+ },
+ "migrations": [
+ { "tag": "v1", "new_sqlite_classes": ["MyDO"] } // Prefer SQLite
+ ]
+}
+```
+
+## Binding Options
+
+```jsonc
+{
+ "name": "BINDING_NAME",
+ "class_name": "ClassName",
+ "script_name": "other-worker", // Optional: external DO
+ "environment": "production" // Optional: isolate by env
+}
+```
+
+## Jurisdiction (Data Locality)
+
+Specify jurisdiction at ID creation for data residency compliance:
+
+```typescript
+ // EU data residency - jurisdiction is set via a subnamespace
+ const id = env.MY_DO.jurisdiction("eu").idFromName("user:123")
+
+ // Or with random IDs
+ const uniqueId = env.MY_DO.newUniqueId({ jurisdiction: "eu" })
+
+ // Available jurisdictions
+ const jurisdictions = ["eu", "fedramp"] // More may be added
+
+ // All operations on this DO stay within jurisdiction
+ const stub = env.MY_DO.get(id)
+ await stub.someMethod() // Data stays in EU
+```
+
+**Key points:**
+- Set at ID creation time, immutable afterward
+- DO instance physically located within jurisdiction
+- Storage and compute guaranteed within boundary
+- Use for GDPR, FedRAMP, other compliance requirements
+- No cross-jurisdiction access (requests fail if DO in different jurisdiction)
+
+## Migrations
+
+```jsonc
+{
+ "migrations": [
+ { "tag": "v1", "new_sqlite_classes": ["MyDO"] }, // Create SQLite (recommended)
+ // { "tag": "v1", "new_classes": ["MyDO"] }, // Create KV (paid only)
+ { "tag": "v2", "renamed_classes": [{ "from": "Old", "to": "New" }] },
+ { "tag": "v3", "transferred_classes": [{ "from": "Src", "from_script": "old", "to": "Dest" }] },
+ { "tag": "v4", "deleted_classes": ["Obsolete"] } // Destroys ALL data!
+ ]
+}
+```
+
+**Migration rules:**
+- Tags must be unique and sequential (v1, v2, v3...)
+- No rollback supported (test with `--dry-run` first)
+- Auto-applied on deploy
+- `new_sqlite_classes` recommended over `new_classes` (SQLite vs KV)
+- `deleted_classes` immediately destroys ALL data (irreversible)
+
+## Environment Isolation
+
+Separate DO namespaces per environment (staging/production have distinct object instances):
+
+```jsonc
+{
+ "durable_objects": {
+ "bindings": [{ "name": "MY_DO", "class_name": "MyDO" }]
+ },
+ "env": {
+ "production": {
+ "durable_objects": {
+ "bindings": [
+ { "name": "MY_DO", "class_name": "MyDO", "environment": "production" }
+ ]
+ }
+ }
+ }
+}
+```
+
+Deploy: `npx wrangler deploy --env production`
+
+## Limits & Settings
+
+```jsonc
+{
+ "limits": {
+ "cpu_ms": 300000 // Max CPU time: 30s default, 300s max
+ }
+}
+```
+
+See [Gotchas](./gotchas.md) for complete limits table.
+
+## Types
+
+```typescript
+import { DurableObject } from "cloudflare:workers";
+
+interface Env {
+ MY_DO: DurableObjectNamespace;
+}
+
+export class MyDO extends DurableObject {}
+
+ type DurableObjectNamespace = {
+ newUniqueId(options?: { jurisdiction?: string }): DurableObjectId;
+ idFromName(name: string): DurableObjectId;
+ idFromString(id: string): DurableObjectId;
+ jurisdiction(jurisdiction: string): DurableObjectNamespace;
+ get(id: DurableObjectId): DurableObjectStub;
+ };
+```
+
+## Commands
+
+```bash
+# Development
+npx wrangler dev # Local dev
+npx wrangler dev --remote # Test against production DOs
+
+# Deployment
+npx wrangler deploy # Deploy + auto-apply migrations
+npx wrangler deploy --dry-run # Validate migrations without deploying
+npx wrangler deploy --env production
+
+# Management
+npx wrangler durable-objects list # List namespaces
+npx wrangler durable-objects info # Inspect specific DO
+npx wrangler durable-objects delete # Delete DO (destroys data)
+```
+
+## See Also
+
+- **[API](./api.md)** - DurableObjectState and lifecycle handlers
+- **[Patterns](./patterns.md)** - Multi-environment patterns
+- **[Gotchas](./gotchas.md)** - Migration caveats, limits
diff --git a/.agents/skills/cloudflare-deploy/references/durable-objects/gotchas.md b/.agents/skills/cloudflare-deploy/references/durable-objects/gotchas.md
new file mode 100644
index 0000000..72495f9
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/durable-objects/gotchas.md
@@ -0,0 +1,197 @@
+# Durable Objects Gotchas
+
+## Common Errors
+
+### "Hibernation Cleared My In-Memory State"
+
+**Problem:** Variables lost after hibernation
+**Cause:** DO auto-hibernates when idle; in-memory state not persisted
+**Solution:** Use `ctx.storage` for critical data, `ws.serializeAttachment()` for per-connection metadata
+
+```typescript
+// ❌ Wrong - lost on hibernation
+private userCount = 0;
+async webSocketMessage(ws: WebSocket, msg: string) {
+ this.userCount++; // Lost!
+}
+
+// ✅ Right - persisted
+async webSocketMessage(ws: WebSocket, msg: string) {
+ const count = this.ctx.storage.kv.get("userCount") || 0;
+ this.ctx.storage.kv.put("userCount", count + 1);
+}
+```
+
+### "setTimeout Didn't Fire After Restart"
+
+**Problem:** Scheduled work lost on eviction
+**Cause:** `setTimeout` in-memory only; eviction clears timers
+**Solution:** Use `ctx.storage.setAlarm()` for reliable scheduling
+
+```typescript
+// ❌ Wrong - lost on eviction
+setTimeout(() => this.cleanup(), 3600000);
+
+// ✅ Right - survives eviction
+await this.ctx.storage.setAlarm(Date.now() + 3600000);
+async alarm() { await this.cleanup(); }
+```
+
+### "Constructor Runs on Every Wake"
+
+**Problem:** Expensive init logic slows all requests
+**Cause:** Constructor runs on every wake (first request after eviction OR after hibernation)
+**Solution:** Lazy initialization or cache in storage
+
+**Critical understanding:** Constructor runs in two scenarios:
+1. **Cold start** - DO evicted from memory, first request creates new instance
+2. **Wake from hibernation** - DO with WebSockets hibernated, message/alarm wakes it
+
+```typescript
+// ❌ Wrong - expensive on every wake
+constructor(ctx: DurableObjectState, env: Env) {
+ super(ctx, env);
+ this.heavyData = this.loadExpensiveData(); // Slow!
+}
+
+// ✅ Right - lazy load
+private heavyData?: HeavyData;
+private getHeavyData() {
+ if (!this.heavyData) this.heavyData = this.loadExpensiveData();
+ return this.heavyData;
+}
+```
+
+### "Durable Object Overloaded (503 errors)"
+
+**Problem:** 503 errors under load
+**Cause:** Single DO exceeding ~1K req/s throughput limit
+**Solution:** Shard across multiple DOs (see [Patterns: Sharding](./patterns.md))
+
+### "Storage Quota Exceeded (Write failures)"
+
+**Problem:** Write operations failing
+**Cause:** DO storage exceeding 10GB limit or account quota
+**Solution:** Cleanup with alarms, use `deleteAll()` for old data, upgrade plan
+
+### "CPU Time Exceeded (Terminated)"
+
+**Problem:** Request terminated mid-execution
+**Cause:** Processing exceeding 30s CPU time default limit
+**Solution:** Increase `limits.cpu_ms` in wrangler.jsonc (max 300s) or chunk work
+
+### "WebSockets Disconnect on Eviction"
+
+**Problem:** Connections drop unexpectedly
+**Cause:** DO evicted from memory without hibernation API
+**Solution:** Use WebSocket hibernation handlers + client reconnection logic
+
+### "Migration Failed (Deploy error)"
+
+**Cause:** Non-unique tags, non-sequential tags, or invalid class names in migration
+**Solution:** Check tag uniqueness/sequential ordering and verify class names are correct
+
+### "RPC Method Not Found"
+
+**Cause:** compatibility_date < 2024-04-03 preventing RPC usage
+**Solution:** Update compatibility_date to >= 2024-04-03 or use fetch() instead of RPC
+
+### "Only One Alarm Allowed"
+
+**Cause:** Need multiple scheduled tasks but only one alarm supported per DO
+**Solution:** Use event queue pattern to schedule multiple tasks with single alarm
+
+### "Race Condition Despite Single-Threading"
+
+**Problem:** Concurrent requests see inconsistent state
+**Cause:** Async operations allow request interleaving (await = yield point)
+**Solution:** Use `blockConcurrencyWhile()` for critical sections or atomic storage ops
+
+```typescript
+// ❌ Wrong - race condition
+async incrementCounter() {
+ const count = await this.ctx.storage.get("count") || 0;
+ // ⚠️ Another request could execute here during await
+ await this.ctx.storage.put("count", count + 1);
+}
+
+// ✅ Right - atomic operation
+async incrementCounter() {
+ return this.ctx.storage.sql.exec(
+ "INSERT INTO counters (id, value) VALUES (1, 1) ON CONFLICT(id) DO UPDATE SET value = value + 1 RETURNING value"
+ ).one().value;
+}
+
+// ✅ Right - explicit locking
+async criticalOperation() {
+ await this.ctx.blockConcurrencyWhile(async () => {
+ const count = await this.ctx.storage.get("count") || 0;
+ await this.ctx.storage.put("count", count + 1);
+ });
+}
+```
+
+### "Migration Rollback Not Supported"
+
+**Cause:** Attempting to rollback a migration after deployment
+**Solution:** Test with `--dry-run` before deploying; migrations cannot be rolled back
+
+### "deleted_classes Destroys Data"
+
+**Problem:** Migration deleted all data
+**Cause:** `deleted_classes` migration immediately destroys all DO instances and data
+**Solution:** Test with `--dry-run`; use `transferred_classes` to preserve data during moves
+
+### "Cold Starts Are Slow"
+
+**Problem:** First request after eviction takes longer
+**Cause:** DO constructor + initial storage access on cold start
+**Solution:** Expected behavior; optimize constructor, use connection pooling in clients, consider warming strategy for critical DOs
+
+```typescript
+// Warming strategy (periodically ping critical DOs)
+export default {
+ async scheduled(event: ScheduledEvent, env: Env) {
+ const criticalIds = ["auth", "sessions", "locks"];
+ await Promise.all(criticalIds.map(name => {
+ const id = env.MY_DO.idFromName(name);
+ const stub = env.MY_DO.get(id);
+ return stub.ping(); // Keep warm
+ }));
+ }
+};
+```
+
+## Limits
+
+| Limit | Free | Paid | Notes |
+|-------|------|------|-------|
+| SQLite storage per DO | 10 GB | 10 GB | Per Durable Object instance |
+| SQLite total storage | 5 GB | Unlimited | Account-wide quota |
+| Key+value size | 2 MB | 2 MB | Single KV pair (SQLite/async) |
+| CPU time default | 30s | 30s | Per request; configurable |
+| CPU time max | 300s | 300s | Set via `limits.cpu_ms` |
+| DO classes | 100 | 500 | Distinct DO class definitions |
+| SQL columns | 100 | 100 | Per table |
+| SQL statement size | 100 KB | 100 KB | Max SQL query size |
+| WebSocket message size | 32 MiB | 32 MiB | Per message |
+| Request throughput | ~1K req/s | ~1K req/s | Per DO (soft limit - shard for more) |
+| Alarms per DO | 1 | 1 | Use queue pattern for multiple events |
+| Total DOs | Unlimited | Unlimited | Create as many instances as needed |
+| WebSockets | Unlimited | Unlimited | Within 128MB memory limit per DO |
+| Memory per DO | 128 MB | 128 MB | In-memory state + WebSocket buffers |
+
+## Hibernation Caveats
+
+1. **Memory cleared** - All in-memory variables lost; reconstruct from storage or `deserializeAttachment()`
+2. **Constructor reruns** - Runs on wake; avoid expensive operations, use lazy initialization
+3. **No guarantees** - DO may evict instead of hibernate; design for both
+4. **Attachment limit** - `serializeAttachment()` data must be JSON-serializable, keep small
+5. **Alarm wakes DO** - Alarm prevents hibernation until handler completes
+6. **WebSocket state not automatic** - Must explicitly persist with `serializeAttachment()` or storage
+
+## See Also
+
+- **[Patterns](./patterns.md)** - Workarounds for common limitations
+- **[API](./api.md)** - Storage limits and quotas
+- **[Configuration](./configuration.md)** - Setting CPU limits
diff --git a/.agents/skills/cloudflare-deploy/references/durable-objects/patterns.md b/.agents/skills/cloudflare-deploy/references/durable-objects/patterns.md
new file mode 100644
index 0000000..d91f382
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/durable-objects/patterns.md
@@ -0,0 +1,201 @@
+# Durable Objects Patterns
+
+## When to Use Which Pattern
+
+| Need | Pattern | ID Strategy |
+|------|---------|-------------|
+| Rate limit per user/IP | Rate Limiting | `idFromName(identifier)` |
+| Mutual exclusion | Distributed Lock | `idFromName(resource)` |
+| >1K req/s throughput | Sharding | `newUniqueId()` or hash |
+| Real-time updates | WebSocket Collab | `idFromName(room)` |
+| User sessions | Session Management | `idFromName(sessionId)` |
+| Background cleanup | Alarm-based | Any |
+
+## RPC vs fetch()
+
+**RPC** (compat ≥2024-04-03): Type-safe, simpler, default for new projects
+**fetch()**: Legacy compat, HTTP semantics, proxying
+
+```typescript
+const count = await stub.increment(); // RPC
+const count = await (await stub.fetch(req)).json(); // fetch()
+```
+
+## Sharding (High Throughput)
+
+Single DO ~1K req/s max. Shard for higher throughput:
+
+```typescript
+export default {
+ async fetch(req: Request, env: Env): Promise<Response> {
+ const userId = new URL(req.url).searchParams.get("user");
+ const hash = hashCode(userId) % 100; // 100 shards
+ const id = env.COUNTER.idFromName(`shard:${hash}`);
+ return env.COUNTER.get(id).fetch(req);
+ }
+};
+
+function hashCode(str: string): number {
+ let hash = 0;
+ for (let i = 0; i < str.length; i++) hash = ((hash << 5) - hash) + str.charCodeAt(i);
+ return Math.abs(hash);
+}
+```
+
+**Decisions:**
+- **Shard count**: 10-1000 typical (start with 100, measure, adjust)
+- **Shard key**: User ID, IP, session - must distribute evenly (use hash)
+- **Aggregation**: Coordinator DO or external system (D1, R2)
+
+## Rate Limiting
+
+```typescript
+async checkLimit(key: string, limit: number, windowMs: number): Promise<boolean> {
+ const req = this.ctx.storage.sql.exec("SELECT COUNT(*) as count FROM requests WHERE key = ? AND timestamp > ?", key, Date.now() - windowMs).one();
+ if (req.count >= limit) return false;
+ this.ctx.storage.sql.exec("INSERT INTO requests (key, timestamp) VALUES (?, ?)", key, Date.now());
+ return true;
+}
+```
+
+## Distributed Lock
+
+```typescript
+private held = false;
+async acquire(timeoutMs = 5000): Promise<boolean> {
+ if (this.held) return false;
+ this.held = true;
+ await this.ctx.storage.setAlarm(Date.now() + timeoutMs);
+ return true;
+}
+async release() { this.held = false; await this.ctx.storage.deleteAlarm(); }
+async alarm() { this.held = false; } // Auto-release on timeout
+```
+
+## Hibernation-Aware Pattern
+
+Preserve state across hibernation:
+
+```typescript
+async fetch(req: Request): Promise<Response> {
+ const [client, server] = Object.values(new WebSocketPair());
+ const userId = new URL(req.url).searchParams.get("user");
+ server.serializeAttachment({ userId }); // Survives hibernation
+ this.ctx.acceptWebSocket(server, ["room:lobby"]);
+ server.send(JSON.stringify({ type: "init", state: this.ctx.storage.kv.get("state") }));
+ return new Response(null, { status: 101, webSocket: client });
+}
+
+async webSocketMessage(ws: WebSocket, msg: string) {
+ const { userId } = ws.deserializeAttachment(); // Retrieve after wake
+ const state = this.ctx.storage.kv.get("state") || {};
+ state[userId] = JSON.parse(msg);
+ this.ctx.storage.kv.put("state", state);
+ for (const c of this.ctx.getWebSockets("room:lobby")) c.send(msg);
+}
+```
+
+## Real-time Collaboration
+
+Broadcast updates to all connected clients:
+
+```typescript
+async webSocketMessage(ws: WebSocket, msg: string) {
+ const data = JSON.parse(msg);
+ this.ctx.storage.kv.put("doc", data.content); // Persist
+ for (const c of this.ctx.getWebSockets()) if (c !== ws) c.send(msg); // Broadcast
+}
+```
+
+### WebSocket Reconnection
+
+**Client-side** (exponential backoff):
+```typescript
+class ResilientWS {
+ private delay = 1000;
+ connect(url: string) {
+ const ws = new WebSocket(url);
+ ws.onclose = () => setTimeout(() => {
+ this.connect(url);
+ this.delay = Math.min(this.delay * 2, 30000);
+ }, this.delay);
+ }
+}
+```
+
+**Server-side** (cleanup on close):
+```typescript
+async webSocketClose(ws: WebSocket, code: number, reason: string, wasClean: boolean) {
+ const { userId } = ws.deserializeAttachment();
+ this.ctx.storage.sql.exec("UPDATE users SET online = false WHERE id = ?", userId);
+ for (const c of this.ctx.getWebSockets()) c.send(JSON.stringify({ type: "user_left", userId }));
+}
+```
+
+## Session Management
+
+```typescript
+async createSession(userId: string, data: object): Promise<string> {
+ const id = crypto.randomUUID(), exp = Date.now() + 86400000;
+ this.ctx.storage.sql.exec("INSERT INTO sessions VALUES (?, ?, ?, ?)", id, userId, JSON.stringify(data), exp);
+ await this.ctx.storage.setAlarm(exp);
+ return id;
+}
+
+async getSession(id: string): Promise<object | null> {
+ const row = this.ctx.storage.sql.exec("SELECT data FROM sessions WHERE id = ? AND expires_at > ?", id, Date.now()).one();
+ return row ? JSON.parse(row.data) : null;
+}
+
+async alarm() { this.ctx.storage.sql.exec("DELETE FROM sessions WHERE expires_at <= ?", Date.now()); }
+```
+
+## Multiple Events (Single Alarm)
+
+Queue pattern to schedule multiple events:
+
+```typescript
+async scheduleEvent(id: string, runAt: number) {
+ await this.ctx.storage.put(`event:${id}`, { id, runAt });
+ const curr = await this.ctx.storage.getAlarm();
+ if (!curr || runAt < curr) await this.ctx.storage.setAlarm(runAt);
+}
+
+async alarm() {
+ const events = await this.ctx.storage.list({ prefix: "event:" }), now = Date.now();
+ let next = null;
+ for (const [key, ev] of events) {
+ if (ev.runAt <= now) {
+ await this.processEvent(ev);
+ await this.ctx.storage.delete(key);
+ } else if (!next || ev.runAt < next) next = ev.runAt;
+ }
+ if (next) await this.ctx.storage.setAlarm(next);
+}
+```
+
+## Graceful Cleanup
+
+Use `ctx.waitUntil()` to complete work after response:
+
+```typescript
+async myMethod() {
+ const response = { success: true };
+ this.ctx.waitUntil(this.ctx.storage.sql.exec("DELETE FROM old_data WHERE timestamp < ?", cutoff));
+ return response;
+}
+```
+
+## Best Practices
+
+- **Design**: Use `idFromName()` for coordination, `newUniqueId()` for sharding, minimize constructor work
+- **Storage**: Prefer SQLite, batch with transactions, set alarms for cleanup, use PITR before risky ops
+- **Performance**: ~1K req/s per DO max - shard for more, cache in memory, use alarms for deferred work
+- **Reliability**: Handle 503 with retry+backoff, design for cold starts, test migrations with `--dry-run`
+- **Security**: Validate inputs in Workers, rate limit DO creation, use jurisdiction for compliance
+
+## See Also
+
+- **[API](./api.md)** - ctx methods, WebSocket handlers
+- **[Gotchas](./gotchas.md)** - Hibernation caveats, common errors
+- **[DO Storage](../do-storage/README.md)** - Storage patterns and transactions
diff --git a/.agents/skills/cloudflare-deploy/references/email-routing/README.md b/.agents/skills/cloudflare-deploy/references/email-routing/README.md
new file mode 100644
index 0000000..7fa902e
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-routing/README.md
@@ -0,0 +1,89 @@
+# Cloudflare Email Routing Skill Reference
+
+## Overview
+
+Cloudflare Email Routing enables custom email addresses for your domain that route to verified destination addresses. It's free, privacy-focused (no storage/access), and includes Email Workers for programmatic email processing.
+
+**Available to all Cloudflare customers using Cloudflare as authoritative nameserver.**
+
+## Quick Start
+
+```typescript
+// Basic email handler
+export default {
+ async email(message, env, ctx) {
+ // CRITICAL: Must consume stream before response
+ const parser = new PostalMime.default();
+ const email = await parser.parse(await message.raw.arrayBuffer());
+
+ // Process email
+ console.log(`From: ${message.from}, Subject: ${email.subject}`);
+
+ // Forward or reject
+ await message.forward("verified@destination.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## Reading Order
+
+**Start here based on your goal:**
+
+1. **New to Email Routing?** → [configuration.md](configuration.md) → [patterns.md](patterns.md)
+2. **Adding Workers?** → [api.md](api.md) § Worker Runtime API → [patterns.md](patterns.md)
+3. **Sending emails?** → [api.md](api.md) § SendEmail Binding
+4. **Managing via API?** → [api.md](api.md) § REST API Operations
+5. **Debugging issues?** → [gotchas.md](gotchas.md)
+
+## Decision Tree
+
+```
+Need to receive emails?
+├─ Simple forwarding only? → Dashboard rules (configuration.md)
+├─ Complex logic/filtering? → Email Workers (api.md + patterns.md)
+└─ Parse attachments/body? → postal-mime library (patterns.md § Parse Email)
+
+Need to send emails?
+├─ From Worker? → SendEmail binding (api.md § SendEmail)
+└─ From external app? → Use external SMTP/API service
+
+Having issues?
+├─ Email not arriving? → gotchas.md § Mail Authentication
+├─ Worker crashing? → gotchas.md § Stream Consumption
+└─ Forward failing? → gotchas.md § Destination Verification
+```
+
+## Key Concepts
+
+**Routing Rules**: Pattern-based forwarding configured via Dashboard/API. Simple but limited.
+
+**Email Workers**: Custom TypeScript handlers with full email access. Handles complex logic, parsing, storage, rejection.
+
+**SendEmail Binding**: Outbound email API for Workers. Transactional email only (no marketing/bulk).
+
+**ForwardableEmailMessage**: Runtime interface for incoming emails. Provides headers, raw stream, forward/reject methods.
+
+## In This Reference
+
+- **[configuration.md](configuration.md)** - Setup, deployment, wrangler config
+- **[api.md](api.md)** - REST API + Worker runtime API + types
+- **[patterns.md](patterns.md)** - Common patterns with working examples
+- **[gotchas.md](gotchas.md)** - Critical pitfalls, troubleshooting, limits
+
+## Architecture
+
+```
+Internet → MX Records → Cloudflare Email Routing
+ ├─ Routing Rules (dashboard)
+ └─ Email Worker (your code)
+ ├─ Forward to destination
+ ├─ Reject with reason
+ ├─ Store in R2/KV/D1
+ └─ Send outbound (SendEmail)
+```
+
+## See Also
+
+- [Cloudflare Docs: Email Routing](https://developers.cloudflare.com/email-routing/)
+- [Cloudflare Docs: Email Workers](https://developers.cloudflare.com/email-routing/email-workers/)
+- [postal-mime npm package](https://www.npmjs.com/package/postal-mime)
diff --git a/.agents/skills/cloudflare-deploy/references/email-routing/api.md b/.agents/skills/cloudflare-deploy/references/email-routing/api.md
new file mode 100644
index 0000000..33b8bf0
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-routing/api.md
@@ -0,0 +1,195 @@
+# Email Routing API Reference
+
+## Worker Runtime API
+
+### Email Handler Interface
+
+```typescript
+interface ExportedHandler {
+ email?(message: ForwardableEmailMessage, env: Env, ctx: ExecutionContext): void | Promise<void>;
+}
+```
+
+### ForwardableEmailMessage
+
+Main interface for incoming emails:
+
+```typescript
+interface ForwardableEmailMessage {
+ readonly from: string; // Envelope sender (e.g., "sender@example.com")
+ readonly to: string; // Envelope recipient (e.g., "you@yourdomain.com")
+ readonly headers: Headers; // Web API Headers object
+ readonly raw: ReadableStream; // Raw MIME message stream
+
+ setReject(reason: string): void;
+ forward(rcptTo: string, headers?: Headers): Promise<void>;
+}
+```
+
+**Key Properties:**
+
+| Property | Type | Description |
+|----------|------|-------------|
+| `from` | `string` | Envelope sender (MAIL FROM), not header From |
+| `to` | `string` | Envelope recipient (RCPT TO), not header To |
+| `headers` | `Headers` | Email headers (Subject, From, To, etc.) |
+| `raw` | `ReadableStream` | Raw MIME message (consume once only) |
+
+**Methods:**
+
+- `setReject(reason)`: Reject email with bounce message
+- `forward(rcptTo, headers?)`: Forward to verified destination, optionally add headers
+
+### Headers Object
+
+Standard Web API Headers interface:
+
+```typescript
+// Access headers
+const subject = message.headers.get("subject");
+const from = message.headers.get("from");
+const messageId = message.headers.get("message-id");
+
+// Check spam score
+const spamScore = parseFloat(message.headers.get("x-cf-spamh-score") || "0");
+if (spamScore > 5) {
+ message.setReject("Spam detected");
+}
+```
+
+### Common Headers
+
+`subject`, `from`, `to`, `x-cf-spamh-score` (spam score), `message-id` (deduplication), `dkim-signature` (auth)
+
+### Envelope vs Header Addresses
+
+**Critical distinction:**
+
+```typescript
+// Envelope addresses (routing, auth checks)
+message.from // "bounce@sender.com" (actual sender)
+message.to // "you@yourdomain.com" (your address)
+
+// Header addresses (display, user-facing)
+message.headers.get("from") // "Alice <alice@sender.com>"
+message.headers.get("to") // "Bob <bob@yourdomain.com>"
+```
+
+**Use envelope addresses for:**
+- Authentication/SPF checks
+- Routing decisions
+- Bounce handling
+
+**Use header addresses for:**
+- Display to users
+- Reply-To logic
+- User-facing filtering
+
+## SendEmail Binding
+
+Outbound email API for transactional messages.
+
+### Configuration
+
+```jsonc
+// wrangler.jsonc
+{
+ "send_email": [
+ { "name": "EMAIL" }
+ ]
+}
+```
+
+### TypeScript Types
+
+```typescript
+interface Env {
+ EMAIL: SendEmail;
+}
+
+interface SendEmail {
+ send(message: EmailMessage): Promise<void>;
+}
+
+interface EmailMessage {
+ from: string | { name?: string; email: string };
+ to: string | { name?: string; email: string } | Array<string | { name?: string; email: string }>;
+ subject: string;
+ text?: string;
+ html?: string;
+ headers?: Headers;
+ reply_to?: string | { name?: string; email: string };
+}
+```
+
+### Send Email Example
+
+```typescript
+interface Env {
+ EMAIL: SendEmail;
+}
+
+export default {
+ async fetch(request, env, ctx): Promise<Response> {
+ await env.EMAIL.send({
+ from: { name: "Acme Corp", email: "noreply@yourdomain.com" },
+ to: [
+ { name: "Alice", email: "alice@example.com" },
+ "bob@example.com"
+ ],
+ subject: "Your order #12345 has shipped",
+ text: "Track your package at: https://track.example.com/12345",
+ html: "<p>Track your package at: <a href=\"https://track.example.com/12345\">View tracking</a></p>",
+ reply_to: { name: "Support", email: "support@yourdomain.com" }
+ });
+
+ return new Response("Email sent");
+ }
+} satisfies ExportedHandler;
+```
+
+### SendEmail Constraints
+
+- **From address**: Must be on verified domain (your domain with Email Routing enabled)
+- **Volume limits**: Transactional only, no bulk/marketing email
+- **Rate limits**: 100 emails/minute on Free plan, higher on Paid
+- **No attachments**: Use links to hosted files instead
+- **No DKIM control**: Cloudflare signs automatically
+
+## REST API Operations
+
+Base URL: `https://api.cloudflare.com/client/v4`
+
+### Authentication
+
+```bash
+curl -H "Authorization: Bearer $API_TOKEN" https://api.cloudflare.com/client/v4/...
+```
+
+### Key Endpoints
+
+| Operation | Method | Endpoint |
+|-----------|--------|----------|
+| Enable routing | POST | `/zones/{zone_id}/email/routing/enable` |
+| Disable routing | POST | `/zones/{zone_id}/email/routing/disable` |
+| List rules | GET | `/zones/{zone_id}/email/routing/rules` |
+| Create rule | POST | `/zones/{zone_id}/email/routing/rules` |
+| Verify destination | POST | `/zones/{zone_id}/email/routing/addresses` |
+| List destinations | GET | `/zones/{zone_id}/email/routing/addresses` |
+
+### Create Routing Rule Example
+
+```bash
+curl -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/email/routing/rules" \
+ -H "Authorization: Bearer $API_TOKEN" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "enabled": true,
+ "name": "Forward sales",
+ "matchers": [{"type": "literal", "field": "to", "value": "sales@yourdomain.com"}],
+ "actions": [{"type": "forward", "value": ["alice@company.com"]}],
+ "priority": 0
+ }'
+```
+
+Matcher types: `literal` (exact match), `all` (catch-all).
diff --git a/.agents/skills/cloudflare-deploy/references/email-routing/configuration.md b/.agents/skills/cloudflare-deploy/references/email-routing/configuration.md
new file mode 100644
index 0000000..3f9613e
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-routing/configuration.md
@@ -0,0 +1,186 @@
+# Email Routing Configuration
+
+## Wrangler Configuration
+
+### Basic Email Worker
+
+```jsonc
+// wrangler.jsonc
+{
+ "name": "email-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-01-01",
+ "send_email": [{ "name": "EMAIL" }]
+}
+```
+
+```typescript
+// src/index.ts
+export default {
+ async email(message, env, ctx) {
+ await message.forward("destination@example.com");
+ }
+} satisfies ExportedHandler;
+```
+
+### With Storage Bindings
+
+```jsonc
+{
+ "name": "email-processor",
+ "send_email": [{ "name": "EMAIL" }],
+ "kv_namespaces": [{ "binding": "KV", "id": "abc123" }],
+ "r2_buckets": [{ "binding": "R2", "bucket_name": "emails" }],
+ "d1_databases": [{ "binding": "DB", "database_id": "def456" }]
+}
+```
+
+```typescript
+interface Env {
+ EMAIL: SendEmail;
+ KV: KVNamespace;
+ R2: R2Bucket;
+ DB: D1Database;
+}
+```
+
+## Local Development
+
+```bash
+npx wrangler dev
+
+# Test with curl
+curl -X POST 'http://localhost:8787/__email' \
+ --header 'content-type: message/rfc822' \
+ --data 'From: test@example.com
+To: you@yourdomain.com
+Subject: Test
+
+Body'
+```
+
+## Deployment
+
+```bash
+npx wrangler deploy
+```
+
+**Connect to Email Routing:**
+
+Dashboard: Email > Email Routing > [domain] > Settings > Email Workers > Select worker
+
+API:
+```bash
+curl -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/email/routing/settings" \
+ -H "Authorization: Bearer $API_TOKEN" \
+ -d '{"enabled": true, "worker": "email-worker"}'
+```
+
+## DNS (Auto-Created)
+
+```dns
+yourdomain.com. IN MX 1 isaac.mx.cloudflare.net.
+yourdomain.com. IN MX 2 linda.mx.cloudflare.net.
+yourdomain.com. IN MX 3 amir.mx.cloudflare.net.
+yourdomain.com. IN TXT "v=spf1 include:_spf.mx.cloudflare.net ~all"
+```
+
+## Secrets & Variables
+
+```bash
+# Secrets (encrypted)
+npx wrangler secret put API_KEY
+
+# Variables (plain)
+# wrangler.jsonc
+{ "vars": { "THRESHOLD": "5.0" } }
+```
+
+```typescript
+interface Env {
+ API_KEY: string;
+ THRESHOLD: string;
+}
+```
+
+## TypeScript Setup
+
+```bash
+npm install --save-dev @cloudflare/workers-types
+```
+
+```json
+// tsconfig.json
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "ES2022",
+ "lib": ["ES2022"],
+ "types": ["@cloudflare/workers-types"],
+ "moduleResolution": "bundler",
+ "strict": true
+ }
+}
+```
+
+```typescript
+import type { ForwardableEmailMessage } from "@cloudflare/workers-types";
+
+export default {
+ async email(message: ForwardableEmailMessage, env: Env, ctx: ExecutionContext): Promise<void> {
+ await message.forward("dest@example.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## Dependencies
+
+```bash
+npm install postal-mime
+```
+
+```typescript
+import PostalMime from 'postal-mime';
+
+export default {
+ async email(message, env, ctx) {
+ const parser = new PostalMime();
+ const email = await parser.parse(await message.raw.arrayBuffer());
+ console.log(email.subject);
+ await message.forward("inbox@corp.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## Multi-Environment
+
+```bash
+# wrangler.dev.jsonc
+{ "name": "worker-dev", "vars": { "ENV": "dev" } }
+
+# wrangler.prod.jsonc
+{ "name": "worker-prod", "vars": { "ENV": "prod" } }
+
+npx wrangler deploy --config wrangler.dev.jsonc
+npx wrangler deploy --config wrangler.prod.jsonc
+```
+
+## CI/CD (GitHub Actions)
+
+```yaml
+# .github/workflows/deploy.yml
+name: Deploy
+on:
+ push:
+ branches: [main]
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-node@v3
+ - run: npm ci
+ - run: npx wrangler deploy
+ env:
+ CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+```
diff --git a/.agents/skills/cloudflare-deploy/references/email-routing/gotchas.md b/.agents/skills/cloudflare-deploy/references/email-routing/gotchas.md
new file mode 100644
index 0000000..20ea419
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-routing/gotchas.md
@@ -0,0 +1,196 @@
+# Gotchas & Troubleshooting
+
+## Critical Pitfalls
+
+### Stream Consumption (MOST COMMON)
+
+**Problem:** "stream already consumed" or worker hangs
+
+**Cause:** `message.raw` is `ReadableStream` - consume once only
+
+**Solution:**
+```typescript
+// ❌ WRONG
+const email1 = await parser.parse(await message.raw.arrayBuffer());
+const email2 = await parser.parse(await message.raw.arrayBuffer()); // FAILS
+
+// ✅ CORRECT
+const raw = await message.raw.arrayBuffer();
+const email = await parser.parse(raw);
+```
+
+Consume `message.raw` immediately before any async operations.
+
+### Destination Verification
+
+**Problem:** Emails not forwarding
+
+**Cause:** Destination unverified
+
+**Solution:** Add destination, check inbox for verification email, click link. Verify status: `GET /zones/{id}/email/routing/addresses`
+
+### Mail Authentication
+
+**Problem:** Legitimate emails rejected
+
+**Cause:** Missing SPF/DKIM/DMARC on sender domain
+
+**Solution:** Configure sender DNS:
+```dns
+example.com. IN TXT "v=spf1 include:_spf.example.com ~all"
+selector._domainkey.example.com. IN TXT "v=DKIM1; k=rsa; p=..."
+_dmarc.example.com. IN TXT "v=DMARC1; p=quarantine"
+```
+
+### Envelope vs Header
+
+**Problem:** Filtering on wrong address
+
+**Solution:**
+```typescript
+// Routing/auth: envelope
+if (message.from === "trusted@example.com") { }
+
+// Display: headers
+const display = message.headers.get("from");
+```
+
+### SendEmail Limits
+
+| Issue | Limit | Solution |
+|-------|-------|----------|
+| From domain | Must own | Use Email Routing domain |
+| Volume | ~100/min Free | Upgrade or throttle |
+| Attachments | Not supported | Link to R2 |
+| Type | Transactional | No bulk |
+
+## Common Errors
+
+### CPU Time Exceeded
+
+**Cause:** Heavy parsing, large emails
+
+**Solution:**
+```typescript
+const size = parseInt(message.headers.get("content-length") || "0") / 1024 / 1024;
+if (size > 20) {
+ message.setReject("Too large");
+ return;
+}
+
+ctx.waitUntil(expensiveWork());
+await message.forward("dest@example.com");
+```
+
+### Rule Not Triggering
+
+**Causes:** Priority conflict, matcher error, catch-all override
+
+**Solution:** Check priority (lower=first), verify exact match, confirm destination verified
+
+### Undefined Property
+
+**Cause:** Missing header
+
+**Solution:**
+```typescript
+// ❌ WRONG
+const subj = message.headers.get("subject").toLowerCase();
+
+// ✅ CORRECT
+const subj = message.headers.get("subject")?.toLowerCase() || "";
+```
+
+## Limits
+
+| Resource | Free | Paid |
+|----------|------|------|
+| Email size | 25 MB | 25 MB |
+| Rules | 200 | 200 |
+| Destinations | 200 | 200 |
+| CPU time | 10ms | 50ms |
+| SendEmail | ~100/min | Higher |
+
+## Debugging
+
+### Local
+
+```bash
+npx wrangler dev
+
+curl -X POST 'http://localhost:8787/__email' \
+ --header 'content-type: message/rfc822' \
+ --data 'From: test@example.com
+To: you@yourdomain.com
+Subject: Test
+
+Body'
+```
+
+### Production
+
+```bash
+npx wrangler tail
+```
+
+### Pattern
+
+```typescript
+export default {
+ async email(message, env, ctx) {
+ try {
+ console.log("From:", message.from);
+ await process(message, env);
+ } catch (err) {
+ console.error(err);
+ message.setReject(err.message);
+ }
+ }
+} satisfies ExportedHandler;
+```
+
+## Auth Troubleshooting
+
+### Check Status
+
+```typescript
+const auth = message.headers.get("authentication-results") || "";
+console.log({
+ spf: auth.includes("spf=pass"),
+ dkim: auth.includes("dkim=pass"),
+ dmarc: auth.includes("dmarc=pass")
+});
+
+if (!auth.includes("pass")) {
+ message.setReject("Failed auth");
+ return;
+}
+```
+
+### SPF Issues
+
+**Causes:** Forwarding breaks SPF, too many lookups (>10), missing includes
+
+**Solution:**
+```dns
+; ✅ Good
+example.com. IN TXT "v=spf1 include:_spf.google.com ~all"
+
+; ❌ Bad - too many
+example.com. IN TXT "v=spf1 include:a.com include:b.com ... ~all"
+```
+
+### DMARC Alignment
+
+**Cause:** From domain must match SPF/DKIM domain
+
+## Best Practices
+
+1. Consume `message.raw` immediately
+2. Verify destinations
+3. Handle missing headers (`?.`)
+4. Use envelope for routing
+5. Check spam scores
+6. Test locally first
+7. Use `ctx.waitUntil` for background work
+8. Size-check early
diff --git a/.agents/skills/cloudflare-deploy/references/email-routing/patterns.md b/.agents/skills/cloudflare-deploy/references/email-routing/patterns.md
new file mode 100644
index 0000000..2163677
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-routing/patterns.md
@@ -0,0 +1,229 @@
+# Common Patterns
+
+## 1. Allowlist/Blocklist
+
+```typescript
+// Allowlist
+const allowed = ["user@example.com", "trusted@corp.com"];
+if (!allowed.includes(message.from)) {
+ message.setReject("Not allowed");
+ return;
+}
+await message.forward("inbox@corp.com");
+```
+
+## 2. Parse Email Body
+
+```typescript
+import PostalMime from 'postal-mime';
+
+export default {
+ async email(message, env, ctx) {
+ // CRITICAL: Consume stream immediately
+ const raw = await message.raw.arrayBuffer();
+
+ const parser = new PostalMime();
+ const email = await parser.parse(raw);
+
+ console.log({
+ subject: email.subject,
+ text: email.text,
+ html: email.html,
+ from: email.from.address,
+ attachments: email.attachments.length
+ });
+
+ await message.forward("inbox@corp.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## 3. Spam Filter
+
+```typescript
+const score = parseFloat(message.headers.get("x-cf-spamh-score") || "0");
+if (score > 5) {
+ message.setReject("Spam detected");
+ return;
+}
+await message.forward("inbox@corp.com");
+```
+
+## 4. Archive to R2
+
+```typescript
+interface Env { R2: R2Bucket; }
+
+export default {
+ async email(message, env, ctx) {
+ const raw = await message.raw.arrayBuffer();
+
+ const key = `${new Date().toISOString()}-${message.from}.eml`;
+ await env.R2.put(key, raw, {
+ httpMetadata: { contentType: "message/rfc822" }
+ });
+
+ await message.forward("inbox@corp.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## 5. Store Metadata in KV
+
+```typescript
+import PostalMime from 'postal-mime';
+
+interface Env { KV: KVNamespace; }
+
+export default {
+ async email(message, env, ctx) {
+ const raw = await message.raw.arrayBuffer();
+ const parser = new PostalMime();
+ const email = await parser.parse(raw);
+
+ const metadata = {
+ from: email.from.address,
+ subject: email.subject,
+ timestamp: new Date().toISOString(),
+ size: raw.byteLength
+ };
+
+ await env.KV.put(`email:${Date.now()}`, JSON.stringify(metadata));
+ await message.forward("inbox@corp.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## 6. Subject-Based Routing
+
+```typescript
+export default {
+ async email(message, env, ctx) {
+ const subject = message.headers.get("subject")?.toLowerCase() || "";
+
+ if (subject.includes("[urgent]")) {
+ await message.forward("oncall@corp.com");
+ } else if (subject.includes("[billing]")) {
+ await message.forward("billing@corp.com");
+ } else if (subject.includes("[support]")) {
+ await message.forward("support@corp.com");
+ } else {
+ await message.forward("general@corp.com");
+ }
+ }
+} satisfies ExportedHandler;
+```
+
+## 7. Auto-Reply
+
+```typescript
+interface Env {
+ EMAIL: SendEmail;
+ REPLIED: KVNamespace;
+}
+
+export default {
+ async email(message, env, ctx) {
+ const msgId = message.headers.get("message-id");
+
+ if (msgId && await env.REPLIED.get(msgId)) {
+ await message.forward("archive@corp.com");
+ return;
+ }
+
+ ctx.waitUntil((async () => {
+ await env.EMAIL.send({
+ from: "noreply@yourdomain.com",
+ to: message.from,
+ subject: "Re: " + (message.headers.get("subject") || ""),
+ text: "Thank you. We'll respond within 24h."
+ });
+ if (msgId) await env.REPLIED.put(msgId, "1", { expirationTtl: 604800 });
+ })());
+
+ await message.forward("support@corp.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## 8. Extract Attachments
+
+```typescript
+import PostalMime from 'postal-mime';
+
+interface Env { ATTACHMENTS: R2Bucket; }
+
+export default {
+ async email(message, env, ctx) {
+ const parser = new PostalMime();
+ const email = await parser.parse(await message.raw.arrayBuffer());
+
+ for (const att of email.attachments) {
+ const key = `${Date.now()}-${att.filename}`;
+ await env.ATTACHMENTS.put(key, att.content, {
+ httpMetadata: { contentType: att.mimeType }
+ });
+ }
+
+ await message.forward("inbox@corp.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## 9. Log to D1
+
+```typescript
+import PostalMime from 'postal-mime';
+
+interface Env { DB: D1Database; }
+
+export default {
+ async email(message, env, ctx) {
+ const parser = new PostalMime();
+ const email = await parser.parse(await message.raw.arrayBuffer());
+
+ ctx.waitUntil(
+ env.DB.prepare("INSERT INTO log (ts, from_addr, subj) VALUES (?, ?, ?)")
+ .bind(new Date().toISOString(), email.from.address, email.subject || "")
+ .run()
+ );
+
+ await message.forward("inbox@corp.com");
+ }
+} satisfies ExportedHandler;
+```
+
+## 10. Multi-Tenant
+
+```typescript
+interface Env { TENANTS: KVNamespace; }
+
+export default {
+ async email(message, env, ctx) {
+ const subdomain = message.to.split("@")[1].split(".")[0];
+ const config = await env.TENANTS.get(subdomain, "json") as { forward: string } | null;
+
+ if (!config) {
+ message.setReject("Unknown tenant");
+ return;
+ }
+
+ await message.forward(config.forward);
+ }
+} satisfies ExportedHandler;
+```
+
+## Summary
+
+| Pattern | Use Case | Storage |
+|---------|----------|---------|
+| Allowlist | Security | None |
+| Parse | Body/attachments | None |
+| Spam Filter | Reduce spam | None |
+| R2 Archive | Email storage | R2 |
+| KV Meta | Analytics | KV |
+| Subject Route | Dept routing | None |
+| Auto-Reply | Support | KV |
+| Attachments | Doc mgmt | R2 |
+| D1 Log | Audit trail | D1 |
+| Multi-Tenant | SaaS | KV |
diff --git a/.agents/skills/cloudflare-deploy/references/email-workers/README.md b/.agents/skills/cloudflare-deploy/references/email-workers/README.md
new file mode 100644
index 0000000..5a3e304
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-workers/README.md
@@ -0,0 +1,151 @@
+# Cloudflare Email Workers
+
+Process incoming emails programmatically using Cloudflare Workers runtime.
+
+## Overview
+
+Email Workers enable custom email processing logic at the edge. Build spam filters, auto-responders, ticket systems, notification handlers, and more using the same Workers runtime you use for HTTP requests.
+
+**Key capabilities**:
+- Process inbound emails with full message access
+- Forward to verified destinations
+- Send replies with proper threading
+- Parse MIME content and attachments
+- Integrate with KV, R2, D1, and external APIs
+
+## Quick Start
+
+### Minimal ES Modules Handler
+
+```typescript
+export default {
+ async email(message, env, ctx) {
+ // Reject spam
+ if (message.from.includes('spam.com')) {
+ message.setReject('Blocked');
+ return;
+ }
+
+ // Forward to inbox
+ await message.forward('inbox@example.com');
+ }
+};
+```
+
+### Core Operations
+
+| Operation | Method | Use Case |
+|-----------|--------|----------|
+| Forward | `message.forward(to, headers?)` | Route to verified destination |
+| Reject | `message.setReject(reason)` | Block with SMTP error |
+| Reply | `message.reply(emailMessage)` | Auto-respond with threading |
+| Parse | postal-mime library | Extract subject, body, attachments |
+
+## Reading Order
+
+For comprehensive understanding, read files in this order:
+
+1. **README.md** (this file) - Overview and quick start
+2. **configuration.md** - Setup, deployment, bindings
+3. **api.md** - Complete API reference
+4. **patterns.md** - Real-world implementation examples
+5. **gotchas.md** - Critical pitfalls and debugging
+
+## In This Reference
+
+| File | Description | Key Topics |
+|------|-------------|------------|
+| [api.md](./api.md) | Complete API reference | ForwardableEmailMessage, SendEmail bindings, reply() method, postal-mime/mimetext APIs |
+| [configuration.md](./configuration.md) | Setup and configuration | wrangler.jsonc, bindings, deployment, dependencies |
+| [patterns.md](./patterns.md) | Real-world examples | Allowlists from KV, auto-reply with threading, attachment extraction, webhook notifications |
+| [gotchas.md](./gotchas.md) | Pitfalls and debugging | Stream consumption, ctx.waitUntil errors, security, limits |
+
+## Architecture
+
+```
+Incoming Email → Email Routing → Email Worker
+ ↓
+ Process + Decide
+ ↓
+ ┌───────────────┼───────────────┐
+ ↓ ↓ ↓
+ Forward Reply Reject
+```
+
+**Event flow**:
+1. Email arrives at your domain
+2. Email Routing matches route (e.g., `support@example.com`)
+3. Bound Email Worker receives `ForwardableEmailMessage`
+4. Worker processes and takes action (forward/reply/reject)
+5. Email delivered or rejected based on worker logic
+
+## Key Concepts
+
+### Envelope vs Headers
+
+- **Envelope addresses** (`message.from`, `message.to`): SMTP transport addresses (trusted)
+- **Header addresses** (parsed from body): Display addresses (can be spoofed)
+
+Use envelope addresses for security decisions.
+
+### Single-Use Streams
+
+`message.raw` is a ReadableStream that can only be read once. Buffer to ArrayBuffer for multiple uses.
+
+```typescript
+// Buffer first
+const buffer = await new Response(message.raw).arrayBuffer();
+const email = await PostalMime.parse(buffer);
+```
+
+See [gotchas.md](./gotchas.md#readablestream-can-only-be-consumed-once) for details.
+
+### Verified Destinations
+
+`forward()` only works with addresses verified in the Cloudflare Email Routing dashboard. Add destinations before deployment.
+
+## Use Cases
+
+- **Spam filtering**: Block based on sender, content, or reputation
+- **Auto-responders**: Send acknowledgment replies with threading
+- **Ticket creation**: Parse emails and create support tickets
+- **Email archival**: Store in KV, R2, or D1
+- **Notification routing**: Forward to Slack, Discord, or webhooks
+- **Attachment processing**: Extract files to R2 storage
+- **Multi-tenant routing**: Route based on recipient subdomain
+- **Size filtering**: Reject oversized attachments
+
+## Limits
+
+| Limit | Value |
+|-------|-------|
+| Max message size | 25 MiB |
+| Max routing rules | 200 |
+| Max destinations | 200 |
+| CPU time (free tier) | 10ms |
+| CPU time (paid tier) | 50ms |
+
+See [gotchas.md](./gotchas.md#limits-reference) for complete limits table.
+
+## Prerequisites
+
+Before deploying Email Workers:
+
+1. **Enable Email Routing** in Cloudflare dashboard for your domain
+2. **Verify destination addresses** for forwarding
+3. **Configure DMARC/SPF** for sending domains (required for replies)
+4. **Set up wrangler.jsonc** with SendEmail binding
+
+See [configuration.md](./configuration.md) for detailed setup.
+
+## Service Worker Syntax (Deprecated)
+
+Modern projects should use ES modules format shown above. Service Worker syntax (`addEventListener('email', ...)`) is deprecated but still supported.
+
+## See Also
+
+- [Email Routing Documentation](https://developers.cloudflare.com/email-routing/)
+- [Workers Platform](https://developers.cloudflare.com/workers/)
+- [Wrangler CLI](https://developers.cloudflare.com/workers/wrangler/)
+- [postal-mime on npm](https://www.npmjs.com/package/postal-mime)
+- [mimetext on npm](https://www.npmjs.com/package/mimetext)
diff --git a/.agents/skills/cloudflare-deploy/references/email-workers/api.md b/.agents/skills/cloudflare-deploy/references/email-workers/api.md
new file mode 100644
index 0000000..74da66c
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-workers/api.md
@@ -0,0 +1,237 @@
+# Email Workers API Reference
+
+Complete API reference for Cloudflare Email Workers runtime.
+
+## ForwardableEmailMessage Interface
+
+The main interface passed to email handlers.
+
+```typescript
+interface ForwardableEmailMessage {
+ readonly from: string; // Envelope MAIL FROM (SMTP sender)
+ readonly to: string; // Envelope RCPT TO (SMTP recipient)
+ readonly headers: Headers; // Web-standard Headers object
+ readonly raw: ReadableStream; // Raw MIME message (single-use stream)
+ readonly rawSize: number; // Total message size in bytes
+
+ setReject(reason: string): void;
+ forward(rcptTo: string, headers?: Headers): Promise;
+ reply(message: EmailMessage): Promise;
+}
+```
+
+### Properties
+
+| Property | Type | Description |
+|----------|------|-------------|
+| `from` | string | Envelope sender (SMTP MAIL FROM) - use for security |
+| `to` | string | Envelope recipient (SMTP RCPT TO) |
+| `headers` | Headers | Message headers (Subject, Message-ID, etc.) |
+| `raw` | ReadableStream | Raw MIME message (**single-use**, buffer first) |
+| `rawSize` | number | Message size in bytes |
+
+### Methods
+
+#### setReject(reason: string): void
+
+Reject with permanent SMTP 5xx error. Email not delivered, sender may receive bounce.
+
+```typescript
+if (blockList.includes(message.from)) {
+ message.setReject('Sender blocked');
+}
+```
+
+#### forward(rcptTo: string, headers?: Headers): Promise\<void\>
+
+Forward to verified destination. Only `X-*` custom headers allowed.
+
+```typescript
+await message.forward('inbox@example.com');
+
+// With custom headers
+const h = new Headers();
+h.set('X-Processed-By', 'worker');
+await message.forward('inbox@example.com', h);
+```
+
+#### reply(message: EmailMessage): Promise\<void\>
+
+Send a reply to the original sender (March 2025 feature).
+
+```typescript
+import { EmailMessage } from 'cloudflare:email';
+import { createMimeMessage } from 'mimetext';
+
+const msg = createMimeMessage();
+msg.setSender({ name: 'Support', addr: 'support@example.com' });
+msg.setRecipient(message.from);
+msg.setSubject(`Re: ${message.headers.get('Subject')}`);
+msg.setHeader('In-Reply-To', message.headers.get('Message-ID'));
+msg.setHeader('References', message.headers.get('References') || '');
+msg.addMessage({
+ contentType: 'text/plain',
+ data: 'Thank you for your message.'
+});
+
+await message.reply(new EmailMessage(
+ 'support@example.com',
+ message.from,
+ msg.asRaw()
+));
+```
+
+**Requirements**:
+- Incoming email needs valid DMARC
+- Reply once per event, recipient = `message.from`
+- Sender domain = receiving domain, with DMARC/SPF/DKIM
+- Max 100 `References` entries
+- Threading: `In-Reply-To` (original Message-ID), `References`, new `Message-ID`
+
+## EmailMessage Constructor
+
+```typescript
+import { EmailMessage } from 'cloudflare:email';
+
+new EmailMessage(from: string, to: string, raw: ReadableStream | string)
+```
+
+Used for sending emails (replies or via SendEmail binding). Domain must be verified.
+
+## SendEmail Interface
+
+```typescript
+interface SendEmail {
+  send(message: EmailMessage): Promise<void>;
+}
+
+// Usage
+await env.EMAIL.send(new EmailMessage(from, to, mimeContent));
+```
+
+## SendEmail Binding Types
+
+```jsonc
+{
+ "send_email": [
+ { "name": "EMAIL" }, // Type 1: Any verified address
+ { "name": "LOGS", "destination_address": "logs@example.com" }, // Type 2: Single dest
+ { "name": "TEAM", "allowed_destination_addresses": ["a@ex.com", "b@ex.com"] }, // Type 3: Dest allowlist
+ { "name": "NOREPLY", "allowed_sender_addresses": ["noreply@ex.com"] } // Type 4: Sender allowlist
+ ]
+}
+```
+
+## postal-mime Parsed Output
+
+postal-mime v2.7.3 parses incoming emails into structured data.
+
+```typescript
+interface ParsedEmail {
+ headers: Array<{ key: string; value: string }>;
+ from: { name: string; address: string } | null;
+ to: Array<{ name: string; address: string }> | { name: string; address: string } | null;
+ cc: Array<{ name: string; address: string }> | null;
+ bcc: Array<{ name: string; address: string }> | null;
+ subject: string;
+ messageId: string | null;
+ inReplyTo: string | null;
+ references: string | null;
+ date: string | null;
+ html: string | null;
+ text: string | null;
+ attachments: Array<{
+ filename: string;
+ mimeType: string;
+ disposition: string | null;
+ related: boolean;
+ contentId: string | null;
+ content: Uint8Array;
+ }>;
+}
+```
+
+### Usage
+
+```typescript
+import PostalMime from 'postal-mime';
+
+const buffer = await new Response(message.raw).arrayBuffer();
+const email = await PostalMime.parse(buffer);
+
+console.log(email.subject);
+console.log(email.from?.address);
+console.log(email.text);
+console.log(email.attachments.length);
+```
+
+## mimetext API Quick Reference
+
+mimetext v3.0.27 composes outgoing emails.
+
+```typescript
+import { createMimeMessage } from 'mimetext';
+
+const msg = createMimeMessage();
+
+// Sender
+msg.setSender({ name: 'John Doe', addr: 'john@example.com' });
+
+// Recipients
+msg.setRecipient('alice@example.com');
+msg.setRecipients(['bob@example.com', 'carol@example.com']);
+msg.setCc('manager@example.com');
+msg.setBcc(['audit@example.com']);
+
+// Headers
+msg.setSubject('Meeting Notes');
+msg.setHeader('In-Reply-To', '<original-message-id@example.com>');
+msg.setHeader('References', '<thread-root@example.com> <original-message-id@example.com>');
+msg.setHeader('Message-ID', `<${crypto.randomUUID()}@example.com>`);
+
+// Content
+msg.addMessage({
+ contentType: 'text/plain',
+ data: 'Plain text content'
+});
+
+msg.addMessage({
+ contentType: 'text/html',
+  data: '<p>HTML content</p>'
+});
+
+// Attachments
+msg.addAttachment({
+ filename: 'report.pdf',
+ contentType: 'application/pdf',
+ data: pdfBuffer // Uint8Array or base64 string
+});
+
+// Generate raw MIME
+const raw = msg.asRaw(); // Returns string
+```
+
+## TypeScript Types
+
+```typescript
+import {
+ ForwardableEmailMessage,
+ EmailMessage
+} from 'cloudflare:email';
+
+interface Env {
+ EMAIL: SendEmail;
+ EMAIL_ARCHIVE: KVNamespace;
+ ALLOWED_SENDERS: KVNamespace;
+}
+
+export default {
+ async email(
+ message: ForwardableEmailMessage,
+ env: Env,
+ ctx: ExecutionContext
+  ): Promise<void> {
+ // Fully typed
+ }
+};
+```
diff --git a/.agents/skills/cloudflare-deploy/references/email-workers/configuration.md b/.agents/skills/cloudflare-deploy/references/email-workers/configuration.md
new file mode 100644
index 0000000..7928d04
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-workers/configuration.md
@@ -0,0 +1,112 @@
+# Email Workers Configuration
+
+## wrangler.jsonc
+
+```jsonc
+{
+ "name": "email-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-01-27",
+ "send_email": [
+ { "name": "EMAIL" }, // Unrestricted
+ { "name": "EMAIL_LOGS", "destination_address": "logs@example.com" }, // Single dest
+ { "name": "EMAIL_TEAM", "allowed_destination_addresses": ["a@ex.com", "b@ex.com"] },
+ { "name": "EMAIL_NOREPLY", "allowed_sender_addresses": ["noreply@ex.com"] }
+ ],
+ "kv_namespaces": [{ "binding": "ARCHIVE", "id": "xxx" }],
+ "r2_buckets": [{ "binding": "ATTACHMENTS", "bucket_name": "email-attachments" }],
+ "vars": { "WEBHOOK_URL": "https://hooks.example.com" }
+}
+```
+
+## TypeScript Types
+
+```typescript
+interface Env {
+ EMAIL: SendEmail;
+ ARCHIVE: KVNamespace;
+ ATTACHMENTS: R2Bucket;
+ WEBHOOK_URL: string;
+}
+
+export default {
+ async email(message: ForwardableEmailMessage, env: Env, ctx: ExecutionContext) {}
+};
+```
+
+## Dependencies
+
+```bash
+npm install postal-mime mimetext
+npm install -D @cloudflare/workers-types wrangler typescript
+```
+
+Use postal-mime v2.x, mimetext v3.x.
+
+## tsconfig.json
+
+```json
+{
+ "compilerOptions": {
+ "target": "ES2022", "module": "ES2022", "lib": ["ES2022"],
+ "types": ["@cloudflare/workers-types"],
+ "moduleResolution": "bundler", "strict": true
+ }
+}
+```
+
+## Local Development
+
+```bash
+npx wrangler dev
+
+# Test receiving
+curl --request POST 'http://localhost:8787/cdn-cgi/handler/email' \
+ --url-query 'from=sender@example.com' --url-query 'to=recipient@example.com' \
+  --header 'Content-Type: text/plain' --data-raw $'Subject: Test\n\nHello'
+```
+
+Sent emails write to local `.eml` files.
+
+## Deployment Checklist
+
+- [ ] Enable Email Routing in dashboard
+- [ ] Verify destination addresses
+- [ ] Configure DMARC/SPF/DKIM for sending
+- [ ] Create KV/R2 resources if needed
+- [ ] Update wrangler.jsonc with production IDs
+
+```bash
+npx wrangler deploy
+npx wrangler deployments list
+```
+
+## Dashboard Setup
+
+1. **Email Routing:** Domain → Email → Enable Email Routing
+2. **Verify addresses:** Email → Destination addresses → Add & verify
+3. **Bind Worker:** Email → Email Workers → Create route → Select pattern & Worker
+4. **DMARC:** Add TXT `_dmarc.domain.com`: `v=DMARC1; p=quarantine;`
+
+## Secrets
+
+```bash
+npx wrangler secret put API_KEY
+# Access: env.API_KEY
+```
+
+## Monitoring
+
+```bash
+npx wrangler tail
+npx wrangler tail --status error
+npx wrangler tail --format json
+```
+
+## Troubleshooting
+
+| Error | Fix |
+|-------|-----|
+| "Binding not found" | Check `send_email` name matches code |
+| "Invalid destination" | Verify in Email Routing dashboard |
+| Type errors | Install `@cloudflare/workers-types` |
diff --git a/.agents/skills/cloudflare-deploy/references/email-workers/gotchas.md b/.agents/skills/cloudflare-deploy/references/email-workers/gotchas.md
new file mode 100644
index 0000000..3700a50
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-workers/gotchas.md
@@ -0,0 +1,125 @@
+# Email Workers Gotchas
+
+## Critical Issues
+
+### ReadableStream Single-Use
+
+```typescript
+// ❌ WRONG: Stream consumed twice
+const email = await PostalMime.parse(await new Response(message.raw).arrayBuffer());
+const rawText = await new Response(message.raw).text(); // EMPTY!
+
+// ✅ CORRECT: Buffer first
+const buffer = await new Response(message.raw).arrayBuffer();
+const email = await PostalMime.parse(buffer);
+const rawText = new TextDecoder().decode(buffer);
+```
+
+### ctx.waitUntil() Errors Silent
+
+```typescript
+// ❌ Errors dropped silently
+ctx.waitUntil(fetch(webhookUrl, { method: 'POST', body: data }));
+
+// ✅ Catch and log
+ctx.waitUntil(
+ fetch(webhookUrl, { method: 'POST', body: data })
+ .catch(err => env.ERROR_LOG.put(`error:${Date.now()}`, err.message))
+);
+```
+
+## Security
+
+### Envelope vs Header From (Spoofing)
+
+```typescript
+const envelopeFrom = message.from; // SMTP MAIL FROM (trusted)
+const headerFrom = (await PostalMime.parse(buffer)).from?.address; // (untrusted)
+// Use envelope for security decisions
+```
+
+### Input Validation
+
+```typescript
+if (message.rawSize > 5_000_000) { message.setReject('Too large'); return; }
+if ((message.headers.get('Subject') || '').length > 1000) {
+ message.setReject('Invalid subject'); return;
+}
+```
+
+### DMARC for Replies
+
+Replies fail silently without DMARC. Verify: `dig TXT _dmarc.example.com`
+
+## Parsing
+
+### Address Parsing
+
+```typescript
+const email = await PostalMime.parse(buffer);
+const fromAddress = email.from?.address || 'unknown';
+const toAddresses = Array.isArray(email.to) ? email.to.map(t => t.address) : [email.to?.address];
+```
+
+### Character Encoding
+
+Let postal-mime handle decoding - `email.subject`, `email.text`, `email.html` are UTF-8.
+
+## API Behavior
+
+### setReject() vs throw
+
+```typescript
+// setReject() for SMTP rejection
+if (blockList.includes(message.from)) { message.setReject('Blocked'); return; }
+
+// throw for worker errors
+if (!env.KV) throw new Error('KV not configured');
+```
+
+### forward() Only X-* Headers
+
+```typescript
+headers.set('X-Processed-By', 'worker'); // ✅ Works
+headers.set('Subject', 'Modified'); // ❌ Dropped
+```
+
+### Reply Requires Verified Domain
+
+```typescript
+// Use same domain as receiving address
+const receivingDomain = message.to.split('@')[1];
+await message.reply(new EmailMessage(`noreply@${receivingDomain}`, message.from, rawMime));
+```
+
+## Performance
+
+### CPU Limit
+
+```typescript
+// Skip parsing large emails
+if (message.rawSize > 5_000_000) {
+ await message.forward('inbox@example.com');
+ return;
+}
+```
+
+Monitor: `npx wrangler tail`
+
+## Limits
+
+| Limit | Value |
+|-------|-------|
+| Max message size | 25 MiB |
+| Max rules/zone | 200 |
+| CPU time (free/paid) | 10ms / 50ms |
+| Reply References | 100 |
+
+## Common Errors
+
+| Error | Fix |
+|-------|-----|
+| "Address not verified" | Add in Email Routing dashboard |
+| "Exceeded CPU time" | Use `ctx.waitUntil()` or upgrade |
+| "Stream is locked" | Buffer `message.raw` first |
+| Silent reply failure | Check DMARC records |
diff --git a/.agents/skills/cloudflare-deploy/references/email-workers/patterns.md b/.agents/skills/cloudflare-deploy/references/email-workers/patterns.md
new file mode 100644
index 0000000..f1e65f5
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/email-workers/patterns.md
@@ -0,0 +1,102 @@
+# Email Workers Patterns
+
+## Parse Email
+
+```typescript
+import PostalMime from 'postal-mime';
+
+export default {
+ async email(message, env, ctx) {
+ const buffer = await new Response(message.raw).arrayBuffer();
+ const email = await PostalMime.parse(buffer);
+ console.log(email.from, email.subject, email.text, email.attachments.length);
+ await message.forward('inbox@example.com');
+ }
+};
+```
+
+## Filtering
+
+```typescript
+// Allowlist from KV
+const allowList = await env.ALLOWED_SENDERS.get('list', 'json') || [];
+if (!allowList.includes(message.from)) {
+ message.setReject('Not allowed');
+ return;
+}
+
+// Size check (avoid parsing large emails)
+if (message.rawSize > 5_000_000) {
+ await message.forward('inbox@example.com'); // Forward without parsing
+ return;
+}
+```
+
+## Auto-Reply with Threading
+
+```typescript
+import { EmailMessage } from 'cloudflare:email';
+import { createMimeMessage } from 'mimetext';
+
+const msg = createMimeMessage();
+msg.setSender({ addr: 'support@example.com' });
+msg.setRecipient(message.from);
+msg.setSubject(`Re: ${message.headers.get('Subject')}`);
+msg.setHeader('In-Reply-To', message.headers.get('Message-ID') || '');
+msg.addMessage({ contentType: 'text/plain', data: 'Thank you. We will respond.' });
+
+await message.reply(new EmailMessage('support@example.com', message.from, msg.asRaw()));
+```
+
+## Rate-Limited Auto-Reply
+
+```typescript
+const rateKey = `rate:${message.from}`;
+if (!await env.RATE_LIMIT.get(rateKey)) {
+ // Send reply...
+ ctx.waitUntil(env.RATE_LIMIT.put(rateKey, '1', { expirationTtl: 3600 }));
+}
+```
+
+## Subject-Based Routing
+
+```typescript
+const subject = (message.headers.get('Subject') || '').toLowerCase();
+if (subject.includes('billing')) await message.forward('billing@example.com');
+else if (subject.includes('support')) await message.forward('support@example.com');
+else await message.forward('general@example.com');
+```
+
+## Multi-Tenant Routing
+
+```typescript
+// support+tenant123@example.com → tenant123
+const tenantId = message.to.split('@')[0].match(/\+(.+)$/)?.[1] || 'default';
+const config = await env.TENANT_CONFIG.get(tenantId, 'json');
+config?.forwardTo ? await message.forward(config.forwardTo) : message.setReject('Unknown');
+```
+
+## Archive & Extract Attachments
+
+```typescript
+// Archive to KV
+ctx.waitUntil(env.ARCHIVE.put(`email:${Date.now()}`, JSON.stringify({
+ from: message.from, subject: email.subject
+})));
+
+// Attachments to R2
+for (const att of email.attachments) {
+ ctx.waitUntil(env.R2.put(`${Date.now()}-${att.filename}`, att.content));
+}
+```
+
+## Webhook Integration
+
+```typescript
+ctx.waitUntil(
+ fetch(env.WEBHOOK_URL, {
+ method: 'POST',
+ body: JSON.stringify({ from: message.from, subject: message.headers.get('Subject') })
+ }).catch(err => console.error(err))
+);
+```
diff --git a/.agents/skills/cloudflare-deploy/references/hyperdrive/README.md b/.agents/skills/cloudflare-deploy/references/hyperdrive/README.md
new file mode 100644
index 0000000..6626776
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/hyperdrive/README.md
@@ -0,0 +1,82 @@
+# Hyperdrive
+
+Accelerates database queries from Workers via connection pooling, edge setup, query caching.
+
+## Key Features
+
+- **Connection Pooling**: Persistent connections eliminate TCP/TLS/auth handshakes (~7 round-trips)
+- **Edge Setup**: Connection negotiation at edge, pooling near origin
+- **Query Caching**: Auto-cache non-mutating queries (default 60s TTL)
+- **Support**: PostgreSQL, MySQL + compatibles (CockroachDB, Timescale, PlanetScale, Neon, Supabase)
+
+## Architecture
+
+```
+Worker → Edge (setup) → Pool (near DB) → Origin
+ ↓ cached reads
+ Cache
+```
+
+## Quick Start
+
+```bash
+# Create config
+npx wrangler hyperdrive create my-db \
+ --connection-string="postgres://user:pass@host:5432/db"
+
+# wrangler.jsonc
+{
+ "compatibility_flags": ["nodejs_compat"],
+ "hyperdrive": [{"binding": "HYPERDRIVE", "id": ""}]
+}
+```
+
+```typescript
+import { Client } from "pg";
+
+export default {
+  async fetch(req: Request, env: Env): Promise<Response> {
+ const client = new Client({
+ connectionString: env.HYPERDRIVE.connectionString,
+ });
+ await client.connect();
+ const result = await client.query("SELECT * FROM users WHERE id = $1", [123]);
+ await client.end();
+ return Response.json(result.rows);
+ },
+};
+```
+
+## When to Use
+
+✅ Global access to single-region DBs, high read ratios, popular queries, connection-heavy loads
+❌ Write-heavy, real-time data (<1s), single-region apps close to DB
+
+**💡 Pair with Smart Placement** for Workers making multiple queries - executes near DB to minimize latency.
+
+## Driver Choice
+
+| Driver | Use When | Notes |
+|--------|----------|-------|
+| **pg** (recommended) | General use, TypeScript, ecosystem compatibility | Stable, widely used, works with most ORMs |
+| **postgres.js** | Advanced features, template literals, streaming | Lighter than pg, `prepare: true` is default |
+| **mysql2** | MySQL/MariaDB/PlanetScale | MySQL only, less mature support |
+
+## Reading Order
+
+| New to Hyperdrive | Implementing | Troubleshooting |
+|-------------------|--------------|-----------------|
+| 1. README (this) | 1. [configuration.md](./configuration.md) | 1. [gotchas.md](./gotchas.md) |
+| 2. [configuration.md](./configuration.md) | 2. [api.md](./api.md) | 2. [patterns.md](./patterns.md) |
+| 3. [api.md](./api.md) | 3. [patterns.md](./patterns.md) | 3. [api.md](./api.md) |
+
+## In This Reference
+- [configuration.md](./configuration.md) - Setup, wrangler config, Smart Placement
+- [api.md](./api.md) - Binding APIs, query patterns, driver usage
+- [patterns.md](./patterns.md) - Use cases, ORMs, multi-query optimization
+- [gotchas.md](./gotchas.md) - Limits, troubleshooting, connection management
+
+## See Also
+- [smart-placement](../smart-placement/) - Optimize multi-query Workers near databases
+- [d1](../d1/) - Serverless SQLite alternative for edge-native apps
+- [workers](../workers/) - Worker runtime with database bindings
diff --git a/.agents/skills/cloudflare-deploy/references/hyperdrive/api.md b/.agents/skills/cloudflare-deploy/references/hyperdrive/api.md
new file mode 100644
index 0000000..0e587b9
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/hyperdrive/api.md
@@ -0,0 +1,143 @@
+# API Reference
+
+See [README.md](./README.md) for overview, [configuration.md](./configuration.md) for setup.
+
+## Binding Interface
+
+```typescript
+interface Hyperdrive {
+ connectionString: string; // PostgreSQL
+ // MySQL properties:
+ host: string;
+ port: number;
+ user: string;
+ password: string;
+ database: string;
+}
+
+interface Env {
+ HYPERDRIVE: Hyperdrive;
+}
+```
+
+**Generate types:** `npx wrangler types` (auto-creates worker-configuration.d.ts from wrangler.jsonc)
+
+## PostgreSQL (node-postgres) - RECOMMENDED
+
+```typescript
+import { Client } from "pg"; // pg@^8.17.2
+
+export default {
+  async fetch(req: Request, env: Env): Promise<Response> {
+ const client = new Client({connectionString: env.HYPERDRIVE.connectionString});
+ try {
+ await client.connect();
+ const result = await client.query("SELECT * FROM users WHERE id = $1", [123]);
+ return Response.json(result.rows);
+ } finally {
+ await client.end();
+ }
+ },
+};
+```
+
+**⚠️ Workers connection limit: 6 per Worker invocation** - use connection pooling wisely.
+
+## PostgreSQL (postgres.js)
+
+```typescript
+import postgres from "postgres"; // postgres@^3.4.8
+
+const sql = postgres(env.HYPERDRIVE.connectionString, {
+ max: 5, // Limit per Worker (Workers max: 6)
+ prepare: true, // Enabled by default, required for caching
+ fetch_types: false, // Reduce latency if not using arrays
+});
+
+const users = await sql`SELECT * FROM users WHERE active = ${true} LIMIT 10`;
+```
+
+**⚠️ `prepare: true` is enabled by default and required for Hyperdrive caching.** Setting to `false` disables prepared statements + cache.
+
+## MySQL (mysql2)
+
+```typescript
+import { createConnection } from "mysql2/promise"; // mysql2@^3.16.2
+
+const conn = await createConnection({
+ host: env.HYPERDRIVE.host,
+ user: env.HYPERDRIVE.user,
+ password: env.HYPERDRIVE.password,
+ database: env.HYPERDRIVE.database,
+ port: env.HYPERDRIVE.port,
+ disableEval: true, // ⚠️ REQUIRED for Workers
+});
+
+const [results] = await conn.query("SELECT * FROM users WHERE active = ? LIMIT ?", [true, 10]);
+ctx.waitUntil(conn.end());
+```
+
+**⚠️ MySQL support is less mature than PostgreSQL** - expect fewer optimizations and potential edge cases.
+
+## Query Caching
+
+**Cacheable:**
+```sql
+SELECT * FROM posts WHERE published = true;
+SELECT COUNT(*) FROM users;
+```
+
+**NOT cacheable:**
+```sql
+-- Writes
+INSERT/UPDATE/DELETE
+
+-- Volatile functions
+SELECT NOW();
+SELECT random();
+SELECT LASTVAL(); -- PostgreSQL
+SELECT UUID(); -- MySQL
+```
+
+**Cache config:**
+- Default: `max_age=60s`, `swr=15s`
+- Max `max_age`: 3600s
+- Disable: `--caching-disabled=true`
+
+**Multiple configs pattern:**
+```typescript
+// Reads: cached
+const sqlCached = postgres(env.HYPERDRIVE_CACHED.connectionString);
+const posts = await sqlCached`SELECT * FROM posts ORDER BY views DESC LIMIT 10`;
+
+// Writes/time-sensitive: no cache
+const sqlNoCache = postgres(env.HYPERDRIVE_NO_CACHE.connectionString);
+const orders = await sqlNoCache`SELECT * FROM orders WHERE created_at > NOW() - INTERVAL '5 minutes'`;
+```
+
+## ORMs
+
+**Drizzle:**
+```typescript
+import { drizzle } from "drizzle-orm/postgres-js"; // drizzle-orm@^0.45.1
+import postgres from "postgres";
+
+const client = postgres(env.HYPERDRIVE.connectionString, {max: 5, prepare: true});
+const db = drizzle(client);
+const activeUsers = await db.select().from(users).where(eq(users.active, true)).limit(10);
+```
+
+**Kysely:**
+```typescript
+import { Kysely, PostgresDialect } from "kysely"; // kysely@^0.27+
+import postgres from "postgres";
+
+const db = new Kysely<Database>({
+ dialect: new PostgresDialect({
+ postgres: postgres(env.HYPERDRIVE.connectionString, {max: 5, prepare: true}),
+ }),
+});
+const users = await db.selectFrom("users").selectAll().where("active", "=", true).execute();
+```
+
+See [patterns.md](./patterns.md) for use cases, [gotchas.md](./gotchas.md) for limits.
diff --git a/.agents/skills/cloudflare-deploy/references/hyperdrive/configuration.md b/.agents/skills/cloudflare-deploy/references/hyperdrive/configuration.md
new file mode 100644
index 0000000..6d429a9
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/hyperdrive/configuration.md
@@ -0,0 +1,159 @@
+# Configuration
+
+See [README.md](./README.md) for overview.
+
+## Create Config
+
+**PostgreSQL:**
+```bash
+# Basic
+npx wrangler hyperdrive create my-db \
+ --connection-string="postgres://user:pass@host:5432/db"
+
+# Custom cache
+npx wrangler hyperdrive create my-db \
+ --connection-string="postgres://..." \
+ --max-age=120 --swr=30
+
+# No cache
+npx wrangler hyperdrive create my-db \
+ --connection-string="postgres://..." \
+ --caching-disabled=true
+```
+
+**MySQL:**
+```bash
+npx wrangler hyperdrive create my-db \
+ --connection-string="mysql://user:pass@host:3306/db"
+```
+
+## wrangler.jsonc
+
+```jsonc
+{
+ "compatibility_date": "2025-01-01", // Use latest for new projects
+ "compatibility_flags": ["nodejs_compat"],
+ "hyperdrive": [
+ {
+ "binding": "HYPERDRIVE",
+ "id": "",
+ "localConnectionString": "postgres://user:pass@localhost:5432/dev"
+ }
+ ]
+}
+```
+
+**Generate TypeScript types:** Run `npx wrangler types` to auto-generate `worker-configuration.d.ts` from your wrangler.jsonc.
+
+**Multiple configs:**
+```jsonc
+{
+ "hyperdrive": [
+ {"binding": "HYPERDRIVE_CACHED", "id": ""},
+ {"binding": "HYPERDRIVE_NO_CACHE", "id": ""}
+ ]
+}
+```
+
+## Management
+
+```bash
+npx wrangler hyperdrive list
+npx wrangler hyperdrive get <id>
+npx wrangler hyperdrive update <id> --max-age=180
+npx wrangler hyperdrive delete <id>
+```
+
+## Config Options
+
+Hyperdrive create/update CLI flags:
+
+| Option | Default | Notes |
+|--------|---------|-------|
+| `--caching-disabled` | `false` | Disable caching |
+| `--max-age` | `60` | Cache TTL (max 3600s) |
+| `--swr` | `15` | Stale-while-revalidate |
+| `--origin-connection-limit` | 20/100 | Free/paid |
+| `--access-client-id` | - | Tunnel auth |
+| `--access-client-secret` | - | Tunnel auth |
+| `--sslmode` | `require` | PostgreSQL only |
+
+## Smart Placement Integration
+
+For Workers making **multiple queries** per request, enable Smart Placement to execute near your database:
+
+```jsonc
+{
+ "compatibility_date": "2025-01-01",
+ "compatibility_flags": ["nodejs_compat"],
+ "placement": {
+ "mode": "smart"
+ },
+ "hyperdrive": [
+ {
+ "binding": "HYPERDRIVE",
+ "id": ""
+ }
+ ]
+}
+```
+
+**Benefits:** Multi-query Workers run closer to DB, reducing round-trip latency. See [patterns.md](./patterns.md) for examples.
+
+## Private DB via Tunnel
+
+```
+Worker → Hyperdrive → Access → Tunnel → Private Network → DB
+```
+
+**Setup:**
+```bash
+# 1. Create tunnel
+cloudflared tunnel create my-db-tunnel
+
+# 2. Configure hostname in Zero Trust dashboard
+# Domain: db-tunnel.example.com
+# Service: TCP -> localhost:5432
+
+# 3. Create service token (Zero Trust > Service Auth)
+# Save Client ID/Secret
+
+# 4. Create Access app (db-tunnel.example.com)
+# Policy: Service Auth token from step 3
+
+# 5. Create Hyperdrive
+npx wrangler hyperdrive create my-private-db \
+ --host=db-tunnel.example.com \
+ --user=dbuser --password=dbpass --database=prod \
+  --access-client-id=<client-id> --access-client-secret=<client-secret>
+```
+
+**⚠️ Don't specify `--port` with Tunnel** - port configured in tunnel service settings.
+
+## Local Dev
+
+**Option 1: Local (RECOMMENDED):**
+```bash
+# Env var (takes precedence)
+export CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_HYPERDRIVE="postgres://user:pass@localhost:5432/dev"
+npx wrangler dev
+
+# wrangler.jsonc
+{"hyperdrive": [{"binding": "HYPERDRIVE", "localConnectionString": "postgres://..."}]}
+```
+
+**Remote DB locally:**
+```bash
+# PostgreSQL
+export CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_HYPERDRIVE="postgres://user:pass@remote:5432/db?sslmode=require"
+
+# MySQL
+export CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_HYPERDRIVE="mysql://user:pass@remote:3306/db?sslMode=REQUIRED"
+```
+
+**Option 2: Remote execution:**
+```bash
+npx wrangler dev --remote # Uses deployed config, affects production
+```
+
+See [api.md](./api.md), [patterns.md](./patterns.md), [gotchas.md](./gotchas.md).
diff --git a/.agents/skills/cloudflare-deploy/references/hyperdrive/gotchas.md b/.agents/skills/cloudflare-deploy/references/hyperdrive/gotchas.md
new file mode 100644
index 0000000..efa2ead
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/hyperdrive/gotchas.md
@@ -0,0 +1,77 @@
+# Gotchas
+
+See [README.md](./README.md), [configuration.md](./configuration.md), [api.md](./api.md), [patterns.md](./patterns.md).
+
+## Common Errors
+
+### "Too many open connections" / "Connection limit exceeded"
+
+**Cause:** Workers have a hard limit of **6 concurrent connections per invocation**
+**Solution:** Set `max: 5` in driver config, reuse connections, ensure proper cleanup with `client.end()` or `ctx.waitUntil(conn.end())`
+
+### "Failed to acquire a connection (Pool exhausted)"
+
+**Cause:** All connections in pool are in use, often due to long-running transactions
+**Solution:** Reduce transaction duration, avoid queries >60s, don't hold connections during external calls, or upgrade to paid plan for more connections
+
+### "connection_refused"
+
+**Cause:** Database refusing connections due to firewall, connection limits, or service down
+**Solution:** Check firewall allows Cloudflare IPs, verify DB listening on port, confirm service running, and validate credentials
+
+### "Query timeout (deadline exceeded)"
+
+**Cause:** Query execution exceeding 60s timeout limit
+**Solution:** Optimize with indexes, reduce dataset with LIMIT, break into smaller queries, or use async processing
+
+### "password authentication failed"
+
+**Cause:** Invalid credentials in Hyperdrive configuration
+**Solution:** Check username and password in Hyperdrive config match database credentials
+
+### "SSL/TLS connection error"
+
+**Cause:** SSL/TLS configuration mismatch between Hyperdrive and database
+**Solution:** Add `sslmode=require` (Postgres) or `sslMode=REQUIRED` (MySQL), upload CA cert if self-signed, verify DB has SSL enabled, and check cert expiry
+
+### "Queries not being cached"
+
+**Cause:** Query is mutating (INSERT/UPDATE/DELETE), contains volatile functions (NOW(), RANDOM()), or caching disabled
+**Solution:** Verify query is non-mutating SELECT, avoid volatile functions, confirm caching enabled, use `wrangler dev --remote` to test, and set `prepare=true` for postgres.js
+
+### "Slow multi-query Workers despite Hyperdrive"
+
+**Cause:** Worker executing at edge, each query round-trips to DB region
+**Solution:** Enable Smart Placement (`"placement": {"mode": "smart"}` in wrangler.jsonc) to execute Worker near DB. See [patterns.md](./patterns.md) Multi-Query pattern.
+
+### "Local database connection failed"
+
+**Cause:** `localConnectionString` incorrect or database not running
+**Solution:** Verify `localConnectionString` correct, check DB running, confirm env var name matches binding, and test with psql/mysql client
+
+### "Environment variable not working"
+
+**Cause:** Environment variable format incorrect or not exported
+**Solution:** Use format `CLOUDFLARE_HYPERDRIVE_LOCAL_CONNECTION_STRING_<BINDING_NAME>`, ensure binding matches wrangler.jsonc, export variable in shell, and restart wrangler dev
+
+## Limits
+
+| Limit | Free | Paid | Notes |
+|-------|------|------|-------|
+| Max configs | 10 | 25 | Hyperdrive configurations per account |
+| Worker connections | 6 | 6 | Max concurrent connections per Worker invocation |
+| Username/DB name | 63 bytes | 63 bytes | Maximum length |
+| Connection timeout | 15s | 15s | Time to establish connection |
+| Idle timeout | 10 min | 10 min | Connection idle timeout |
+| Max origin connections | ~20 | ~100 | Connections to origin database |
+| Query duration max | 60s | 60s | Queries >60s terminated |
+| Cached response max | 50 MB | 50 MB | Responses >50MB returned but not cached |
+
+## Resources
+
+- [Docs](https://developers.cloudflare.com/hyperdrive/)
+- [Getting Started](https://developers.cloudflare.com/hyperdrive/get-started/)
+- [Wrangler Reference](https://developers.cloudflare.com/hyperdrive/reference/wrangler-commands/)
+- [Supported DBs](https://developers.cloudflare.com/hyperdrive/reference/supported-databases-and-features/)
+- [Discord #hyperdrive](https://discord.cloudflare.com)
+- [Limit Increase Form](https://forms.gle/ukpeZVLWLnKeixDu7)
diff --git a/.agents/skills/cloudflare-deploy/references/hyperdrive/patterns.md b/.agents/skills/cloudflare-deploy/references/hyperdrive/patterns.md
new file mode 100644
index 0000000..bd794b9
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/hyperdrive/patterns.md
@@ -0,0 +1,190 @@
+# Patterns
+
+See [README.md](./README.md), [configuration.md](./configuration.md), [api.md](./api.md).
+
+## High-Traffic Read-Heavy
+
+```typescript
+const sql = postgres(env.HYPERDRIVE.connectionString, {max: 5, prepare: true});
+
+// Cacheable: popular content
+const posts = await sql`SELECT * FROM posts WHERE published = true ORDER BY views DESC LIMIT 20`;
+
+// Cacheable: user profiles
+const [user] = await sql`SELECT id, username, bio FROM users WHERE id = ${userId}`;
+```
+
+**Benefits:** Trending/profiles cached (60s), connection pooling handles spikes.
+
+## Mixed Read/Write
+
+```typescript
+interface Env {
+ HYPERDRIVE_CACHED: Hyperdrive; // max_age=120
+ HYPERDRIVE_REALTIME: Hyperdrive; // caching disabled
+}
+
+// Reads: cached
+if (req.method === "GET") {
+ const sql = postgres(env.HYPERDRIVE_CACHED.connectionString, {prepare: true});
+ const products = await sql`SELECT * FROM products WHERE category = ${cat}`;
+}
+
+// Writes: no cache (immediate consistency)
+if (req.method === "POST") {
+ const sql = postgres(env.HYPERDRIVE_REALTIME.connectionString, {prepare: true});
+ await sql`INSERT INTO orders ${sql(data)}`;
+}
+```
+
+## Analytics Dashboard
+
+```typescript
+const client = new Client({connectionString: env.HYPERDRIVE.connectionString});
+await client.connect();
+
+// Aggregate queries cached (use fixed timestamps for caching)
+const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString();
+const dailyStats = await client.query(`
+ SELECT DATE(created_at) as date, COUNT(*) as orders, SUM(amount) as revenue
+ FROM orders WHERE created_at >= $1
+ GROUP BY DATE(created_at) ORDER BY date DESC
+`, [thirtyDaysAgo]);
+
+const sevenDaysAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
+const topProducts = await client.query(`
+ SELECT p.name, COUNT(oi.id) as count, SUM(oi.quantity * oi.price) as revenue
+ FROM order_items oi JOIN products p ON oi.product_id = p.id
+ WHERE oi.created_at >= $1
+ GROUP BY p.id, p.name ORDER BY revenue DESC LIMIT 10
+`, [sevenDaysAgo]);
+```
+
+**Benefits:** Expensive aggregations cached (avoid NOW() for cacheability), dashboard instant, reduced DB load.
+
+## Multi-Tenant
+
+```typescript
+const tenantId = req.headers.get("X-Tenant-ID");
+const sql = postgres(env.HYPERDRIVE.connectionString, {prepare: true});
+
+// Tenant-scoped queries cached separately
+const docs = await sql`
+ SELECT * FROM documents
+ WHERE tenant_id = ${tenantId} AND deleted_at IS NULL
+ ORDER BY updated_at DESC LIMIT 50
+`;
+```
+
+**Benefits:** Per-tenant caching, shared connection pool, protects DB from multi-tenant load.
+
+## Geographically Distributed
+
+```typescript
+// Worker runs at edge nearest user
+// Connection setup at edge (fast), pooling near DB (efficient)
+const sql = postgres(env.HYPERDRIVE.connectionString, {prepare: true});
+const [user] = await sql`SELECT * FROM users WHERE id = ${userId}`;
+
+return Response.json({
+ user,
+ serverRegion: req.cf?.colo, // Edge location
+});
+```
+
+**Benefits:** Edge setup + DB pooling = global → single-region DB without replication.
+
+## Multi-Query + Smart Placement
+
+For Workers making **multiple queries** per request, enable Smart Placement to execute near DB:
+
+```jsonc
+// wrangler.jsonc
+{
+ "placement": {"mode": "smart"},
+  "hyperdrive": [{"binding": "HYPERDRIVE", "id": "<HYPERDRIVE_ID>"}]
+}
+```
+
+```typescript
+const sql = postgres(env.HYPERDRIVE.connectionString, {prepare: true});
+
+// Multiple queries benefit from Smart Placement
+const [user] = await sql`SELECT * FROM users WHERE id = ${userId}`;
+const orders = await sql`SELECT * FROM orders WHERE user_id = ${userId} ORDER BY created_at DESC LIMIT 10`;
+const stats = await sql`SELECT COUNT(*) as total, SUM(amount) as spent FROM orders WHERE user_id = ${userId}`;
+
+return Response.json({user, orders, stats});
+```
+
+**Benefits:** Worker executes near DB → reduces latency for each query. Without Smart Placement, each query round-trips from edge.
+
+## Connection Pooling
+
+Operates in **transaction mode**: connection acquired per transaction, `RESET` on return.
+
+**SET statements:**
+```typescript
+// ✅ Within transaction
+await client.query("BEGIN");
+await client.query("SET work_mem = '256MB'");
+await client.query("SELECT * FROM large_table"); // Uses SET
+await client.query("COMMIT"); // RESET after
+
+// ✅ Single statement
+await client.query("SET work_mem = '256MB'; SELECT * FROM large_table");
+
+// ❌ Across queries (may get different connection)
+await client.query("SET work_mem = '256MB'");
+await client.query("SELECT * FROM large_table"); // SET not applied
+```
+
+**Best practices:**
+```typescript
+// ❌ Long transactions block pooling
+await client.query("BEGIN");
+await processThousands(); // Connection held entire time
+await client.query("COMMIT");
+
+// ✅ Short transactions
+await client.query("BEGIN");
+await client.query("UPDATE users SET status = $1 WHERE id = $2", [status, id]);
+await client.query("COMMIT");
+
+// ✅ SET LOCAL within transaction
+await client.query("BEGIN");
+await client.query("SET LOCAL work_mem = '256MB'");
+await client.query("SELECT * FROM large_table");
+await client.query("COMMIT");
+```
+
+## Performance Tips
+
+**Enable prepared statements (required for caching):**
+```typescript
+const sql = postgres(connectionString, {prepare: true}); // Default, enables caching
+```
+
+**Optimize connection settings:**
+```typescript
+const sql = postgres(connectionString, {
+ max: 5, // Stay under Workers' 6 connection limit
+ fetch_types: false, // Reduce latency if not using arrays
+ idle_timeout: 60, // Match Worker lifetime
+});
+```
+
+**Write cache-friendly queries:**
+```typescript
+// ✅ Cacheable (deterministic)
+await sql`SELECT * FROM products WHERE category = 'electronics' LIMIT 10`;
+
+// ❌ Not cacheable (volatile NOW())
+await sql`SELECT * FROM logs WHERE created_at > NOW()`;
+
+// ✅ Cacheable (parameterized timestamp)
+const ts = Date.now();
+await sql`SELECT * FROM logs WHERE created_at > ${ts}`;
+```
+
+See [gotchas.md](./gotchas.md) for limits, troubleshooting.
diff --git a/.agents/skills/cloudflare-deploy/references/images/README.md b/.agents/skills/cloudflare-deploy/references/images/README.md
new file mode 100644
index 0000000..f1dd644
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/images/README.md
@@ -0,0 +1,61 @@
+# Cloudflare Images Skill Reference
+
+**Cloudflare Images** is an end-to-end image management solution providing storage, transformation, optimization, and delivery at scale via Cloudflare's global network.
+
+## Quick Decision Tree
+
+**Need to:**
+- **Transform in Worker?** → [api.md](api.md#workers-binding-api-2026-primary-method) (Workers Binding API)
+- **Upload from Worker?** → [api.md](api.md#upload-from-worker) (REST API)
+- **Upload from client?** → [patterns.md](patterns.md#upload-from-client-direct-creator-upload) (Direct Creator Upload)
+- **Set up variants?** → [configuration.md](configuration.md#variants-configuration)
+- **Serve responsive images?** → [patterns.md](patterns.md#responsive-images)
+- **Add watermarks?** → [patterns.md](patterns.md#watermarking)
+- **Fix errors?** → [gotchas.md](gotchas.md#common-errors)
+
+## Reading Order
+
+**For building image upload/transform feature:**
+1. [configuration.md](configuration.md) - Setup Workers binding
+2. [api.md](api.md#workers-binding-api-2026-primary-method) - Learn transform API
+3. [patterns.md](patterns.md#upload-from-client-direct-creator-upload) - Direct upload pattern
+4. [gotchas.md](gotchas.md) - Check limits and errors
+
+**For URL-based transforms:**
+1. [configuration.md](configuration.md#variants-configuration) - Create variants
+2. [api.md](api.md#url-transform-api) - URL syntax
+3. [patterns.md](patterns.md#responsive-images) - Responsive patterns
+
+**For troubleshooting:**
+1. [gotchas.md](gotchas.md#common-errors) - Error messages
+2. [gotchas.md](gotchas.md#limits) - Size/format limits
+
+## Core Methods
+
+| Method | Use Case | Location |
+|--------|----------|----------|
+| `env.IMAGES.input().transform()` | Transform in Worker | [api.md:11](api.md) |
+| REST API `/images/v1` | Upload images | [api.md:57](api.md) |
+| Direct Creator Upload | Client-side upload | [api.md:127](api.md) |
+| URL transforms | Static image delivery | [api.md:112](api.md) |
+
+## In This Reference
+
+- **[api.md](api.md)** - Complete API: Workers binding, REST endpoints, URL transforms
+- **[configuration.md](configuration.md)** - Setup: wrangler.toml, variants, auth, signed URLs
+- **[patterns.md](patterns.md)** - Patterns: responsive images, watermarks, format negotiation, caching
+- **[gotchas.md](gotchas.md)** - Troubleshooting: limits, errors, best practices
+
+## Key Features
+
+- **Automatic Optimization** - AVIF/WebP format negotiation
+- **On-the-fly Transforms** - Resize, crop, blur, sharpen via URL or API
+- **Workers Binding** - Transform images in Workers (2026 primary method)
+- **Direct Upload** - Secure client-side uploads without backend proxy
+- **Global Delivery** - Cached at 300+ Cloudflare data centers
+- **Watermarking** - Overlay images programmatically
+
+## See Also
+
+- [Official Docs](https://developers.cloudflare.com/images/)
+- [Workers Examples](https://developers.cloudflare.com/images/tutorials/)
diff --git a/.agents/skills/cloudflare-deploy/references/images/api.md b/.agents/skills/cloudflare-deploy/references/images/api.md
new file mode 100644
index 0000000..c172e22
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/images/api.md
@@ -0,0 +1,96 @@
+# API Reference
+
+## Workers Binding API
+
+```toml
+# wrangler.toml
+[images]
+binding = "IMAGES"
+```
+
+### Transform Images
+
+```typescript
+const imageResponse = await env.IMAGES
+ .input(fileBuffer)
+ .transform({ width: 800, height: 600, fit: "cover", quality: 85, format: "avif" })
+ .output();
+return imageResponse.response();
+```
+
+### Transform Options
+
+```typescript
+interface TransformOptions {
+ width?: number; height?: number;
+ fit?: "scale-down" | "contain" | "cover" | "crop" | "pad";
+ quality?: number; // 1-100
+ format?: "avif" | "webp" | "jpeg" | "png";
+ dpr?: number; // 1-3
+ gravity?: "auto" | "left" | "right" | "top" | "bottom" | "face" | string;
+ sharpen?: number; // 0-10
+ blur?: number; // 1-250
+ rotate?: 90 | 180 | 270;
+ background?: string; // CSS color for pad
+ metadata?: "none" | "copyright" | "keep";
+ brightness?: number; contrast?: number; gamma?: number; // 0-2
+}
+```
+
+### Draw/Watermark
+
+```typescript
+await env.IMAGES.input(baseImage)
+ .draw(env.IMAGES.input(watermark).transform({ width: 100 }), { top: 10, left: 10, opacity: 0.8 })
+ .output();
+```
+
+## REST API
+
+### Upload Image
+
+```bash
+curl -X POST https://api.cloudflare.com/client/v4/accounts/{account_id}/images/v1 \
+ -H "Authorization: Bearer {token}" -F file=@image.jpg -F metadata='{"key":"value"}'
+```
+
+### Other Operations
+
+```bash
+GET /accounts/{account_id}/images/v1/{image_id} # Get details
+DELETE /accounts/{account_id}/images/v1/{image_id} # Delete
+GET /accounts/{account_id}/images/v1?page=1 # List
+```
+
+## URL Transform API
+
+```
+https://imagedelivery.net/{hash}/{id}/width=800,height=600,fit=cover,format=avif
+```
+
+**Params:** `w=`, `h=`, `fit=`, `q=`, `f=`, `dpr=`, `gravity=`, `sharpen=`, `blur=`, `rotate=`, `background=`, `metadata=`
+
+## Direct Creator Upload
+
+```typescript
+// 1. Get upload URL (backend)
+const { result } = await fetch(
+ `https://api.cloudflare.com/client/v4/accounts/${accountId}/images/v2/direct_upload`,
+ { method: 'POST', headers: { 'Authorization': `Bearer ${token}` },
+ body: JSON.stringify({ requireSignedURLs: false }) }
+).then(r => r.json());
+
+// 2. Client uploads to result.uploadURL
+const formData = new FormData();
+formData.append('file', file);
+await fetch(result.uploadURL, { method: 'POST', body: formData });
+```
+
+## Error Codes
+
+| Code | Message | Solution |
+|------|---------|----------|
+| 5400 | Invalid format | Use JPEG, PNG, GIF, WebP |
+| 5401 | Too large | Max 100MB |
+| 5403 | Invalid transform | Check params |
+| 9413 | Rate limit | Implement backoff |
diff --git a/.agents/skills/cloudflare-deploy/references/images/configuration.md b/.agents/skills/cloudflare-deploy/references/images/configuration.md
new file mode 100644
index 0000000..9fa2deb
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/images/configuration.md
@@ -0,0 +1,211 @@
+# Configuration
+
+## Wrangler Integration
+
+### Workers Binding Setup
+
+Add to `wrangler.toml`:
+
+```toml
+name = "my-image-worker"
+main = "src/index.ts"
+compatibility_date = "2024-01-01"
+
+[images]
+binding = "IMAGES"
+```
+
+Access in Worker:
+
+```typescript
+interface Env {
+ IMAGES: ImageBinding;
+}
+
+export default {
+  async fetch(request: Request, env: Env): Promise<Response> {
+ return await env.IMAGES
+ .input(imageBuffer)
+ .transform({ width: 800 })
+ .output()
+ .response();
+ }
+};
+```
+
+### Upload via Script
+
+Wrangler doesn't have built-in Images commands, use REST API:
+
+```typescript
+// scripts/upload-image.ts
+import fs from 'fs';
+import FormData from 'form-data';
+
+async function uploadImage(filePath: string) {
+ const accountId = process.env.CLOUDFLARE_ACCOUNT_ID!;
+ const apiToken = process.env.CLOUDFLARE_API_TOKEN!;
+
+ const formData = new FormData();
+ formData.append('file', fs.createReadStream(filePath));
+
+ const response = await fetch(
+ `https://api.cloudflare.com/client/v4/accounts/${accountId}/images/v1`,
+ {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${apiToken}`,
+ },
+ body: formData,
+ }
+ );
+
+ const result = await response.json();
+ console.log('Uploaded:', result);
+}
+
+uploadImage('./photo.jpg');
+```
+
+### Environment Variables
+
+Store account hash for URL construction:
+
+```toml
+[vars]
+IMAGES_ACCOUNT_HASH = "your-account-hash"
+ACCOUNT_ID = "your-account-id"
+```
+
+Access in Worker:
+
+```typescript
+const imageUrl = `https://imagedelivery.net/${env.IMAGES_ACCOUNT_HASH}/${imageId}/public`;
+```
+
+## Variants Configuration
+
+Variants are named presets for transformations.
+
+### Create Variant (Dashboard)
+
+1. Navigate to Images → Variants
+2. Click "Create Variant"
+3. Set name (e.g., `thumbnail`)
+4. Configure: `width=200,height=200,fit=cover`
+
+### Create Variant (API)
+
+```bash
+curl -X POST \
+ https://api.cloudflare.com/client/v4/accounts/{account_id}/images/v1/variants \
+ -H "Authorization: Bearer {api_token}" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "id": "thumbnail",
+ "options": {
+ "width": 200,
+ "height": 200,
+ "fit": "cover"
+ },
+ "neverRequireSignedURLs": true
+ }'
+```
+
+### Use Variant
+
+```
+https://imagedelivery.net/{account_hash}/{image_id}/thumbnail
+```
+
+### Common Variant Presets
+
+```json
+{
+ "thumbnail": {
+ "width": 200,
+ "height": 200,
+ "fit": "cover"
+ },
+ "avatar": {
+ "width": 128,
+ "height": 128,
+ "fit": "cover",
+ "gravity": "face"
+ },
+ "hero": {
+ "width": 1920,
+ "height": 1080,
+ "fit": "cover",
+ "quality": 90
+ },
+ "mobile": {
+ "width": 640,
+ "fit": "scale-down",
+ "quality": 80,
+ "format": "avif"
+ }
+}
+```
+
+## Authentication
+
+### API Token (Recommended)
+
+Generate at: Dashboard → My Profile → API Tokens
+
+Required permissions:
+- Account → Cloudflare Images → Edit
+
+```bash
+curl -H "Authorization: Bearer {api_token}" \
+ https://api.cloudflare.com/client/v4/accounts/{account_id}/images/v1
+```
+
+### API Key (Legacy)
+
+```bash
+curl -H "X-Auth-Email: {email}" \
+ -H "X-Auth-Key: {api_key}" \
+ https://api.cloudflare.com/client/v4/accounts/{account_id}/images/v1
+```
+
+## Signed URLs
+
+For private images, enable signed URLs:
+
+```bash
+# Upload with signed URLs required
+curl -X POST \
+ https://api.cloudflare.com/client/v4/accounts/{account_id}/images/v1 \
+ -H "Authorization: Bearer {api_token}" \
+ -F file=@private.jpg \
+ -F requireSignedURLs=true
+```
+
+Generate signed URL:
+
+```typescript
+import { createHmac } from 'crypto';
+
+function signUrl(imageId: string, variant: string, expiry: number, key: string): string {
+ const path = `/${imageId}/${variant}`;
+ const toSign = `${path}${expiry}`;
+ const signature = createHmac('sha256', key)
+ .update(toSign)
+ .digest('hex');
+
+ return `https://imagedelivery.net/{hash}${path}?exp=${expiry}&sig=${signature}`;
+}
+
+// Sign URL valid for 1 hour (expiry is a UNIX timestamp in seconds)
+const expiry = Math.floor(Date.now() / 1000) + 3600;
+const signedUrl = signUrl('image-id', 'public', expiry, env.SIGNING_KEY);
+```
+
+## Local Development
+
+```bash
+npx wrangler dev --remote
+```
+
+Must use `--remote` for Images binding access.
diff --git a/.agents/skills/cloudflare-deploy/references/images/gotchas.md b/.agents/skills/cloudflare-deploy/references/images/gotchas.md
new file mode 100644
index 0000000..6f52455
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/images/gotchas.md
@@ -0,0 +1,99 @@
+# Gotchas & Best Practices
+
+## Fit Modes
+
+| Mode | Best For | Behavior |
+|------|----------|----------|
+| `cover` | Hero images, thumbnails | Fills space, crops excess |
+| `contain` | Product images, artwork | Preserves full image, may add padding |
+| `scale-down` | User uploads | Never enlarges |
+| `crop` | Precise crops | Uses gravity |
+| `pad` | Fixed aspect ratio | Adds background |
+
+## Format Selection
+
+```typescript
+format: 'auto' // Recommended - negotiates best format
+```
+
+**Support:** AVIF (Chrome 85+, Firefox 93+, Safari 16.4+), WebP (Chrome 23+, Firefox 65+, Safari 14+)
+
+## Quality Settings
+
+| Use Case | Quality |
+|----------|---------|
+| Thumbnails | 75-80 |
+| Standard | 85 (default) |
+| High-quality | 90-95 |
+
+## Common Errors
+
+### 5403: "Image transformation failed"
+- Verify `width`/`height` ≤ 12000
+- Check `quality` 1-100, `dpr` 1-3
+- Don't combine incompatible options
+
+### 9413: "Rate limit exceeded"
+Implement caching and exponential backoff:
+```typescript
+for (let i = 0; i < 3; i++) {
+ try { return await env.IMAGES.input(buffer).transform({...}).output(); }
+ catch { await new Promise(r => setTimeout(r, 2 ** i * 1000)); }
+}
+```
+
+### 5401: "Image too large"
+Pre-process images before upload (max 100MB, 12000×12000px)
+
+### 5400: "Invalid image format"
+Supported: JPEG, PNG, GIF, WebP, AVIF, SVG
+
+### 401/403: "Unauthorized"
+Verify API token has `Cloudflare Images → Edit` permission
+
+## Limits
+
+| Resource | Limit |
+|----------|-------|
+| Max input size | 100MB |
+| Max dimensions | 12000×12000px |
+| Quality range | 1-100 |
+| DPR range | 1-3 |
+| API rate limit | ~1200 req/min |
+
+## AVIF Gotchas
+
+- **Slower encoding**: First request may have higher latency
+- **Browser detection**:
+```typescript
+const format = /image\/avif/.test(request.headers.get('Accept') || '') ? 'avif' : 'webp';
+```
+
+## Anti-Patterns
+
+```typescript
+// ❌ No caching - transforms every request
+return env.IMAGES.input(buffer).transform({...}).output().response();
+
+// ❌ cover without both dimensions
+transform({ width: 800, fit: 'cover' })
+
+// ✅ Always set both for cover
+transform({ width: 800, height: 600, fit: 'cover' })
+
+// ❌ Exposes API token to client
+// ✅ Use Direct Creator Upload (patterns.md)
+```
+
+## Debugging
+
+```typescript
+// Check response headers
+console.log('Content-Type:', response.headers.get('Content-Type'));
+
+// Test with curl
+// curl -I "https://imagedelivery.net/{hash}/{id}/width=800,format=avif"
+
+// Monitor logs
+// npx wrangler tail
+```
diff --git a/.agents/skills/cloudflare-deploy/references/images/patterns.md b/.agents/skills/cloudflare-deploy/references/images/patterns.md
new file mode 100644
index 0000000..c07bf3c
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/images/patterns.md
@@ -0,0 +1,115 @@
+# Common Patterns
+
+## URL Transform Options
+
+```
+width= height= fit=scale-down|contain|cover|crop|pad
+quality=85 format=auto|webp|avif|jpeg|png dpr=2
+gravity=auto|face|left|right|top|bottom sharpen=2 blur=10
+rotate=90|180|270 background=white metadata=none|copyright|keep
+```
+
+## Responsive Images (srcset)
+
+```html
+<img
+  src="https://imagedelivery.net/{hash}/{id}/width=800"
+  srcset="https://imagedelivery.net/{hash}/{id}/width=400 400w,
+          https://imagedelivery.net/{hash}/{id}/width=800 800w,
+          https://imagedelivery.net/{hash}/{id}/width=1200 1200w"
+  sizes="(max-width: 600px) 100vw, 800px"
+  alt="Description of image">
+```
+
+## Format Negotiation
+
+```typescript
+async fetch(request: Request, env: Env): Promise<Response> {
+ const accept = request.headers.get('Accept') || '';
+ const format = /image\/avif/.test(accept) ? 'avif' : /image\/webp/.test(accept) ? 'webp' : 'jpeg';
+ return env.IMAGES.input(buffer).transform({ format, quality: 85 }).output().response();
+}
+```
+
+## Direct Creator Upload
+
+```typescript
+// Backend: Generate upload URL
+const response = await fetch(
+ `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/images/v2/direct_upload`,
+ { method: 'POST', headers: { 'Authorization': `Bearer ${env.API_TOKEN}` },
+ body: JSON.stringify({ requireSignedURLs: false, metadata: { userId } }) }
+);
+
+// Frontend: Upload to returned uploadURL
+const formData = new FormData();
+formData.append('file', file);
+await fetch(result.uploadURL, { method: 'POST', body: formData });
+// Use: https://imagedelivery.net/{hash}/${result.id}/public
+```
+
+## Transform & Store to R2
+
+```typescript
+async fetch(request: Request, env: Env): Promise<Response> {
+ const file = (await request.formData()).get('image') as File;
+ const transformed = await env.IMAGES
+ .input(await file.arrayBuffer())
+ .transform({ width: 800, format: 'avif', quality: 80 })
+ .output();
+ await env.R2.put(`images/${Date.now()}.avif`, transformed.response().body);
+ return Response.json({ success: true });
+}
+```
+
+## Watermarking
+
+```typescript
+const watermark = await env.ASSETS.fetch(new URL('/watermark.png', request.url));
+const result = await env.IMAGES
+ .input(await image.arrayBuffer())
+ .draw(env.IMAGES.input(watermark.body).transform({ width: 100 }), { bottom: 20, right: 20, opacity: 0.7 })
+ .transform({ format: 'avif' })
+ .output();
+return result.response();
+```
+
+## Device-Based Transforms
+
+```typescript
+const ua = request.headers.get('User-Agent') || '';
+const isMobile = /Mobile|Android|iPhone/i.test(ua);
+return env.IMAGES.input(buffer)
+ .transform({ width: isMobile ? 400 : 1200, quality: isMobile ? 75 : 85, format: 'avif' })
+ .output().response();
+```
+
+## Caching Strategy
+
+```typescript
+async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+  const cache = caches.default;
+  let response = await cache.match(request);
+  if (!response) {
+    response = await env.IMAGES.input(buffer).transform({ width: 800, format: 'avif' }).output().response();
+    // Headers is not a plain object — copy it via the Response constructor, then set the cache header
+    response = new Response(response.body, response);
+    response.headers.set('Cache-Control', 'public, max-age=86400');
+    ctx.waitUntil(cache.put(request, response.clone()));
+  }
+  return response;
+}
+```
+
+## Batch Processing
+
+```typescript
+const results = await Promise.all(images.map(buffer =>
+ env.IMAGES.input(buffer).transform({ width: 800, fit: 'cover', format: 'avif' }).output()
+));
+```
+
+## Error Handling
+
+```typescript
+try {
+ return (await env.IMAGES.input(buffer).transform({ width: 800 }).output()).response();
+} catch (error) {
+ console.error('Transform failed:', error);
+ return new Response('Image processing failed', { status: 500 });
+}
+```
diff --git a/.agents/skills/cloudflare-deploy/references/kv/README.md b/.agents/skills/cloudflare-deploy/references/kv/README.md
new file mode 100644
index 0000000..9e43e01
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/kv/README.md
@@ -0,0 +1,89 @@
+# Cloudflare Workers KV
+
+Globally-distributed, eventually-consistent key-value store optimized for high read volume and low latency.
+
+## Overview
+
+KV provides:
+- Eventual consistency (60s global propagation)
+- Read-optimized performance
+- 25 MiB value limit per key
+- Auto-replication to Cloudflare edge
+- Metadata support (1024 bytes)
+
+**Use cases:** Config storage, user sessions, feature flags, caching, A/B testing
+
+## When to Use KV
+
+| Need | Recommendation |
+|------|----------------|
+| Strong consistency | → [Durable Objects](../durable-objects/) |
+| SQL queries | → [D1](../d1/) |
+| Object storage (files) | → [R2](../r2/) |
+| High read, low write volume | → KV ✅ |
+| Sub-10ms global reads | → KV ✅ |
+
+**Quick comparison:**
+
+| Feature | KV | D1 | Durable Objects |
+|---------|----|----|-----------------|
+| Consistency | Eventual | Strong | Strong |
+| Read latency | <10ms | ~50ms | <1ms |
+| Write limit | 1/s per key | Unlimited | Unlimited |
+| Use case | Config, cache | Relational data | Coordination |
+
+## Quick Start
+
+```bash
+wrangler kv namespace create MY_NAMESPACE
+# Add binding to wrangler.jsonc
+```
+
+```typescript
+// Write
+await env.MY_KV.put("key", "value", { expirationTtl: 300 });
+
+// Read
+const value = await env.MY_KV.get("key");
+const json = await env.MY_KV.get("config", "json");
+```
+
+## Core Operations
+
+| Method | Purpose | Returns |
+|--------|---------|---------|
+| `get(key, type?)` | Single read | `string \| null` |
+| `get(keys, type?)` | Bulk read (≤100) | `Map<string, string \| null>` |
+| `put(key, value, options?)` | Write | `Promise<void>` |
+| `delete(key)` | Delete | `Promise<void>` |
+| `list(options?)` | List keys | `{ keys, list_complete, cursor? }` |
+| `getWithMetadata(key)` | Get + metadata | `{ value, metadata }` |
+
+## Consistency Model
+
+- **Write visibility:** Immediate in same location, ≤60s globally
+- **Read path:** Eventually consistent
+- **Write rate:** 1 write/second per key (429 on exceed)
+
+## Reading Order
+
+| Task | Files to Read |
+|------|---------------|
+| Quick start | README → configuration.md |
+| Implement feature | README → api.md → patterns.md |
+| Debug issues | gotchas.md → api.md |
+| Batch operations | api.md (bulk section) → patterns.md |
+| Performance tuning | gotchas.md (performance) → patterns.md (caching) |
+
+## In This Reference
+
+- [configuration.md](./configuration.md) - wrangler.jsonc setup, namespace creation, TypeScript types
+- [api.md](./api.md) - KV methods, bulk operations, cacheTtl, content types
+- [patterns.md](./patterns.md) - Caching, sessions, rate limiting, A/B testing
+- [gotchas.md](./gotchas.md) - Eventual consistency, concurrent writes, value limits
+
+## See Also
+
+- [workers](../workers/) - Worker runtime for KV access
+- [d1](../d1/) - Use D1 for strong consistency needs
+- [durable-objects](../durable-objects/) - Strongly consistent alternative
diff --git a/.agents/skills/cloudflare-deploy/references/kv/api.md b/.agents/skills/cloudflare-deploy/references/kv/api.md
new file mode 100644
index 0000000..35063f2
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/kv/api.md
@@ -0,0 +1,160 @@
+# KV API Reference
+
+## Read Operations
+
+```typescript
+// Single key (string)
+const value = await env.MY_KV.get("user:123");
+
+// JSON type (auto-parsed)
+const config = await env.MY_KV.get("config", "json");
+
+// ArrayBuffer for binary
+const buffer = await env.MY_KV.get("image", "arrayBuffer");
+
+// Stream for large values
+const stream = await env.MY_KV.get("large-file", "stream");
+
+// With cache TTL (min 60s)
+const value = await env.MY_KV.get("key", { type: "text", cacheTtl: 300 });
+
+// Bulk get (max 100 keys, counts as 1 operation)
+const keys = ["user:1", "user:2", "user:3", "missing:key"];
+const results = await env.MY_KV.get(keys);
+// Returns Map<string, string | null>
+
+console.log(results.get("user:1")); // "John" (if exists)
+console.log(results.get("missing:key")); // null
+
+// Process results with null handling
+for (const [key, value] of results) {
+ if (value !== null) {
+ // Handle found keys
+ console.log(`${key}: ${value}`);
+ }
+}
+
+// TypeScript with generics (type-safe JSON parsing)
+interface UserProfile { name: string; email: string; }
+const profile = await env.USERS.get<UserProfile>("user:123", "json");
+// profile is typed as UserProfile | null
+if (profile) {
+ console.log(profile.name); // Type-safe access
+}
+
+// Bulk get with type
+const configs = await env.MY_KV.get(["config:app", "config:feature"], "json");
+// Map
+```
+
+## Write Operations
+
+```typescript
+// Basic put
+await env.MY_KV.put("key", "value");
+await env.MY_KV.put("config", JSON.stringify({ theme: "dark" }));
+
+// With expiration (UNIX timestamp)
+await env.MY_KV.put("session", token, {
+ expiration: Math.floor(Date.now() / 1000) + 3600
+});
+
+// With TTL (seconds from now, min 60)
+await env.MY_KV.put("cache", data, { expirationTtl: 300 });
+
+// With metadata (max 1024 bytes)
+await env.MY_KV.put("user:profile", userData, {
+ metadata: { version: 2, lastUpdated: Date.now() }
+});
+
+// Combined
+await env.MY_KV.put("temp", value, {
+ expirationTtl: 3600,
+ metadata: { temporary: true }
+});
+```
+
+## Get with Metadata
+
+```typescript
+// Single key
+const result = await env.MY_KV.getWithMetadata("user:profile");
+// { value: string | null, metadata: any | null }
+
+if (result.value && result.metadata) {
+ const { version, lastUpdated } = result.metadata;
+}
+
+// Multiple keys (bulk)
+const keys = ["key1", "key2", "key3"];
+const results = await env.MY_KV.getWithMetadata(keys);
+// Returns Map<string, { value: string | null, metadata: any | null }>
+
+for (const [key, result] of results) {
+ if (result.value) {
+ console.log(`${key}: ${result.value}`);
+ console.log(`Metadata: ${JSON.stringify(result.metadata)}`);
+ // cacheStatus field indicates cache hit/miss (when available)
+ }
+}
+
+// With type
+const result = await env.MY_KV.getWithMetadata<UserData>("user:123", "json");
+// result: { value: UserData | null, metadata: any | null, cacheStatus?: string }
+```
+
+## Delete Operations
+
+```typescript
+await env.MY_KV.delete("key"); // Always succeeds (even if key missing)
+```
+
+## List Operations
+
+```typescript
+// List all
+const keys = await env.MY_KV.list();
+// { keys: [...], list_complete: boolean, cursor?: string }
+
+// With prefix
+const userKeys = await env.MY_KV.list({ prefix: "user:" });
+
+// Pagination
+let cursor: string | undefined, listComplete = false;
+const allKeys = [];
+do {
+  const result = await env.MY_KV.list({ cursor, limit: 1000 });
+  allKeys.push(...result.keys);
+  cursor = result.cursor; listComplete = result.list_complete;
+} while (!listComplete);
+```
+
+## Performance Considerations
+
+### Type Selection
+
+| Type | Use Case | Performance |
+|------|----------|-------------|
+| `stream` | Large values (>1MB) | Fastest - no buffering |
+| `arrayBuffer` | Binary data | Fast - single allocation |
+| `text` | String values | Medium |
+| `json` | Objects (parse overhead) | Slowest - parsing cost |
+
+### Parallel Reads
+
+```typescript
+// Efficient parallel reads with Promise.all()
+const [user, settings, cache] = await Promise.all([
+ env.USERS.get("user:123", "json"),
+ env.SETTINGS.get("config:app", "json"),
+ env.CACHE.get("data:latest")
+]);
+```
+
+## Error Handling
+
+- **Missing keys:** Return `null` (not an error)
+- **Rate limit (429):** Retry with exponential backoff (see gotchas.md)
+- **Response too large (413):** Values >25MB fail with 413 error
+
+See [gotchas.md](./gotchas.md) for detailed error patterns and solutions.
diff --git a/.agents/skills/cloudflare-deploy/references/kv/configuration.md b/.agents/skills/cloudflare-deploy/references/kv/configuration.md
new file mode 100644
index 0000000..0aefa5f
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/kv/configuration.md
@@ -0,0 +1,144 @@
+# KV Configuration
+
+## Create Namespace
+
+```bash
+wrangler kv namespace create MY_NAMESPACE
+# Output: { binding = "MY_NAMESPACE", id = "abc123..." }
+
+wrangler kv namespace create MY_NAMESPACE --preview # For local dev
+```
+
+## Workers Binding
+
+**wrangler.jsonc:**
+```jsonc
+{
+  "kv_namespaces": [
+    {
+      "binding": "MY_KV",
+      "id": "abc123xyz789",
+      // Optional: separate namespace used by `wrangler dev` (preview).
+      // `preview_id` lives on the same entry as `id`; two entries with
+      // the same "binding" name would conflict rather than select
+      // between production and preview.
+      "preview_id": "preview-abc123"
+    }
+  ]
+}
+```
+
+## TypeScript Types
+
+**env.d.ts:**
+```typescript
+interface Env {
+ MY_KV: KVNamespace;
+ SESSIONS: KVNamespace;
+ CACHE: KVNamespace;
+}
+```
+
+**worker.ts:**
+```typescript
+export default {
+  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+    // env.MY_KV is now typed as KVNamespace
+    const value = await env.MY_KV.get("key");
+    return new Response(value || "Not found");
+  }
+} satisfies ExportedHandler<Env>;
+```
+
+**Type-safe JSON operations:**
+```typescript
+interface UserProfile {
+ name: string;
+ email: string;
+ role: "admin" | "user";
+}
+
+const profile = await env.USERS.get<UserProfile>("user:123", "json");
+// profile: UserProfile | null (type-safe!)
+if (profile) {
+ console.log(profile.name); // TypeScript knows this is a string
+}
+```
+
+## CLI Operations
+
+```bash
+# Put
+wrangler kv key put --binding=MY_KV "key" "value"
+wrangler kv key put --binding=MY_KV "key" --path=./file.json --ttl=3600
+
+# Get
+wrangler kv key get --binding=MY_KV "key"
+
+# Delete
+wrangler kv key delete --binding=MY_KV "key"
+
+# List
+wrangler kv key list --binding=MY_KV --prefix="user:"
+
+# Bulk operations (max 10,000 keys per file)
+wrangler kv bulk put data.json --binding=MY_KV
+wrangler kv bulk get keys.json --binding=MY_KV
+wrangler kv bulk delete keys.json --binding=MY_KV --force
+```
+
+## Local Development
+
+```bash
+wrangler dev # Local KV (isolated)
+wrangler dev --remote # Remote KV (production)
+
+# Or in wrangler.jsonc:
+# "kv_namespaces": [{ "binding": "MY_KV", "id": "...", "remote": true }]
+```
+
+## REST API
+
+### Single Operations
+
+```typescript
+import Cloudflare from 'cloudflare';
+
+const client = new Cloudflare({
+ apiEmail: process.env.CLOUDFLARE_EMAIL,
+ apiKey: process.env.CLOUDFLARE_API_KEY
+});
+
+// Single key operations
+await client.kv.namespaces.values.update(namespaceId, 'key', {
+ account_id: accountId,
+ value: 'value',
+ expiration_ttl: 3600
+});
+```
+
+### Bulk Operations
+
+```typescript
+// Bulk update (up to 10,000 keys, max 100MB total)
+await client.kv.namespaces.bulkUpdate(namespaceId, {
+ account_id: accountId,
+ body: [
+ { key: "key1", value: "value1", expiration_ttl: 3600 },
+ { key: "key2", value: "value2", metadata: { version: 1 } },
+ { key: "key3", value: "value3" }
+ ]
+});
+
+// Bulk get (up to 100 keys)
+const results = await client.kv.namespaces.bulkGet(namespaceId, {
+ account_id: accountId,
+ keys: ["key1", "key2", "key3"]
+});
+
+// Bulk delete (up to 10,000 keys)
+await client.kv.namespaces.bulkDelete(namespaceId, {
+ account_id: accountId,
+ keys: ["key1", "key2", "key3"]
+});
+```
diff --git a/.agents/skills/cloudflare-deploy/references/kv/gotchas.md b/.agents/skills/cloudflare-deploy/references/kv/gotchas.md
new file mode 100644
index 0000000..5ad3213
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/kv/gotchas.md
@@ -0,0 +1,131 @@
+# KV Gotchas & Troubleshooting
+
+## Common Errors
+
+### "Stale Read After Write"
+
+**Cause:** Eventual consistency means writes may not be immediately visible in other regions
+**Solution:** Don't read immediately after write; return confirmation without reading or use the local value you just wrote. Writes visible immediately in same location, ≤60s globally
+
+```typescript
+// ❌ BAD: Read immediately after write
+await env.KV.put("key", "value");
+const value = await env.KV.get("key"); // May be null in other regions!
+
+// ✅ GOOD: Use the value you just wrote
+const newValue = "value";
+await env.KV.put("key", newValue);
+return new Response(newValue); // Don't re-read
+```
+
+### "429 Rate Limit on Concurrent Writes"
+
+**Cause:** Multiple concurrent writes to same key exceeding 1 write/second limit
+**Solution:** Use sequential writes, unique keys for concurrent operations, or implement retry with exponential backoff
+
+```typescript
+async function putWithRetry(
+ kv: KVNamespace,
+ key: string,
+ value: string,
+ maxAttempts = 5
+): Promise<void> {
+ let delay = 1000;
+ for (let i = 0; i < maxAttempts; i++) {
+ try {
+ await kv.put(key, value);
+ return;
+ } catch (err) {
+ if (err instanceof Error && err.message.includes("429")) {
+ if (i === maxAttempts - 1) throw err;
+ await new Promise(r => setTimeout(r, delay));
+ delay *= 2; // Exponential backoff
+ } else {
+ throw err;
+ }
+ }
+ }
+}
+```
+
+### "Inefficient Multiple Gets"
+
+**Cause:** Making multiple individual get() calls instead of bulk operation
+**Solution:** Use bulk get with array of keys: `env.USERS.get(["user:1", "user:2", "user:3"])` to reduce to 1 operation
+
+### "Null Reference Error"
+
+**Cause:** Attempting to use value without checking for null when key doesn't exist
+**Solution:** Always handle null returns - KV returns `null` for missing keys, not undefined
+
+```typescript
+// ❌ BAD: Assumes value exists
+const config = await env.KV.get("config", "json");
+return config.theme; // TypeError if null!
+
+// ✅ GOOD: Null checks
+const config = await env.KV.get("config", "json");
+return config?.theme ?? "default";
+
+// ✅ GOOD: Early return
+const config = await env.KV.get("config", "json");
+if (!config) return new Response("Not found", { status: 404 });
+return new Response(config.theme);
+```
+
+### "Negative Lookup Caching"
+
+**Cause:** Keys that don't exist are cached as "not found" for up to 60s
+**Solution:** Avoid check-then-create patterns — design code to tolerate a missing key (fall back to defaults), because a key created right after a negative lookup may stay invisible for up to ~60s until the cached "not found" result expires
+
+```typescript
+// Check → create pattern has race condition
+const exists = await env.KV.get("key"); // null, cached as "not found"
+if (!exists) {
+ await env.KV.put("key", "value");
+ // Next get() may still return null for ~60s due to negative cache
+}
+
+// Alternative: Always assume key may not exist, use defaults
+const value = await env.KV.get("key") ?? "default-value";
+```
+
+## Performance Tips
+
+| Scenario | Recommendation | Why |
+|----------|----------------|-----|
+| Large values (>1MB) | Use `stream` type | Avoids buffering entire value in memory |
+| Many small keys | Coalesce into one JSON object | Reduces operations, improves cache hit rate |
+| High write volume | Spread across different keys | Avoid 1 write/second per-key limit |
+| Cold reads | Increase `cacheTtl` parameter | Reduces latency for frequently-read data |
+| Bulk operations | Use array form of get() | Single operation, better performance |
+
+## Cost Examples
+
+**Free tier:**
+- 100K reads/day = 3M/month ✅
+- 1K writes/day = 30K/month ✅
+- 1GB storage ✅
+
+**Example paid workload:**
+- 10M reads/month = $5.00
+- 100K writes/month = $0.50
+- 1GB storage = $0.50
+- **Total: ~$6/month**
+
+## Limits
+
+| Limit | Value | Notes |
+|-------|-------|-------|
+| Key size | 512 bytes | Maximum key length |
+| Value size | 25 MiB | Maximum value; 413 error if exceeded |
+| Metadata size | 1024 bytes | Maximum metadata per key |
+| cacheTtl minimum | 60s | Minimum cache TTL |
+| Write rate per key | 1 write/second | All plans; 429 error if exceeded |
+| Propagation time | ≤60s | Global propagation time |
+| Bulk get max | 100 keys | Maximum keys per bulk operation |
+| Operations per Worker | 1,000 | Per request (bulk counts as 1) |
+| Reads pricing | $0.50 per 1M | Per million reads |
+| Writes pricing | $5.00 per 1M | Per million writes |
+| Deletes pricing | $5.00 per 1M | Per million deletes |
+| Storage pricing | $0.50 per GB-month | Per GB per month |
diff --git a/.agents/skills/cloudflare-deploy/references/kv/patterns.md b/.agents/skills/cloudflare-deploy/references/kv/patterns.md
new file mode 100644
index 0000000..8386074
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/kv/patterns.md
@@ -0,0 +1,196 @@
+# KV Patterns & Best Practices
+
+## Multi-Tier Caching
+
+```typescript
+// Memory → KV → Origin (3-tier cache)
+const memoryCache = new Map<string, { data: unknown; expires: number }>();
+
+async function getCached(env: Env, key: string): Promise<unknown> {
+ const now = Date.now();
+
+ // L1: Memory cache (fastest)
+ const cached = memoryCache.get(key);
+ if (cached && cached.expires > now) {
+ return cached.data;
+ }
+
+ // L2: KV cache (fast)
+ const kvValue = await env.CACHE.get(key, "json");
+ if (kvValue) {
+ memoryCache.set(key, { data: kvValue, expires: now + 60000 }); // 1min in memory
+ return kvValue;
+ }
+
+ // L3: Origin (slow)
+ const origin = await fetch(`https://api.example.com/${key}`).then(r => r.json());
+
+ // Backfill caches
+ await env.CACHE.put(key, JSON.stringify(origin), { expirationTtl: 300 }); // 5min in KV
+ memoryCache.set(key, { data: origin, expires: now + 60000 });
+
+ return origin;
+}
+```
+
+## API Response Caching
+
+```typescript
+async function getCachedData<T>(env: Env, key: string, fetcher: () => Promise<T>): Promise<T> {
+  const cached = await env.MY_KV.get<T>(key, "json");
+  if (cached) return cached;
+
+ const data = await fetcher();
+ await env.MY_KV.put(key, JSON.stringify(data), { expirationTtl: 300 });
+ return data;
+}
+
+const apiData = await getCachedData(
+ env,
+ "cache:users",
+ () => fetch("https://api.example.com/users").then(r => r.json())
+);
+```
+
+## Session Management
+
+```typescript
+interface Session { userId: string; expiresAt: number; }
+
+async function createSession(env: Env, userId: string): Promise<string> {
+ const sessionId = crypto.randomUUID();
+ const expiresAt = Date.now() + (24 * 60 * 60 * 1000);
+
+ await env.SESSIONS.put(
+ `session:${sessionId}`,
+ JSON.stringify({ userId, expiresAt }),
+ { expirationTtl: 86400, metadata: { createdAt: Date.now() } }
+ );
+
+ return sessionId;
+}
+
+async function getSession(env: Env, sessionId: string): Promise<Session | null> {
+  const data = await env.SESSIONS.get<Session>(`session:${sessionId}`, "json");
+ if (!data || data.expiresAt < Date.now()) return null;
+ return data;
+}
+```
+
+## Coalesce Cold Keys
+
+```typescript
+// ❌ BAD: Many individual keys
+await env.KV.put("user:123:name", "John");
+await env.KV.put("user:123:email", "john@example.com");
+
+// ✅ GOOD: Single coalesced object
+await env.USERS.put("user:123:profile", JSON.stringify({
+ name: "John",
+ email: "john@example.com",
+ role: "admin"
+}));
+
+// Benefits: Hot key cache, single read, reduced operations
+// Trade-off: Harder to update individual fields
+```
+
+## Prefix-Based Namespacing
+
+```typescript
+// Logical partitioning within single namespace
+const PREFIXES = {
+ users: "user:",
+ sessions: "session:",
+ cache: "cache:",
+ features: "feature:"
+} as const;
+
+// Write with prefix
+async function setUser(env: Env, id: string, data: any) {
+ await env.KV.put(`${PREFIXES.users}${id}`, JSON.stringify(data));
+}
+
+// Read with prefix
+async function getUser(env: Env, id: string) {
+ return await env.KV.get(`${PREFIXES.users}${id}`, "json");
+}
+
+// List by prefix
+async function listUserIds(env: Env): Promise<string[]> {
+ const result = await env.KV.list({ prefix: PREFIXES.users });
+ return result.keys.map(k => k.name.replace(PREFIXES.users, ""));
+}
+
+// Example hierarchy
+"user:123:profile"
+"user:123:settings"
+"cache:api:users"
+"session:abc-def"
+"feature:flags:beta"
+```
+
+## Metadata Versioning
+
+```typescript
+interface VersionedData {
+ version: number;
+ data: any;
+}
+
+async function migrateIfNeeded(env: Env, key: string) {
+ const result = await env.DATA.getWithMetadata(key, "json");
+
+ if (!result.value) return null;
+
+ const currentVersion = result.metadata?.version || 1;
+ const targetVersion = 2;
+
+ if (currentVersion < targetVersion) {
+ // Migrate data format
+ const migrated = migrate(result.value, currentVersion, targetVersion);
+
+ // Store with new version
+ await env.DATA.put(key, JSON.stringify(migrated), {
+ metadata: { version: targetVersion, migratedAt: Date.now() }
+ });
+
+ return migrated;
+ }
+
+ return result.value;
+}
+
+function migrate(data: any, from: number, to: number): any {
+ if (from === 1 && to === 2) {
+ // V1 → V2: Rename field
+ return { ...data, userName: data.name };
+ }
+ return data;
+}
+```
+
+## Error Boundary Pattern
+
+```typescript
+// Resilient get with fallback
+async function resilientGet<T>(
+  env: Env,
+  key: string,
+  fallback: T
+): Promise<T> {
+  try {
+    const value = await env.KV.get<T>(key, "json");
+ return value ?? fallback;
+ } catch (err) {
+ console.error(`KV error for ${key}:`, err);
+ return fallback;
+ }
+}
+
+// Usage
+const config = await resilientGet(env, "config:app", {
+ theme: "light",
+ maxItems: 10
+});
+```
diff --git a/.agents/skills/cloudflare-deploy/references/miniflare/README.md b/.agents/skills/cloudflare-deploy/references/miniflare/README.md
new file mode 100644
index 0000000..82baf7c
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/miniflare/README.md
@@ -0,0 +1,105 @@
+# Miniflare
+
+Local simulator for Cloudflare Workers development/testing. Runs Workers in workerd sandbox implementing runtime APIs - no internet required.
+
+## Features
+
+- Full-featured: KV, Durable Objects, R2, D1, WebSockets, Queues
+- Fully-local: test without internet, instant reload
+- TypeScript-native: detailed logging, source maps
+- Advanced testing: dispatch events without HTTP, simulate Worker connections
+
+## When to Use
+
+**Decision tree for testing Workers:**
+
+```
+Need to test Workers?
+│
+├─ Unit tests for business logic only?
+│ └─ getPlatformProxy (Vitest/Jest) → [patterns.md](./patterns.md#getplatformproxy)
+│ Fast, no HTTP, direct binding access
+│
+├─ Integration tests with full runtime?
+│ ├─ Single Worker?
+│ │ └─ Miniflare API → [Quick Start](#quick-start)
+│ │ Full control, programmatic access
+│ │
+│ ├─ Multiple Workers + service bindings?
+│ │ └─ Miniflare workers array → [configuration.md](./configuration.md#multiple-workers)
+│ │ Shared storage, inter-worker calls
+│ │
+│ └─ Vitest test runner integration?
+│ └─ vitest-pool-workers → [patterns.md](./patterns.md#vitest-pool-workers)
+│ Full Workers env in Vitest
+│
+└─ Local dev server?
+ └─ wrangler dev (not Miniflare)
+ Hot reload, automatic config
+```
+
+**Use Miniflare for:**
+- Integration tests with full Worker runtime
+- Testing bindings/storage locally
+- Multiple Workers with service bindings
+- Programmatic event dispatch (fetch, queue, scheduled)
+
+**Use getPlatformProxy for:**
+- Fast unit tests of business logic
+- Testing without HTTP overhead
+- Vitest/Jest environments
+
+**Use Wrangler for:**
+- Local development workflow
+- Production deployments
+
+## Setup
+
+```bash
+npm i -D miniflare
+```
+
+Requires ES modules in `package.json`:
+```json
+{"type": "module"}
+```
+
+## Quick Start
+
+```js
+import { Miniflare } from "miniflare";
+
+const mf = new Miniflare({
+ modules: true,
+ script: `
+ export default {
+ async fetch(request, env, ctx) {
+ return new Response("Hello Miniflare!");
+ }
+ }
+ `,
+});
+
+const res = await mf.dispatchFetch("http://localhost:8787/");
+console.log(await res.text()); // Hello Miniflare!
+await mf.dispose();
+```
+
+## Reading Order
+
+**New to Miniflare?** Start here:
+1. [Quick Start](#quick-start) - Running in 2 minutes
+2. [When to Use](#when-to-use) - Choose your testing approach
+3. [patterns.md](./patterns.md) - Testing patterns (getPlatformProxy, Vitest, node:test)
+4. [configuration.md](./configuration.md) - Configure bindings, storage, multiple workers
+
+**Troubleshooting:**
+- [gotchas.md](./gotchas.md) - Common errors and debugging
+
+**API reference:**
+- [api.md](./api.md) - Complete method reference
+
+## See Also
+- [wrangler](../wrangler/) - CLI tool that embeds Miniflare for `wrangler dev`
+- [workerd](../workerd/) - Runtime that powers Miniflare
+- [workers](../workers/) - Workers runtime API documentation
diff --git a/.agents/skills/cloudflare-deploy/references/miniflare/api.md b/.agents/skills/cloudflare-deploy/references/miniflare/api.md
new file mode 100644
index 0000000..e4df4d7
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/miniflare/api.md
@@ -0,0 +1,187 @@
+# Programmatic API
+
+## Miniflare Class
+
+```typescript
+class Miniflare {
+  constructor(options: MiniflareOptions);
+
+  // Lifecycle
+  ready: Promise<URL>; // Resolves when server ready, returns URL
+  dispose(): Promise<void>; // Cleanup resources
+  setOptions(options: MiniflareOptions): Promise<void>; // Reload config
+
+  // Event dispatching
+  dispatchFetch(url: string | URL | Request, init?: RequestInit): Promise<Response>;
+  getWorker(name?: string): Promise<Fetcher>;
+
+  // Bindings access
+  getBindings<Env = Record<string, unknown>>(name?: string): Promise<Env>;
+  getCf(name?: string): Promise<IncomingRequestCfProperties>;
+  getKVNamespace(name: string): Promise<KVNamespace>;
+  getR2Bucket(name: string): Promise<R2Bucket>;
+  getDurableObjectNamespace(name: string): Promise<DurableObjectNamespace>;
+  getDurableObjectStorage(id: DurableObjectId): Promise<DurableObjectStorage>;
+  getD1Database(name: string): Promise<D1Database>;
+  getCaches(): Promise<CacheStorage>;
+  getQueueProducer(name: string): Promise<Queue>;
+
+  // Debugging
+  getInspectorURL(): Promise<URL>; // Chrome DevTools inspector URL
+}
+```
+
+## Event Dispatching
+
+**Fetch (no HTTP server):**
+```js
+const res = await mf.dispatchFetch("http://localhost:8787/path", {
+ method: "POST",
+ headers: { "Authorization": "Bearer token" },
+ body: JSON.stringify({ data: "value" }),
+});
+```
+
+**Custom Host routing:**
+```js
+const res = await mf.dispatchFetch("http://localhost:8787/", {
+ headers: { "Host": "api.example.com" },
+});
+```
+
+**Scheduled:**
+```js
+const worker = await mf.getWorker();
+const result = await worker.scheduled({ cron: "30 * * * *" });
+// result: { outcome: "ok", noRetry: false }
+```
+
+**Queue:**
+```js
+const worker = await mf.getWorker();
+const result = await worker.queue("queue-name", [
+ { id: "msg1", timestamp: new Date(), body: "data", attempts: 1 },
+]);
+// result: { outcome: "ok", retryAll: false, ackAll: false, ... }
+```
+
+## Bindings Access
+
+**Environment variables:**
+```js
+// Basic usage
+const bindings = await mf.getBindings();
+console.log(bindings.SECRET_KEY);
+
+// With type safety (recommended):
+interface Env {
+ SECRET_KEY: string;
+ API_URL: string;
+ KV: KVNamespace;
+}
+const env = await mf.getBindings<Env>();
+env.SECRET_KEY; // string (typed!)
+env.KV.get("key"); // KVNamespace methods available
+```
+
+**Request.cf object:**
+```js
+const cf = await mf.getCf();
+console.log(cf?.colo); // "DFW"
+console.log(cf?.country); // "US"
+```
+
+**KV:**
+```js
+const ns = await mf.getKVNamespace("TEST_NAMESPACE");
+await ns.put("key", "value");
+const value = await ns.get("key");
+```
+
+**R2:**
+```js
+const bucket = await mf.getR2Bucket("BUCKET");
+await bucket.put("file.txt", "content");
+const object = await bucket.get("file.txt");
+```
+
+**Durable Objects:**
+```js
+const ns = await mf.getDurableObjectNamespace("COUNTER");
+const id = ns.idFromName("test");
+const stub = ns.get(id);
+const res = await stub.fetch("http://localhost/");
+
+// Access storage directly:
+const storage = await mf.getDurableObjectStorage(id);
+await storage.put("key", "value");
+```
+
+**D1:**
+```js
+const db = await mf.getD1Database("DB");
+await db.exec(`CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)`);
+await db.prepare("INSERT INTO users (name) VALUES (?)").bind("Alice").run();
+```
+
+**Cache:**
+```js
+const caches = await mf.getCaches();
+const defaultCache = caches.default;
+await defaultCache.put("http://example.com", new Response("cached"));
+```
+
+**Queue producer:**
+```js
+const producer = await mf.getQueueProducer("QUEUE");
+await producer.send({ body: "message data" });
+```
+
+## Lifecycle
+
+**Reload:**
+```js
+await mf.setOptions({
+ scriptPath: "worker.js",
+ bindings: { VERSION: "2.0" },
+});
+```
+
+**Watch (manual):**
+```js
+import { watch } from "fs";
+
+const config = { scriptPath: "worker.js" };
+const mf = new Miniflare(config);
+
+watch("worker.js", async () => {
+ console.log("Reloading...");
+ await mf.setOptions(config);
+});
+```
+
+**Cleanup:**
+```js
+await mf.dispose();
+```
+
+## Debugging
+
+**Inspector URL for DevTools:**
+```js
+const url = await mf.getInspectorURL();
+console.log(`DevTools: ${url}`);
+// Open in Chrome DevTools for breakpoints, profiling
+```
+
+**Wait for server ready:**
+```js
+const mf = new Miniflare({ scriptPath: "worker.js" });
+const url = await mf.ready; // Promise<URL>
+console.log(`Server running at ${url}`); // http://127.0.0.1:8787
+
+// Note: dispatchFetch() waits automatically, no need to await ready
+const res = await mf.dispatchFetch("http://localhost/"); // Works immediately
+```
+
+See [configuration.md](./configuration.md) for all constructor options.
diff --git a/.agents/skills/cloudflare-deploy/references/miniflare/configuration.md b/.agents/skills/cloudflare-deploy/references/miniflare/configuration.md
new file mode 100644
index 0000000..b269b24
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/miniflare/configuration.md
@@ -0,0 +1,173 @@
+# Configuration
+
+## Script Loading
+
+```js
+// Inline
+new Miniflare({ modules: true, script: `export default { ... }` });
+
+// File-based
+new Miniflare({ scriptPath: "worker.js" });
+
+// Multi-module
+new Miniflare({
+ scriptPath: "src/index.js",
+ modules: true,
+ modulesRules: [
+ { type: "ESModule", include: ["**/*.js"] },
+ { type: "Text", include: ["**/*.txt"] },
+ ],
+});
+```
+
+## Compatibility
+
+```js
+new Miniflare({
+ compatibilityDate: "2026-01-01", // Use recent date for latest features
+ compatibilityFlags: [
+ "nodejs_compat", // Node.js APIs (process, Buffer, etc)
+ "streams_enable_constructors", // Stream constructors
+ ],
+ upstream: "https://example.com", // Fallback for unhandled requests
+});
+```
+
+**Critical:** Use `compatibilityDate: "2026-01-01"` or latest to match production runtime. Old dates limit available APIs.
+
+## HTTP Server & Request.cf
+
+```js
+new Miniflare({
+ port: 8787, // Default: 8787
+ host: "127.0.0.1",
+ https: true, // Self-signed cert
+ liveReload: true, // Auto-reload HTML
+
+ cf: true, // Fetch live Request.cf data (cached)
+ // cf: "./cf.json", // Or load from file
+ // cf: { colo: "DFW" }, // Or inline mock
+});
+```
+
+**Note:** For tests, use `dispatchFetch()` (no port conflicts).
+
+## Storage Bindings
+
+```js
+new Miniflare({
+ // KV
+ kvNamespaces: ["TEST_NAMESPACE", "CACHE"],
+ kvPersist: "./kv-data", // Optional: persist to disk
+
+ // R2
+ r2Buckets: ["BUCKET", "IMAGES"],
+ r2Persist: "./r2-data",
+
+ // Durable Objects
+ modules: true,
+ durableObjects: {
+ COUNTER: "Counter", // className
+ API_OBJECT: { className: "ApiObject", scriptName: "api-worker" },
+ },
+ durableObjectsPersist: "./do-data",
+
+ // D1
+ d1Databases: ["DB"],
+ d1Persist: "./d1-data",
+
+ // Cache
+ cache: true, // Default
+ cachePersist: "./cache-data",
+});
+```
+
+## Bindings
+
+```js
+new Miniflare({
+ // Environment variables
+ bindings: {
+ SECRET_KEY: "my-secret-value",
+ API_URL: "https://api.example.com",
+ DEBUG: true,
+ },
+
+ // Other bindings
+ wasmBindings: { ADD_MODULE: "./add.wasm" },
+ textBlobBindings: { TEXT: "./data.txt" },
+ queueProducers: ["QUEUE"],
+});
+```
+
+## Multiple Workers
+
+```js
+new Miniflare({
+ workers: [
+ {
+ name: "main",
+ kvNamespaces: { DATA: "shared" },
+ serviceBindings: { API: "api-worker" },
+ script: `export default { ... }`,
+ },
+ {
+ name: "api-worker",
+ kvNamespaces: { DATA: "shared" }, // Shared storage
+ script: `export default { ... }`,
+ },
+ ],
+});
+```
+
+**With routing:**
+```js
+workers: [
+ { name: "api", scriptPath: "./api.js", routes: ["api.example.com/*"] },
+ { name: "web", scriptPath: "./web.js", routes: ["example.com/*"] },
+],
+```
+
+## Logging & Performance
+
+```js
+import { Log, LogLevel } from "miniflare";
+
+new Miniflare({
+ log: new Log(LogLevel.DEBUG), // DEBUG | INFO | WARN | ERROR | NONE
+ scriptTimeout: 30000, // CPU limit (ms)
+ workersConcurrencyLimit: 10, // Max concurrent workers
+});
+```
+
+## Workers Sites
+
+```js
+new Miniflare({
+ sitePath: "./public",
+ siteInclude: ["**/*.html", "**/*.css"],
+ siteExclude: ["**/*.map"],
+});
+```
+
+## From wrangler.toml
+
+Miniflare doesn't auto-read `wrangler.toml`:
+
+```toml
+# wrangler.toml
+name = "my-worker"
+main = "src/index.ts"
+compatibility_date = "2026-01-01"
+[[kv_namespaces]]
+binding = "KV"
+```
+
+```js
+// Miniflare equivalent
+new Miniflare({
+ scriptPath: "src/index.ts",
+ compatibilityDate: "2026-01-01",
+ kvNamespaces: ["KV"],
+});
+```
diff --git a/.agents/skills/cloudflare-deploy/references/miniflare/gotchas.md b/.agents/skills/cloudflare-deploy/references/miniflare/gotchas.md
new file mode 100644
index 0000000..dfcd157
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/miniflare/gotchas.md
@@ -0,0 +1,160 @@
+# Gotchas & Troubleshooting
+
+## Miniflare Limitations
+
+**Not supported:**
+- Analytics Engine (use mocks)
+- Cloudflare Images/Stream
+- Browser Rendering API
+- Tail Workers
+- Workers for Platforms (partial support)
+
+**Behavior differences from production:**
+- Runs workerd locally, not Cloudflare edge
+- Storage is local (filesystem/memory), not distributed
+- `Request.cf` is cached/mocked, not real edge data
+- Performance differs from edge
+- Caching implementation may vary slightly
+
+## Common Errors
+
+### "Cannot find module"
+**Cause:** Module path wrong or `modulesRules` not configured
+**Solution:**
+```js
+new Miniflare({
+ modules: true,
+ modulesRules: [{ type: "ESModule", include: ["**/*.js"] }],
+});
+```
+
+### "Data not persisting"
+**Cause:** Persist paths are files, not directories
+**Solution:**
+```js
+kvPersist: "./data/kv", // Directory, not file
+```
+
+### "Cannot run TypeScript"
+**Cause:** Miniflare doesn't transpile TypeScript
+**Solution:** Build first with esbuild/tsc, then run compiled JS
+
+### "`request.cf` is undefined"
+**Cause:** CF data not configured
+**Solution:**
+```js
+new Miniflare({ cf: true }); // Or cf: "./cf.json"
+```
+
+### "EADDRINUSE" port conflict
+**Cause:** Multiple instances using same port
+**Solution:** Use `dispatchFetch()` (no HTTP server) or `port: 0` for auto-assign
+
+### "Durable Object not found"
+**Cause:** Class export doesn't match config name
+**Solution:**
+```js
+export class Counter {} // Must match
+new Miniflare({ durableObjects: { COUNTER: "Counter" } });
+```
+
+## Debugging
+
+**Enable verbose logging:**
+```js
+import { Log, LogLevel } from "miniflare";
+new Miniflare({ log: new Log(LogLevel.DEBUG) });
+```
+
+**Chrome DevTools:**
+```js
+const url = await mf.getInspectorURL();
+console.log(`DevTools: ${url}`); // Open in Chrome
+```
+
+**Inspect bindings:**
+```js
+const env = await mf.getBindings();
+console.log(Object.keys(env));
+```
+
+**Verify storage:**
+```js
+const ns = await mf.getKVNamespace("TEST");
+const { keys } = await ns.list();
+```
+
+## Best Practices
+
+**✓ Do:**
+- Use `dispatchFetch()` for tests (no HTTP server)
+- In-memory storage for CI (omit persist options)
+- New instances per test for isolation
+- Type-safe bindings with interfaces
+- `await mf.dispose()` in cleanup
+
+**✗ Avoid:**
+- HTTP server in tests
+- Shared instances without cleanup
+- Old compatibility dates (use 2026+)
+
+## Migration Guides
+
+### From Miniflare 2.x to 3+
+
+Breaking changes in v3+:
+
+| v2 | v3+ |
+|----|-----|
+| `getBindings()` sync | `getBindings()` returns Promise |
+| `ready` is void | `ready` returns `Promise<URL>` |
+| service-worker-mock | Built on workerd |
+| Different options | Restructured constructor |
+
+**Example migration:**
+```js
+// v2
+const bindings = mf.getBindings();
+mf.ready; // void
+
+// v3+
+const bindings = await mf.getBindings();
+const url = await mf.ready; // Promise<URL>
+```
+
+### From unstable_dev to Miniflare
+
+```js
+// Old (deprecated)
+import { unstable_dev } from "wrangler";
+const worker = await unstable_dev("src/index.ts");
+
+// New
+import { Miniflare } from "miniflare";
+const mf = new Miniflare({ scriptPath: "src/index.ts" });
+```
+
+### From Wrangler Dev
+
+Miniflare doesn't auto-read `wrangler.toml`:
+
+```js
+// Translate manually:
+new Miniflare({
+ scriptPath: "dist/worker.js",
+ compatibilityDate: "2026-01-01",
+ kvNamespaces: ["KV"],
+ bindings: { API_KEY: process.env.API_KEY },
+});
+```
+
+## Resource Limits
+
+| Limit | Value | Notes |
+|-------|-------|-------|
+| CPU time | 30s default | Configurable via `scriptTimeout` |
+| Storage | Filesystem | Performance varies by disk |
+| Memory | System dependent | No artificial limits |
+| Request.cf | Cached/mocked | Not live edge data |
+
+See [patterns.md](./patterns.md) for testing examples.
diff --git a/.agents/skills/cloudflare-deploy/references/miniflare/patterns.md b/.agents/skills/cloudflare-deploy/references/miniflare/patterns.md
new file mode 100644
index 0000000..c89c3a5
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/miniflare/patterns.md
@@ -0,0 +1,181 @@
+# Testing Patterns
+
+## Choosing a Testing Approach
+
+| Approach | Use Case | Speed | Setup | Runtime |
+|----------|----------|-------|-------|---------|
+| **getPlatformProxy** | Unit tests, logic testing | Fast | Low | Miniflare |
+| **Miniflare API** | Integration tests, full control | Medium | Medium | Miniflare |
+| **vitest-pool-workers** | Vitest runner integration | Medium | Medium | workerd |
+
+**Quick guide:**
+- Unit tests → getPlatformProxy
+- Integration tests → Miniflare API
+- Vitest workflows → vitest-pool-workers
+
+## getPlatformProxy
+
+Lightweight unit testing - provides bindings without full Worker runtime.
+
+```js
+// vitest.config.js
+export default { test: { environment: "node" } };
+```
+
+```js
+import { getPlatformProxy } from "wrangler";
+import { describe, it, expect } from "vitest";
+
+const { env, dispose } = await getPlatformProxy();
+
+describe("Business logic", () => {
+  it("processes data with KV", async () => {
+    await env.KV.put("test", "value");
+    expect(await env.KV.get("test")).toBe("value");
+  });
+});
+
+// When done: await dispose();
+```
+
+**Pros:** Fast, simple
+**Cons:** No full runtime, can't test fetch handler
+
+## vitest-pool-workers
+
+Full Workers runtime in Vitest. Reads `wrangler.toml`.
+
+```bash
+npm i -D @cloudflare/vitest-pool-workers
+```
+
+```js
+// vitest.config.js
+import { defineWorkersConfig } from "@cloudflare/vitest-pool-workers/config";
+
+export default defineWorkersConfig({
+ test: {
+ poolOptions: { workers: { wrangler: { configPath: "./wrangler.toml" } } },
+ },
+});
+```
+
+```js
+import { env, SELF } from "cloudflare:test";
+import { it, expect } from "vitest";
+
+it("handles fetch", async () => {
+ const res = await SELF.fetch("http://example.com/");
+ expect(res.status).toBe(200);
+});
+```
+
+**Pros:** Full runtime, uses wrangler.toml
+**Cons:** Requires Wrangler config
+
+## Miniflare API (node:test)
+
+```js
+import assert from "node:assert";
+import test, { after, before } from "node:test";
+import { Miniflare } from "miniflare";
+
+let mf;
+before(() => {
+ mf = new Miniflare({ scriptPath: "src/index.js", kvNamespaces: ["TEST_KV"] });
+});
+
+test("fetch", async () => {
+ const res = await mf.dispatchFetch("http://localhost/");
+ assert.strictEqual(await res.text(), "Hello");
+});
+
+after(() => mf.dispose());
+```
+
+## Testing Durable Objects & Events
+
+```js
+// Durable Objects
+const ns = await mf.getDurableObjectNamespace("COUNTER");
+const stub = ns.get(ns.idFromName("test-counter"));
+await stub.fetch("http://localhost/increment");
+
+// Direct storage
+const storage = await mf.getDurableObjectStorage(ns.idFromName("test-counter"));
+const count = await storage.get("count");
+
+// Queue
+const worker = await mf.getWorker();
+await worker.queue("my-queue", [
+ { id: "msg1", timestamp: new Date(), body: { userId: 123 }, attempts: 1 },
+]);
+
+// Scheduled
+await worker.scheduled({ cron: "0 0 * * *" });
+```
+
+## Test Isolation & Mocking
+
+```js
+// Per-test isolation
+beforeEach(() => { mf = new Miniflare({ kvNamespaces: ["TEST"] }); });
+afterEach(() => mf.dispose());
+
+// Mock external APIs
+new Miniflare({
+ workers: [
+ { name: "main", serviceBindings: { API: "mock-api" }, script: `...` },
+ { name: "mock-api", script: `export default { async fetch() { return Response.json({mock: true}); } }` },
+ ],
+});
+```
+
+## Type Safety
+
+```ts
+import type { KVNamespace } from "@cloudflare/workers-types";
+
+interface Env {
+ KV: KVNamespace;
+ API_KEY: string;
+}
+
+const env = await mf.getBindings();
+await env.KV.put("key", "value"); // Typed!
+
+export default {
+ async fetch(req: Request, env: Env) {
+ return new Response(await env.KV.get("key"));
+ }
+} satisfies ExportedHandler;
+```
+
+## WebSocket Testing
+
+```js
+const res = await mf.dispatchFetch("http://localhost/ws", {
+ headers: { Upgrade: "websocket" },
+});
+assert.strictEqual(res.status, 101);
+```
+
+## Migration from unstable_dev
+
+```js
+// Old (deprecated)
+import { unstable_dev } from "wrangler";
+const worker = await unstable_dev("src/index.ts");
+
+// New
+import { Miniflare } from "miniflare";
+const mf = new Miniflare({ scriptPath: "src/index.ts" });
+```
+
+## CI/CD Tips
+
+```js
+// In-memory storage (faster)
+new Miniflare({ kvNamespaces: ["TEST"] }); // No persist = in-memory
+
+// Use dispatchFetch (no port conflicts)
+await mf.dispatchFetch("http://localhost/");
+```
+
+See [gotchas.md](./gotchas.md) for troubleshooting.
diff --git a/.agents/skills/cloudflare-deploy/references/network-interconnect/README.md b/.agents/skills/cloudflare-deploy/references/network-interconnect/README.md
new file mode 100644
index 0000000..e337f1b
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/network-interconnect/README.md
@@ -0,0 +1,99 @@
+# Cloudflare Network Interconnect (CNI)
+
+Private, high-performance connectivity to Cloudflare's network. **Enterprise-only**.
+
+## Connection Types
+
+**Direct**: Physical fiber in shared datacenter. 10/100 Gbps. You order cross-connect.
+
+**Partner**: Virtual via Console Connect, Equinix, Megaport, etc. Managed via partner SDN.
+
+**Cloud**: AWS Direct Connect or GCP Cloud Interconnect. Magic WAN only.
+
+## Dataplane Versions
+
+**v1 (Classic)**: GRE tunnel support, VLAN/BFD/LACP, asymmetric MTU (1500↓/1476↑), peering support.
+
+**v2 (Beta)**: No GRE, 1500 MTU both ways, no VLAN/BFD/LACP yet, ECMP instead.
+
+## Use Cases
+
+- **Magic Transit DSR**: DDoS protection, egress via ISP (v1/v2)
+- **Magic Transit + Egress**: DDoS + egress via CF (v1/v2)
+- **Magic WAN + Zero Trust**: Private backbone (v1 needs GRE, v2 native)
+- **Peering**: Public routes at PoP (v1 only)
+- **App Security**: WAF/Cache/LB (v1/v2 over Magic Transit)
+
+## Prerequisites
+
+- Enterprise plan
+- IPv4 /24+ or IPv6 /48+ prefixes
+- BGP ASN for v1
+- See [locations PDF](https://developers.cloudflare.com/network-interconnect/static/cni-locations-2026-01.pdf)
+
+## Specs
+
+- /31 point-to-point subnets
+- 10km max optical distance
+- 10G: 10GBASE-LR single-mode
+- 100G: 100GBASE-LR4 single-mode
+- **No SLA** (free service)
+- Backup Internet required
+
+## Throughput
+
+| Direction | 10G | 100G |
+|-----------|-----|------|
+| CF → Customer | 10 Gbps | 100 Gbps |
+| Customer → CF (peering) | 10 Gbps | 100 Gbps |
+| Customer → CF (Magic) | 1 Gbps/tunnel or CNI | 1 Gbps/tunnel or CNI |
+
+## Timeline
+
+2-4 weeks typical. Steps: request → config review → order connection → configure → test → enable health checks → activate → monitor.
+
+## In This Reference
+- [configuration.md](./configuration.md) - BGP, routing, setup
+- [api.md](./api.md) - API endpoints, SDKs
+- [patterns.md](./patterns.md) - HA, hybrid cloud, failover
+- [gotchas.md](./gotchas.md) - Troubleshooting, limits
+
+## Reading Order by Task
+
+| Task | Files to Load |
+|------|---------------|
+| Initial setup | README → configuration.md → api.md |
+| Create interconnect via API | api.md → gotchas.md |
+| Design HA architecture | patterns.md → README |
+| Troubleshoot connection | gotchas.md → configuration.md |
+| Cloud integration (AWS/GCP) | configuration.md → patterns.md |
+| Monitor + alerts | configuration.md |
+
+## Automation Boundary
+
+**API-Automatable:**
+- List/create/delete interconnects (Direct, Partner)
+- List available slots
+- Get interconnect status
+- Download LOA PDF
+- Create/update CNI objects (BGP config)
+- Query settings
+
+**Requires Account Team:**
+- Initial request approval
+- AWS Direct Connect setup (send LOA+VLAN to CF)
+- GCP Cloud Interconnect final activation
+- Partner interconnect acceptance (Equinix, Megaport)
+- VLAN assignment (v1)
+- Configuration document generation (v1)
+- Escalations + troubleshooting support
+
+**Cannot Be Automated:**
+- Physical cross-connect installation (Direct)
+- Partner portal operations (virtual circuit ordering)
+- AWS/GCP portal operations
+- Maintenance window coordination
+
+## See Also
+- [tunnel](../tunnel/) - Alternative for private network connectivity
+- [spectrum](../spectrum/) - Layer 4 proxy for TCP/UDP traffic
diff --git a/.agents/skills/cloudflare-deploy/references/network-interconnect/api.md b/.agents/skills/cloudflare-deploy/references/network-interconnect/api.md
new file mode 100644
index 0000000..85e5e12
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/network-interconnect/api.md
@@ -0,0 +1,199 @@
+# CNI API Reference
+
+See [README.md](README.md) for overview.
+
+## Base
+
+```
+https://api.cloudflare.com/client/v4
+Auth: Authorization: Bearer <API_TOKEN>
+```
+
+## SDK Namespaces
+
+**Primary (recommended):**
+```typescript
+client.networkInterconnects.interconnects.*
+client.networkInterconnects.cnis.*
+client.networkInterconnects.slots.*
+```
+
+**Alternate (deprecated):**
+```typescript
+client.magicTransit.cfInterconnects.*
+```
+
+Use `networkInterconnects` namespace for all new code.
+
+## Interconnects
+
+```http
+GET /accounts/{account_id}/cni/interconnects # Query: page, per_page
+POST /accounts/{account_id}/cni/interconnects # Query: validate_only=true (optional)
+GET /accounts/{account_id}/cni/interconnects/{icon}
+GET /accounts/{account_id}/cni/interconnects/{icon}/status
+GET /accounts/{account_id}/cni/interconnects/{icon}/loa # Returns PDF
+DELETE /accounts/{account_id}/cni/interconnects/{icon}
+```
+
+**Create Body:** `account`, `slot_id`, `type`, `facility`, `speed`, `name`, `description`
+**Status Values:** `active` | `healthy` | `unhealthy` | `pending` | `down`
+
+**Response Example:**
+```json
+{"result": [{"id": "icon_abc", "name": "prod", "type": "direct", "facility": "EWR1", "speed": "10G", "status": "active"}]}
+```
+
+## CNI Objects (BGP config)
+
+```http
+GET /accounts/{account_id}/cni/cnis
+POST /accounts/{account_id}/cni/cnis
+GET /accounts/{account_id}/cni/cnis/{cni}
+PUT /accounts/{account_id}/cni/cnis/{cni}
+DELETE /accounts/{account_id}/cni/cnis/{cni}
+```
+
+Body: `account`, `cust_ip`, `cf_ip`, `bgp_asn`, `bgp_password`, `vlan`
+
+## Slots
+
+```http
+GET /accounts/{account_id}/cni/slots
+GET /accounts/{account_id}/cni/slots/{slot}
+```
+
+Query: `facility`, `occupied`, `speed`
+
+## Health Checks
+
+Configure via Magic Transit/WAN tunnel endpoints (CNI v2).
+
+```typescript
+await client.magicTransit.tunnels.update(accountId, tunnelId, {
+ health_check: { enabled: true, target: '192.0.2.1', rate: 'high', type: 'request' },
+});
+```
+
+Rates: `high` | `medium` | `low`. Types: `request` | `reply`. See [Magic Transit docs](https://developers.cloudflare.com/magic-transit/how-to/configure-tunnel-endpoints/#add-tunnels).
+
+## Settings
+
+```http
+GET /accounts/{account_id}/cni/settings
+PUT /accounts/{account_id}/cni/settings
+```
+
+Body: `default_asn`
+
+## TypeScript SDK
+
+```typescript
+import Cloudflare from 'cloudflare';
+
+const client = new Cloudflare({ apiToken: process.env.CF_TOKEN });
+
+// List
+await client.networkInterconnects.interconnects.list({ account_id: id });
+
+// Create with validation
+await client.networkInterconnects.interconnects.create({
+ account_id: id,
+ account: id,
+ slot_id: 'slot_abc',
+ type: 'direct',
+ facility: 'EWR1',
+ speed: '10G',
+ name: 'prod-interconnect',
+}, {
+ query: { validate_only: true }, // Dry-run validation
+});
+
+// Create without validation
+await client.networkInterconnects.interconnects.create({
+ account_id: id,
+ account: id,
+ slot_id: 'slot_abc',
+ type: 'direct',
+ facility: 'EWR1',
+ speed: '10G',
+ name: 'prod-interconnect',
+});
+
+// Status
+await client.networkInterconnects.interconnects.get(accountId, iconId);
+
+// LOA (use fetch)
+const res = await fetch(`https://api.cloudflare.com/client/v4/accounts/${id}/cni/interconnects/${iconId}/loa`, {
+ headers: { Authorization: `Bearer ${token}` },
+});
+await fs.writeFile('loa.pdf', Buffer.from(await res.arrayBuffer()));
+
+// CNI object
+await client.networkInterconnects.cnis.create({
+ account_id: id,
+ account: id,
+ cust_ip: '192.0.2.1/31',
+ cf_ip: '192.0.2.0/31',
+ bgp_asn: 65000,
+ vlan: 100,
+});
+
+// Slots (filter by facility and speed)
+await client.networkInterconnects.slots.list({
+ account_id: id,
+ occupied: false,
+ facility: 'EWR1',
+ speed: '10G',
+});
+```
+
+## Python SDK
+
+```python
+from cloudflare import Cloudflare
+
+client = Cloudflare(api_token=os.environ["CF_TOKEN"])
+
+# List, create, status (same pattern as TypeScript)
+client.network_interconnects.interconnects.list(account_id=id)
+client.network_interconnects.interconnects.create(account_id=id, account=id, slot_id="slot_abc", type="direct", facility="EWR1", speed="10G")
+client.network_interconnects.interconnects.get(account_id=id, icon=icon_id)
+
+# CNI objects and slots
+client.network_interconnects.cnis.create(account_id=id, cust_ip="192.0.2.1/31", cf_ip="192.0.2.0/31", bgp_asn=65000)
+client.network_interconnects.slots.list(account_id=id, occupied=False)
+```
+
+## cURL
+
+```bash
+# List interconnects
+curl "https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/cni/interconnects" \
+ -H "Authorization: Bearer ${CF_TOKEN}"
+
+# Create interconnect
+curl -X POST "https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/cni/interconnects?validate_only=true" \
+ -H "Authorization: Bearer ${CF_TOKEN}" -H "Content-Type: application/json" \
+ -d '{"account": "id", "slot_id": "slot_abc", "type": "direct", "facility": "EWR1", "speed": "10G"}'
+
+# LOA PDF
+curl "https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/cni/interconnects/${ICON_ID}/loa" \
+ -H "Authorization: Bearer ${CF_TOKEN}" --output loa.pdf
+```
+
+## Not Available via API
+
+**Missing Capabilities:**
+- BGP session state query (use Dashboard or BGP logs)
+- Bandwidth utilization metrics (use external monitoring)
+- Traffic statistics per interconnect
+- Historical uptime/downtime data
+- Light level readings (contact account team)
+- Maintenance window scheduling (notifications only)
+
+## Resources
+
+- [API Docs](https://developers.cloudflare.com/api/resources/network_interconnects/)
+- [TypeScript SDK](https://github.com/cloudflare/cloudflare-typescript)
+- [Python SDK](https://github.com/cloudflare/cloudflare-python)
diff --git a/.agents/skills/cloudflare-deploy/references/network-interconnect/configuration.md b/.agents/skills/cloudflare-deploy/references/network-interconnect/configuration.md
new file mode 100644
index 0000000..0f1005c
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/network-interconnect/configuration.md
@@ -0,0 +1,114 @@
+# CNI Configuration
+
+See [README.md](README.md) for overview.
+
+## Workflow (2-4 weeks)
+
+1. **Submit request** (Week 1): Contact account team, provide type/location/use case
+2. **Review config** (Week 1-2, v1 only): Approve IP/VLAN/spec doc
+3. **Order connection** (Week 2-3):
+ - **Direct**: Get LOA, order cross-connect from facility
+ - **Partner**: Order virtual circuit in partner portal
+ - **Cloud**: Order Direct Connect/Cloud Interconnect, send LOA+VLAN to CF
+4. **Configure** (Week 3): Both sides configure per doc
+5. **Test** (Week 3-4): Ping, verify BGP, check routes
+6. **Health checks** (Week 4): Configure [Magic Transit](https://developers.cloudflare.com/magic-transit/how-to/configure-tunnel-endpoints/#add-tunnels) or [Magic WAN](https://developers.cloudflare.com/magic-wan/configuration/manually/how-to/configure-tunnel-endpoints/#add-tunnels) health checks
+7. **Activate** (Week 4): Route traffic, verify flow
+8. **Monitor**: Enable [maintenance notifications](https://developers.cloudflare.com/network-interconnect/monitoring-and-alerts/#enable-cloudflare-status-maintenance-notification)
+
+## BGP Configuration
+
+**v1 Requirements:**
+- BGP ASN (provide during setup)
+- /31 subnet for peering
+- Optional: BGP password
+
+**v2:** Simplified, less BGP config needed.
+
+**BGP over CNI (Dec 2024):** Magic WAN/Transit can now peer BGP directly over CNI v2 (no GRE tunnel required).
+
+**Example v1 BGP:**
+```
+Router ID: 192.0.2.1
+Peer IP: 192.0.2.0
+Remote ASN: 13335
+Local ASN: 65000
+Password: [optional]
+VLAN: 100
+```
+
+## Cloud Interconnect Setup
+
+### AWS Direct Connect (Beta)
+
+**Requirements:** Magic WAN, AWS Dedicated Direct Connect 1/10 Gbps.
+
+**Process:**
+1. Contact CF account team
+2. Choose location
+3. Order in AWS portal
+4. AWS provides LOA + VLAN ID
+5. Send to CF account team
+6. Wait ~4 weeks
+
+**Post-setup:** Add [static routes](https://developers.cloudflare.com/magic-wan/configuration/manually/how-to/configure-routes/#configure-static-routes) to Magic WAN. Enable [bidirectional health checks](https://developers.cloudflare.com/magic-wan/configuration/manually/how-to/configure-tunnel-endpoints/#legacy-bidirectional-health-checks).
+
+### GCP Cloud Interconnect (Beta)
+
+**Setup via Dashboard:**
+1. Interconnects → Create → Cloud Interconnect → Google
+2. Provide name, MTU (match GCP VLAN attachment), speed (50M-50G granular options available for partner interconnects)
+3. Enter VLAN attachment pairing key
+4. Confirm order
+
+**Routing to GCP:** Add [static routes](https://developers.cloudflare.com/magic-wan/configuration/manually/how-to/configure-routes/#configure-static-routes). BGP routes from GCP Cloud Router **ignored**.
+
+**Routing to CF:** Configure [custom learned routes](https://cloud.google.com/network-connectivity/docs/router/how-to/configure-custom-learned-routes) in Cloud Router. Request prefixes from CF account team.
+
+## Monitoring
+
+**Dashboard Status:**
+
+| Status | Meaning |
+|--------|---------|
+| **Healthy** | Link operational, traffic flowing, health checks passing |
+| **Active** | Link up, sufficient light, Ethernet negotiated |
+| **Unhealthy** | Link down, no/low light (<-20 dBm), can't negotiate |
+| **Pending** | Cross-connect incomplete, device unresponsive, RX/TX swapped |
+| **Down** | Physical link down, no connectivity |
+
+**Alerts:**
+
+**CNI Connection Maintenance** (Magic Networking only):
+```
+Dashboard → Notifications → Add
+Product: Cloudflare Network Interconnect
+Type: Connection Maintenance Alert
+```
+Warnings up to 2 weeks advance. 6hr delay for new additions.
+
+**Cloudflare Status Maintenance** (entire PoP):
+```
+Dashboard → Notifications → Add
+Product: Cloudflare Status
+Filter PoPs: gru,fra,lhr
+```
+
+**Find PoP code:**
+```
+Dashboard → Magic Transit/WAN → Configuration → Interconnects
+Select CNI → Note Data Center (e.g., "gru-b")
+Use first 3 letters: "gru"
+```
+
+## Best Practices
+
+**Critical config-specific practices:**
+- /31 subnets required for BGP
+- BGP passwords recommended
+- BFD for fast failover (v1 only)
+- Test ping connectivity before BGP
+- Enable maintenance notifications immediately after activation
+- Monitor status programmatically via API
+
+For design patterns, HA architecture, and security best practices, see [patterns.md](./patterns.md).
diff --git a/.agents/skills/cloudflare-deploy/references/network-interconnect/gotchas.md b/.agents/skills/cloudflare-deploy/references/network-interconnect/gotchas.md
new file mode 100644
index 0000000..9880807
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/network-interconnect/gotchas.md
@@ -0,0 +1,165 @@
+# CNI Gotchas & Troubleshooting
+
+## Common Errors
+
+### "Status: Pending"
+
+**Cause:** Cross-connect not installed, RX/TX fibers reversed, wrong fiber type, or low light levels
+**Solution:**
+1. Verify cross-connect installed
+2. Check fiber at patch panel
+3. Swap RX/TX fibers
+4. Check light with optical power meter (target > -20 dBm)
+5. Contact account team
+
+### "Status: Unhealthy"
+
+**Cause:** Physical issue, low light (<-20 dBm), optic mismatch, or dirty connectors
+**Solution:**
+1. Check physical connections
+2. Clean fiber connectors
+3. Verify optic types (10GBASE-LR/100GBASE-LR4)
+4. Test with known-good optics
+5. Check patch panel
+6. Contact account team
+
+### "BGP Session Down"
+
+**Cause:** Wrong IP addressing, wrong ASN, password mismatch, or firewall blocking TCP/179
+**Solution:**
+1. Verify IPs match CNI object
+2. Confirm ASN correct
+3. Check BGP password
+4. Verify no firewall on TCP/179
+5. Check BGP logs
+6. Review BGP timers
+
+### "Low Throughput"
+
+**Cause:** MTU mismatch, fragmentation, single GRE tunnel (v1), or routing inefficiency
+**Solution:**
+1. Check MTU (1500↓/1476↑ for v1, 1500 both for v2)
+2. Test various packet sizes
+3. Add more GRE tunnels (v1)
+4. Consider upgrading to v2
+5. Review routing tables
+6. Use LACP for bundling (v1)
+
+## API Errors
+
+### 400 Bad Request: "slot_id already occupied"
+
+**Cause:** Another interconnect already uses this slot
+**Solution:** Use `occupied=false` filter when listing slots:
+```typescript
+await client.networkInterconnects.slots.list({
+ account_id: id,
+ occupied: false,
+ facility: 'EWR1',
+});
+```
+
+### 400 Bad Request: "invalid facility code"
+
+**Cause:** Typo or unsupported facility
+**Solution:** Check [locations PDF](https://developers.cloudflare.com/network-interconnect/static/cni-locations-2026-01.pdf) for valid codes
+
+### 403 Forbidden: "Enterprise plan required"
+
+**Cause:** Account not enterprise-level
+**Solution:** Contact account team to upgrade
+
+### 422 Unprocessable: "validate_only request failed"
+
+**Cause:** Dry-run validation found issues (wrong slot, invalid config)
+**Solution:** Review error message details, fix config before real creation
+
+### Rate Limiting
+
+**Limit:** 1200 requests/5min per token
+**Solution:** Implement exponential backoff, cache slot listings
+
+## Cloud-Specific Issues
+
+### AWS Direct Connect: "VLAN not matching"
+
+**Cause:** VLAN ID from AWS LOA doesn't match CNI config
+**Solution:**
+1. Get VLAN from AWS Console after ordering
+2. Send exact VLAN to CF account team
+3. Verify match in CNI object config
+
+### AWS: "Connection stuck in Pending"
+
+**Cause:** LOA not provided to CF or AWS connection not accepted
+**Solution:**
+1. Verify AWS connection status is "Available"
+2. Confirm LOA sent to CF account team
+3. Wait for CF team acceptance (can take days)
+
+### GCP: "BGP routes not propagating"
+
+**Cause:** BGP routes from GCP Cloud Router **ignored by design**
+**Solution:** Use [static routes](https://developers.cloudflare.com/magic-wan/configuration/manually/how-to/configure-routes/#configure-static-routes) in Magic WAN instead
+
+### GCP: "Cannot query VLAN attachment status via API"
+
+**Cause:** GCP Cloud Interconnect Dashboard-only (no API yet)
+**Solution:** Check status in CF Dashboard or GCP Console
+
+## Partner Interconnect Issues
+
+### Equinix: "Virtual circuit not appearing"
+
+**Cause:** CF hasn't accepted Equinix connection request
+**Solution:**
+1. Verify VC created in Equinix Fabric Portal
+2. Contact CF account team to accept
+3. Allow 2-3 business days
+
+### Console Connect/Megaport: "API creation fails"
+
+**Cause:** Partner interconnects require partner portal + CF approval
+**Solution:** Cannot fully automate. Order in partner portal, notify CF account team.
+
+## Anti-Patterns
+
+| Anti-Pattern | Why Bad | Solution |
+|--------------|---------|----------|
+| Single interconnect for production | No SLA, single point of failure | Use ≥2 with device diversity |
+| No backup Internet | CNI fails = total outage | Always maintain alternate path |
+| Polling status every second | Rate limits, wastes API calls | Poll every 30-60s max |
+| Using v1 for Magic WAN v2 workloads | GRE overhead, complexity | Use v2 for simplified routing |
+| Assuming BGP session = traffic flowing | BGP up ≠ routes installed | Verify routing tables + test traffic |
+| Not enabling maintenance alerts | Surprise downtime during maintenance | Enable notifications immediately |
+| Hardcoding VLAN in automation | VLAN assigned by CF (v1) | Get VLAN from CNI object response |
+| Using Direct without colocation | Can't access cross-connect | Use Partner or Cloud interconnect |
+
+## What's Not Queryable via API
+
+**Cannot retrieve:**
+- BGP session state (use Dashboard or BGP logs)
+- Light levels (contact account team)
+- Historical metrics (uptime, traffic)
+- Bandwidth utilization per interconnect
+- Maintenance window schedules (notifications only)
+- Fiber path details
+- Cross-connect installation status
+
+**Workarounds:**
+- External monitoring for BGP state
+- Log aggregation for historical data
+- Notifications for maintenance windows
+
+## Limits
+
+| Resource/Limit | Value | Notes |
+|----------------|-------|-------|
+| Max optical distance | 10km | Physical limit |
+| MTU (v1) | 1500↓ / 1476↑ | Asymmetric |
+| MTU (v2) | 1500 both | Symmetric |
+| GRE tunnel throughput | 1 Gbps | Per tunnel (v1) |
+| Recovery time | Days | No formal SLA |
+| Light level minimum | -20 dBm | Target threshold |
+| API rate limit | 1200 req/5min | Per token |
+| Health check delay | 6 hours | New maintenance alert subscriptions |
diff --git a/.agents/skills/cloudflare-deploy/references/network-interconnect/patterns.md b/.agents/skills/cloudflare-deploy/references/network-interconnect/patterns.md
new file mode 100644
index 0000000..7ff9dd3
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/network-interconnect/patterns.md
@@ -0,0 +1,166 @@
+# CNI Patterns
+
+See [README.md](README.md) for overview.
+
+## High Availability
+
+**Critical:** Design for resilience from day one.
+
+**Requirements:**
+- Device-level diversity (separate hardware)
+- Backup Internet connectivity (no SLA on CNI)
+- Network-resilient locations preferred
+- Regular failover testing
+
+**Architecture:**
+```
+Your Network A ──10G CNI v2──> CF CCR Device 1
+ │
+Your Network B ──10G CNI v2──> CF CCR Device 2
+ │
+ CF Global Network (AS13335)
+```
+
+**Capacity Planning:**
+- Plan across all links
+- Account for failover scenarios
+- Your responsibility
+
+## Pattern: Magic Transit + CNI v2
+
+**Use Case:** DDoS protection, private connectivity, no GRE overhead.
+
+```typescript
+// 1. Create interconnect
+const ic = await client.networkInterconnects.interconnects.create({
+ account_id: id,
+ type: 'direct',
+ facility: 'EWR1',
+ speed: '10G',
+ name: 'magic-transit-primary',
+});
+
+// 2. Poll until active
+const status = await pollUntilActive(id, ic.id);
+
+// 3. Configure Magic Transit tunnel via Dashboard/API
+```
+
+**Benefits:** 1500 MTU both ways, simplified routing.
+
+## Pattern: Multi-Cloud Hybrid
+
+**Use Case:** AWS/GCP workloads with Cloudflare.
+
+**AWS Direct Connect:**
+```typescript
+// 1. Order Direct Connect in AWS Console
+// 2. Get LOA + VLAN from AWS
+// 3. Send to CF account team (no API)
+// 4. Configure static routes in Magic WAN
+
+await configureStaticRoutes(id, {
+ prefix: '10.0.0.0/8',
+ nexthop: 'aws-direct-connect',
+});
+```
+
+**GCP Cloud Interconnect:**
+```
+1. Get VLAN attachment pairing key from GCP Console
+2. Create via Dashboard: Interconnects → Create → Cloud Interconnect → Google
+ - Enter pairing key, name, MTU, speed
+3. Configure static routes in Magic WAN (BGP routes from GCP ignored)
+4. Configure custom learned routes in GCP Cloud Router
+```
+
+**Note:** Dashboard-only. No API/SDK support yet.
+
+## Pattern: Multi-Location HA
+
+**Use Case:** 99.99%+ uptime.
+
+```typescript
+// Primary (NY)
+const primary = await client.networkInterconnects.interconnects.create({
+ account_id: id,
+ type: 'direct',
+ facility: 'EWR1',
+ speed: '10G',
+ name: 'primary-ewr1',
+});
+
+// Secondary (NY, different hardware)
+const secondary = await client.networkInterconnects.interconnects.create({
+ account_id: id,
+ type: 'direct',
+ facility: 'EWR2',
+ speed: '10G',
+ name: 'secondary-ewr2',
+});
+
+// Tertiary (LA, different geography)
+const tertiary = await client.networkInterconnects.interconnects.create({
+ account_id: id,
+ type: 'partner',
+ facility: 'LAX1',
+ speed: '10G',
+ name: 'tertiary-lax1',
+});
+
+// BGP local preferences:
+// Primary: 200
+// Secondary: 150
+// Tertiary: 100
+// Internet: Last resort
+```
+
+## Pattern: Partner Interconnect (Equinix)
+
+**Use Case:** Quick deployment, no colocation.
+
+**Setup:**
+1. Order virtual circuit in Equinix Fabric Portal
+2. Select Cloudflare as destination
+3. Choose facility
+4. Send details to CF account team
+5. CF accepts in portal
+6. Configure BGP
+
+**No API automation** – partner portals managed separately.
+
+## Failover & Security
+
+**Failover Best Practices:**
+- Use BGP local preferences for priority
+- Configure BFD for fast detection (v1)
+- Test regularly with traffic shift
+- Document runbooks
+
+**Security:**
+- BGP password authentication
+- BGP route filtering
+- Monitor unexpected routes
+- Magic Firewall for DDoS/threats
+- Minimum API token permissions
+- Rotate credentials periodically
+
+## Decision Matrix
+
+| Requirement | Recommended |
+|-------------|-------------|
+| Colocated with CF | Direct |
+| Not colocated | Partner |
+| AWS/GCP workloads | Cloud |
+| 1500 MTU both ways | v2 |
+| VLAN tagging | v1 |
+| Public peering | v1 |
+| Simplest config | v2 |
+| BFD fast failover | v1 |
+| LACP bundling | v1 |
+
+## Resources
+
+- [Magic Transit Docs](https://developers.cloudflare.com/magic-transit/)
+- [Magic WAN Docs](https://developers.cloudflare.com/magic-wan/)
+- [Argo Smart Routing](https://developers.cloudflare.com/argo/)
diff --git a/.agents/skills/cloudflare-deploy/references/observability/README.md b/.agents/skills/cloudflare-deploy/references/observability/README.md
new file mode 100644
index 0000000..58feed6
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/observability/README.md
@@ -0,0 +1,87 @@
+# Cloudflare Observability Skill Reference
+
+**Purpose**: Comprehensive guidance for implementing observability in Cloudflare Workers, covering traces, logs, metrics, and analytics.
+
+**Scope**: Cloudflare Observability features ONLY - Workers Logs, Traces, Analytics Engine, Logpush, Metrics & Analytics, and OpenTelemetry exports.
+
+---
+
+## Decision Tree: Which File to Load?
+
+Use this to route to the correct file without loading all content:
+
+```
+├─ "How do I enable/configure X?" → configuration.md
+├─ "What's the API/method/binding for X?" → api.md
+├─ "How do I implement X pattern?" → patterns.md
+│ ├─ Usage tracking/billing → patterns.md
+│ ├─ Error tracking → patterns.md
+│ ├─ Performance monitoring → patterns.md
+│ ├─ Multi-tenant tracking → patterns.md
+│ ├─ Tail Worker filtering → patterns.md
+│ └─ OpenTelemetry export → patterns.md
+└─ "Why isn't X working?" / "Limits?" → gotchas.md
+```
+
+## Reading Order
+
+Load files in this order based on task:
+
+| Task Type | Load Order | Reason |
+|-----------|------------|--------|
+| **Initial setup** | configuration.md → gotchas.md | Setup first, avoid pitfalls |
+| **Implement feature** | patterns.md → api.md → gotchas.md | Pattern → API details → edge cases |
+| **Debug issue** | gotchas.md → configuration.md | Common issues first |
+| **Query data** | api.md → patterns.md | API syntax → query examples |
+
+## Product Overview
+
+### Workers Logs
+- **What:** Console output from Workers (console.log/warn/error)
+- **Access:** Dashboard (Real-time Logs), Logpush, Tail Workers
+- **Cost:** Free (included with all Workers)
+- **Retention:** Real-time only (no historical storage in dashboard)
+
+### Workers Traces
+- **What:** Execution traces with timing, CPU usage, outcome
+- **Access:** Dashboard (Workers Analytics → Traces), Logpush
+- **Cost:** $0.10/1M spans (GA pricing starts March 1, 2026), 10M free/month
+- **Retention:** 14 days included
+
+### Analytics Engine
+- **What:** High-cardinality event storage and SQL queries
+- **Access:** SQL API, Dashboard (Analytics → Analytics Engine)
+- **Cost:** $0.25/1M writes beyond 10M free/month
+- **Retention:** 90 days (configurable up to 1 year)
+
+### Tail Workers
+- **What:** Workers that receive logs/traces from other Workers
+- **Use Cases:** Log filtering, transformation, external export
+- **Cost:** Standard Workers pricing
+
+### Logpush
+- **What:** Stream logs to external storage (S3, R2, Datadog, etc.)
+- **Access:** Dashboard, API
+- **Cost:** Requires Business/Enterprise plan
+
+## Pricing Summary (2026)
+
+| Feature | Free Tier | Cost Beyond Free Tier | Plan Requirement |
+|---------|-----------|----------------------|------------------|
+| Workers Logs | Unlimited | Free | Any |
+| Workers Traces | 10M spans/month | $0.10/1M spans | Paid Workers (GA: March 1, 2026) |
+| Analytics Engine | 10M writes/month | $0.25/1M writes | Paid Workers |
+| Logpush | N/A | Included in plan | Business/Enterprise |
+
+## In This Reference
+
+- **[configuration.md](configuration.md)** - Setup, deployment, configuration (Logs, Traces, Analytics Engine, Tail Workers, Logpush)
+- **[api.md](api.md)** - API endpoints, methods, interfaces (GraphQL, SQL, bindings, types)
+- **[patterns.md](patterns.md)** - Common patterns, use cases, examples (billing, monitoring, error tracking, exports)
+- **[gotchas.md](gotchas.md)** - Troubleshooting, best practices, limitations (common errors, performance gotchas, pricing)
+
+## See Also
+
+- [Cloudflare Workers Docs](https://developers.cloudflare.com/workers/)
+- [Analytics Engine Docs](https://developers.cloudflare.com/analytics/analytics-engine/)
+- [Workers Traces Docs](https://developers.cloudflare.com/workers/observability/traces/)
diff --git a/.agents/skills/cloudflare-deploy/references/observability/api.md b/.agents/skills/cloudflare-deploy/references/observability/api.md
new file mode 100644
index 0000000..a0161de
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/observability/api.md
@@ -0,0 +1,164 @@
+## API Reference
+
+### GraphQL Analytics API
+
+**Endpoint**: `https://api.cloudflare.com/client/v4/graphql`
+
+**Query Workers Metrics**:
+```graphql
+query GetWorkersMetrics($accountId: string!) {
+ viewer {
+ accounts(filter: { accountTag: $accountId }) {
+ workersInvocationsAdaptive(
+ limit: 100
+ filter: {
+ datetime_geq: "2025-01-01T00:00:00Z"
+ datetime_leq: "2025-01-31T23:59:59Z"
+ scriptName: "my-worker"
+ }
+ ) {
+ sum {
+ requests
+ errors
+ subrequests
+ }
+ quantiles {
+ cpuTimeP50
+ cpuTimeP99
+ wallTimeP50
+ wallTimeP99
+ }
+ }
+ }
+ }
+}
+```
+
+### Analytics Engine SQL API
+
+**Endpoint**: `https://api.cloudflare.com/client/v4/accounts/{account_id}/analytics_engine/sql`
+
+**Authentication**: `Authorization: Bearer <API_TOKEN>` (Account Analytics Read permission)
+
+**Common Queries**:
+
+```sql
+-- List all datasets
+SHOW TABLES;
+
+-- Time-series aggregation (5-minute buckets)
+SELECT
+ intDiv(toUInt32(timestamp), 300) * 300 AS time_bucket,
+ blob1 AS endpoint,
+ SUM(_sample_interval) AS total_requests,
+ AVG(double1) AS avg_response_time_ms
+FROM api_metrics
+WHERE timestamp >= NOW() - INTERVAL '24' HOUR
+GROUP BY time_bucket, endpoint
+ORDER BY time_bucket DESC;
+
+-- Top customers by usage
+SELECT
+ index1 AS customer_id,
+ SUM(_sample_interval * double1) AS total_api_calls,
+ AVG(double2) AS avg_response_time_ms
+FROM api_usage
+WHERE timestamp >= NOW() - INTERVAL '7' DAY
+GROUP BY customer_id
+ORDER BY total_api_calls DESC
+LIMIT 100;
+
+-- Error rate analysis
+SELECT
+ blob1 AS error_type,
+ COUNT(*) AS occurrences,
+ MAX(timestamp) AS last_seen
+FROM error_tracking
+WHERE timestamp >= NOW() - INTERVAL '1' HOUR
+GROUP BY error_type
+ORDER BY occurrences DESC;
+```
+
+### Console Logging API
+
+**Methods**:
+```typescript
+// Standard methods (all appear in Workers Logs)
+console.log('info message');
+console.info('info message');
+console.warn('warning message');
+console.error('error message');
+console.debug('debug message');
+
+// Structured logging (recommended)
+console.log({
+ level: 'info',
+ user_id: '123',
+ action: 'checkout',
+ amount: 99.99,
+ currency: 'USD'
+});
+```
+
+**Log Levels**: All console methods produce logs; use structured fields for filtering:
+```typescript
+console.log({
+ level: 'error',
+ message: 'Payment failed',
+ error_code: 'CARD_DECLINED'
+});
+```
+
+### Analytics Engine Binding Types
+
+```typescript
+interface AnalyticsEngineDataset {
+ writeDataPoint(event: AnalyticsEngineDataPoint): void;
+}
+
+interface AnalyticsEngineDataPoint {
+ // Indexed strings (use for filtering/grouping)
+ indexes?: string[];
+
+ // Non-indexed strings (metadata, IDs, URLs)
+ blobs?: string[];
+
+ // Numeric values (counts, durations, amounts)
+ doubles?: number[];
+}
+```
+
+**Field Limits**:
+- Max 20 indexes
+- Max 20 blobs
+- Max 20 doubles
+- Max 25 `writeDataPoint` calls per request
+
+### Tail Consumer Event Type
+
+```typescript
+interface TraceItem {
+ event: TraceEvent;
+ logs: TraceLog[];
+ exceptions: TraceException[];
+ scriptName?: string;
+}
+
+interface TraceEvent {
+ outcome: 'ok' | 'exception' | 'exceededCpu' | 'exceededMemory' | 'unknown';
+ cpuTime: number; // microseconds
+ wallTime: number; // microseconds
+}
+
+interface TraceLog {
+ timestamp: number;
+ level: 'log' | 'info' | 'debug' | 'warn' | 'error';
+ message: any; // string or structured object
+}
+
+interface TraceException {
+ name: string;
+ message: string;
+ timestamp: number;
+}
+```
\ No newline at end of file
diff --git a/.agents/skills/cloudflare-deploy/references/observability/configuration.md b/.agents/skills/cloudflare-deploy/references/observability/configuration.md
new file mode 100644
index 0000000..483de4c
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/observability/configuration.md
@@ -0,0 +1,169 @@
+## Configuration Patterns
+
+### Enable Workers Logs
+
+```jsonc
+{
+ "observability": {
+ "enabled": true,
+ "head_sampling_rate": 1 // 100% sampling (default)
+ }
+}
+```
+
+**Best Practice**: Use structured JSON logging for better indexing
+
+```typescript
+// Good - structured logging
+console.log({
+ user_id: 123,
+ action: "login",
+ status: "success",
+ duration_ms: 45
+});
+
+// Avoid - unstructured string
+console.log("user_id: 123 logged in successfully in 45ms");
+```
+
+### Enable Workers Traces
+
+```jsonc
+{
+ "observability": {
+ "traces": {
+ "enabled": true,
+ "head_sampling_rate": 0.05 // 5% sampling
+ }
+ }
+}
+```
+
+**Note**: Default sampling is 100%. For high-traffic Workers, use lower sampling (0.01-0.1).
+
+### Configure Analytics Engine
+
+**Bind to Worker**:
+```toml
+# wrangler.toml
+analytics_engine_datasets = [
+ { binding = "ANALYTICS", dataset = "api_metrics" }
+]
+```
+
+**Write Data Points**:
+```typescript
+export interface Env {
+ ANALYTICS: AnalyticsEngineDataset;
+}
+
+export default {
+  async fetch(request: Request, env: Env): Promise<Response> {
+ // Track metrics
+ env.ANALYTICS.writeDataPoint({
+ blobs: ['customer_123', 'POST', '/api/v1/users'],
+ doubles: [1, 245.5], // request_count, response_time_ms
+ indexes: ['customer_123'] // for efficient filtering
+ });
+
+ return new Response('OK');
+ }
+}
+```
+
+### Configure Tail Workers
+
+Tail Workers receive logs/traces from other Workers for filtering, transformation, or export.
+
+**Setup**:
+```toml
+# wrangler.toml
+name = "log-processor"
+main = "src/tail.ts"
+
+[[tail_consumers]]
+service = "my-worker" # Worker to tail
+```
+
+**Tail Worker Example**:
+```typescript
+export default {
+ async tail(events: TraceItem[], env: Env, ctx: ExecutionContext) {
+ // Filter errors only
+    const errors = events.filter(event =>
+      event.event.outcome === 'exception' || event.event.outcome === 'exceededCpu'
+    );
+
+ if (errors.length > 0) {
+ // Send to external monitoring
+ ctx.waitUntil(
+ fetch('https://monitoring.example.com/errors', {
+ method: 'POST',
+ body: JSON.stringify(errors)
+ })
+ );
+ }
+ }
+}
+```
+
+### Configure Logpush
+
+Send logs to external storage (S3, R2, GCS, Azure, Datadog, etc.). Requires Business/Enterprise plan.
+
+**Via Dashboard**:
+1. Navigate to Analytics → Logs → Logpush
+2. Select destination type
+3. Provide credentials and bucket/endpoint
+4. Choose dataset (e.g., Workers Trace Events)
+5. Configure filters and fields
+
+**Via API**:
+```bash
+curl -X POST "https://api.cloudflare.com/client/v4/accounts/{account_id}/logpush/jobs" \
+  -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "name": "workers-logs-to-s3",
+ "destination_conf": "s3://my-bucket/logs?region=us-east-1",
+ "dataset": "workers_trace_events",
+ "enabled": true,
+ "frequency": "high",
+ "filter": "{\"where\":{\"and\":[{\"key\":\"ScriptName\",\"operator\":\"eq\",\"value\":\"my-worker\"}]}}"
+ }'
+```
+
+### Environment-Specific Configuration
+
+**Development** (verbose logs, full sampling):
+```jsonc
+// wrangler.dev.jsonc
+{
+ "observability": {
+ "enabled": true,
+ "head_sampling_rate": 1.0,
+ "traces": {
+ "enabled": true
+ }
+ }
+}
+```
+
+**Production** (reduced sampling, structured logs):
+```jsonc
+// wrangler.prod.jsonc
+{
+ "observability": {
+ "enabled": true,
+ "head_sampling_rate": 0.1, // 10% sampling
+ "traces": {
+ "enabled": true
+ }
+ }
+}
+```
+
+Deploy with env-specific config:
+```bash
+wrangler deploy --config wrangler.prod.jsonc --env production
+```
\ No newline at end of file
diff --git a/.agents/skills/cloudflare-deploy/references/observability/gotchas.md b/.agents/skills/cloudflare-deploy/references/observability/gotchas.md
new file mode 100644
index 0000000..42bc738
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/observability/gotchas.md
@@ -0,0 +1,115 @@
+## Common Errors
+
+### "Logs not appearing"
+
+**Cause:** Observability disabled, Worker not redeployed, no traffic, low sampling rate, or log size exceeds 256 KB
+**Solution:**
+```bash
+# Verify config
+cat wrangler.jsonc | jq '.observability'
+
+# Check deployment
+wrangler deployments list
+
+# Test with curl
+curl https://your-worker.workers.dev
+```
+Ensure `observability.enabled = true`, redeploy Worker, check `head_sampling_rate`, verify traffic
+
+### "Traces not being captured"
+
+**Cause:** Traces not enabled, incorrect sampling rate, Worker not redeployed, or destination unavailable
+**Solution:**
+```jsonc
+// Temporarily set to 100% sampling for debugging
+{
+ "observability": {
+ "enabled": true,
+ "head_sampling_rate": 1.0,
+ "traces": {
+ "enabled": true
+ }
+ }
+}
+```
+Ensure `observability.traces.enabled = true`, set `head_sampling_rate` to 1.0 for testing, redeploy, check destination status
+
+## Limits
+
+| Resource/Limit | Value | Notes |
+|----------------|-------|-------|
+| Max log size | 256 KB | Logs exceeding this are truncated |
+| Default sampling rate | 1.0 (100%) | Reduce for high-traffic Workers |
+| Max destinations | Varies by plan | Check dashboard |
+| Trace context propagation | 100 spans max | Deep call chains may lose spans |
+| Analytics Engine write rate | 25 writes/request | Excess writes dropped silently |
+
+## Performance Gotchas
+
+### Spectre Mitigation Timing
+
+**Problem:** `Date.now()` and `performance.now()` have reduced precision (coarsened to 100μs)
+**Cause:** Spectre vulnerability mitigation in V8
+**Solution:** Accept reduced precision or use Workers Traces for accurate timing
+```typescript
+// Date.now() is coarsened - trace spans are accurate
+export default {
+  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ // For user-facing timing, Date.now() is fine
+ const start = Date.now();
+ const response = await processRequest(request);
+ const duration = Date.now() - start;
+
+ // For detailed performance analysis, use Workers Traces instead
+ return response;
+ }
+}
+```
+
+### Analytics Engine _sample_interval Aggregation
+
+**Problem:** Queries return incorrect totals when not multiplying by `_sample_interval`
+**Cause:** Analytics Engine stores sampled data points, each representing multiple events
+**Solution:** Always multiply counts/sums by `_sample_interval` in aggregations
+```sql
+-- WRONG: Undercounts actual events
+SELECT blob1 AS customer_id, COUNT(*) AS total_calls
+FROM api_usage GROUP BY customer_id;
+
+-- CORRECT: Accounts for sampling
+SELECT blob1 AS customer_id, SUM(_sample_interval) AS total_calls
+FROM api_usage GROUP BY customer_id;
+```
+
+### Trace Context Propagation Limits
+
+**Problem:** Deep call chains lose trace context after 100 spans
+**Cause:** Cloudflare limits trace depth to prevent performance impact
+**Solution:** Design for flatter architectures or use custom correlation IDs for deep chains
+```typescript
+// For deep call chains, add custom correlation ID
+const correlationId = crypto.randomUUID();
+console.log({ correlationId, event: 'request_start' });
+
+// Pass correlationId through headers to downstream services
+await fetch('https://api.example.com', {
+ headers: { 'X-Correlation-ID': correlationId }
+});
+```
+
+## Pricing (2026)
+
+### Workers Traces
+- **GA Pricing (starts March 1, 2026):**
+ - $0.10 per 1M trace spans captured
+ - Retention: 14 days included
+- **Free tier:** 10M trace spans/month
+- **Note:** Beta usage (before March 1, 2026) is free
+
+### Workers Logs
+- **Included:** Free for all Workers
+- **Logpush:** Requires Business/Enterprise plan
+
+### Analytics Engine
+- **Included:** 10M writes/month on Paid Workers plan
+- **Additional:** $0.25 per 1M writes beyond included quota
diff --git a/.agents/skills/cloudflare-deploy/references/observability/patterns.md b/.agents/skills/cloudflare-deploy/references/observability/patterns.md
new file mode 100644
index 0000000..9135c68
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/observability/patterns.md
@@ -0,0 +1,105 @@
+# Observability Patterns
+
+## Usage-Based Billing
+
+```typescript
+env.ANALYTICS.writeDataPoint({
+ blobs: [customerId, request.url, request.method],
+ doubles: [1], // request_count
+ indexes: [customerId]
+});
+```
+
+```sql
+SELECT blob1 AS customer_id, SUM(_sample_interval * double1) AS total_calls
+FROM api_usage WHERE timestamp >= DATE_TRUNC('month', NOW())
+GROUP BY customer_id
+```
+
+## Performance Monitoring
+
+```typescript
+const start = Date.now();
+const response = await fetch(url);
+env.ANALYTICS.writeDataPoint({
+ blobs: [url, response.status.toString()],
+ doubles: [Date.now() - start, response.status]
+});
+```
+
+```sql
+SELECT blob1 AS url, AVG(double1) AS avg_ms, percentile(double1, 0.95) AS p95_ms
+FROM fetch_metrics WHERE timestamp >= NOW() - INTERVAL '1' HOUR
+GROUP BY url
+```
+
+## Error Tracking
+
+```typescript
+env.ANALYTICS.writeDataPoint({
+ blobs: [error.name, request.url, request.method],
+ doubles: [1],
+ indexes: [error.name]
+});
+```
+
+## Multi-Tenant Tracking
+
+```typescript
+env.ANALYTICS.writeDataPoint({
+ indexes: [tenantId], // efficient filtering
+ blobs: [tenantId, url.pathname, method, status],
+ doubles: [1, duration, bytesSize]
+});
+```
+
+## Tail Worker Log Filtering
+
+```typescript
+export default {
+ async tail(events, env, ctx) {
+ const critical = events.filter(e =>
+ e.exceptions.length > 0 || e.event.wallTime > 1000000
+ );
+ if (critical.length === 0) return;
+
+ ctx.waitUntil(
+ fetch('https://logging.example.com/ingest', {
+ method: 'POST',
+ headers: { 'Authorization': `Bearer ${env.API_KEY}` },
+ body: JSON.stringify(critical.map(e => ({
+ outcome: e.event.outcome,
+ cpu_ms: e.event.cpuTime / 1000,
+ errors: e.exceptions
+ })))
+ })
+ );
+ }
+};
+```
+
+## OpenTelemetry Export
+
+```typescript
+export default {
+ async tail(events, env, ctx) {
+ const otelSpans = events.map(e => ({
+ traceId: generateId(32),
+ spanId: generateId(16),
+ name: e.scriptName || 'worker.request',
+ attributes: [
+ { key: 'worker.outcome', value: { stringValue: e.event.outcome } },
+ { key: 'worker.cpu_time_us', value: { intValue: String(e.event.cpuTime) } }
+ ]
+ }));
+
+ ctx.waitUntil(
+ fetch('https://api.honeycomb.io/v1/traces', {
+ method: 'POST',
+ headers: { 'X-Honeycomb-Team': env.HONEYCOMB_KEY },
+ body: JSON.stringify({ resourceSpans: [{ scopeSpans: [{ spans: otelSpans }] }] })
+ })
+ );
+ }
+};
+```
diff --git a/.agents/skills/cloudflare-deploy/references/pages-functions/README.md b/.agents/skills/cloudflare-deploy/references/pages-functions/README.md
new file mode 100644
index 0000000..deaf461
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages-functions/README.md
@@ -0,0 +1,98 @@
+# Cloudflare Pages Functions
+
+Serverless functions on Cloudflare Pages using Workers runtime. Full-stack dev with file-based routing.
+
+## Quick Navigation
+
+**Need to...**
+| Task | Go to |
+|------|-------|
+| Set up TypeScript types | [configuration.md](./configuration.md) - TypeScript Setup |
+| Configure bindings (KV, D1, R2) | [configuration.md](./configuration.md) - wrangler.jsonc |
+| Access request/env/params | [api.md](./api.md) - EventContext |
+| Add middleware or auth | [patterns.md](./patterns.md) - Middleware, Auth |
+| Background tasks (waitUntil) | [patterns.md](./patterns.md) - Background Tasks |
+| Debug errors or check limits | [gotchas.md](./gotchas.md) - Common Errors, Limits |
+
+## Decision Tree: Is This Pages Functions?
+
+```
+Need serverless backend?
+├─ Yes, for a static site → Pages Functions
+├─ Yes, standalone API → Workers
+└─ Just static hosting → Pages (no functions)
+
+Have existing Worker?
+├─ Complex routing logic → Use _worker.js (Advanced Mode)
+└─ Simple routes → Migrate to /functions (File-Based)
+
+Framework-based?
+├─ Next.js/SvelteKit/Remix → Uses _worker.js automatically
+└─ Vanilla/HTML/React SPA → Use /functions
+```
+
+## File-Based Routing
+
+```
+/functions
+ ├── index.js → /
+ ├── api.js → /api
+ ├── users/
+ │ ├── index.js → /users/
+ │ ├── [user].js → /users/:user
+ │ └── [[catchall]].js → /users/*
+ └── _middleware.js → runs on all routes
+```
+
+**Rules:**
+- `index.js` → directory root
+- Trailing slash optional
+- Specific routes precede catch-alls
+- Falls back to static if no match
+
+## Dynamic Routes
+
+**Single segment** `[param]` → string:
+```js
+// /functions/users/[user].js
+export function onRequest(context) {
+ return new Response(`Hello ${context.params.user}`);
+}
+// Matches: /users/nevi
+```
+
+**Multi-segment** `[[param]]` → array:
+```js
+// /functions/users/[[catchall]].js
+export function onRequest(context) {
+ return new Response(JSON.stringify(context.params.catchall));
+}
+// Matches: /users/nevi/foobar → ["nevi", "foobar"]
+```
+
+## Key Features
+
+- **Method handlers:** `onRequestGet`, `onRequestPost`, etc.
+- **Middleware:** `_middleware.js` for cross-cutting concerns
+- **Bindings:** KV, D1, R2, Durable Objects, Workers AI, Service bindings
+- **TypeScript:** Full type support via `wrangler types` command
+- **Advanced mode:** Use `_worker.js` for custom routing logic
+
+## Reading Order
+
+**New to Pages Functions?** Start here:
+1. [README.md](./README.md) - Overview, routing, decision tree (you are here)
+2. [configuration.md](./configuration.md) - TypeScript setup, wrangler.jsonc, bindings
+3. [api.md](./api.md) - EventContext, handlers, bindings reference
+4. [patterns.md](./patterns.md) - Middleware, auth, CORS, rate limiting, caching
+5. [gotchas.md](./gotchas.md) - Common errors, debugging, limits
+
+**Quick reference lookup:**
+- Bindings table → [api.md](./api.md)
+- Error diagnosis → [gotchas.md](./gotchas.md)
+- TypeScript setup → [configuration.md](./configuration.md)
+
+## See Also
+- [pages](../pages/) - Pages platform overview and static site deployment
+- [workers](../workers/) - Workers runtime API reference
+- [d1](../d1/) - D1 database integration with Pages Functions
diff --git a/.agents/skills/cloudflare-deploy/references/pages-functions/api.md b/.agents/skills/cloudflare-deploy/references/pages-functions/api.md
new file mode 100644
index 0000000..5263372
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages-functions/api.md
@@ -0,0 +1,143 @@
+# Function API
+
+## EventContext
+
+```typescript
+interface EventContext<Env = unknown, P extends string = string, Data = Record<string, unknown>> {
+  request: Request;                         // Incoming request
+  functionPath: string;                     // Request path
+  waitUntil(promise: Promise<any>): void;   // Background tasks (non-blocking)
+  passThroughOnException(): void;           // Fallback to static on error
+  next(input?: Request | string, init?: RequestInit): Promise<Response>;
+  env: Env;                                 // Bindings, vars, secrets
+  params: Record<P, string | string[]>;     // Route params ([user] or [[catchall]])
+  data: Data;                               // Middleware shared state
+}
+```
+
+**TypeScript:** See [configuration.md](./configuration.md) for `wrangler types` setup
+
+## Handlers
+
+```typescript
+// Generic (fallback for any method)
+export async function onRequest(ctx: EventContext): Promise<Response> {
+ return new Response('Any method');
+}
+
+// Method-specific (takes precedence over generic)
+export async function onRequestGet(ctx: EventContext): Promise<Response> {
+ return Response.json({ message: 'GET' });
+}
+
+export async function onRequestPost(ctx: EventContext): Promise<Response> {
+ const body = await ctx.request.json();
+ return Response.json({ received: body });
+}
+// Also: onRequestPut, onRequestPatch, onRequestDelete, onRequestHead, onRequestOptions
+```
+
+## Bindings Reference
+
+| Binding Type | Interface | Config Key | Use Case |
+|--------------|-----------|------------|----------|
+| KV | `KVNamespace` | `kv_namespaces` | Key-value cache, sessions, config |
+| D1 | `D1Database` | `d1_databases` | Relational data, SQL queries |
+| R2 | `R2Bucket` | `r2_buckets` | Large files, user uploads, assets |
+| Durable Objects | `DurableObjectNamespace` | `durable_objects.bindings` | Stateful coordination, websockets |
+| Workers AI | `Ai` | `ai.binding` | LLM inference, embeddings |
+| Vectorize | `VectorizeIndex` | `vectorize` | Vector search, embeddings |
+| Service Binding | `Fetcher` | `services` | Worker-to-worker RPC |
+| Analytics Engine | `AnalyticsEngineDataset` | `analytics_engine_datasets` | Event logging, metrics |
+| Environment Vars | `string` | `vars` | Non-sensitive config |
+
+See [configuration.md](./configuration.md) for wrangler.jsonc examples.
+
+## Bindings
+
+### KV
+
+```typescript
+interface Env { KV: KVNamespace; }
+export const onRequest: PagesFunction<Env> = async (ctx) => {
+ await ctx.env.KV.put('key', 'value', { expirationTtl: 3600 });
+ const val = await ctx.env.KV.get('key', { type: 'json' });
+ const keys = await ctx.env.KV.list({ prefix: 'user:' });
+ return Response.json({ val });
+};
+```
+
+### D1
+
+```typescript
+interface Env { DB: D1Database; }
+export const onRequest: PagesFunction<Env> = async (ctx) => {
+ const user = await ctx.env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(123).first();
+ return Response.json(user);
+};
+```
+
+### R2
+
+```typescript
+interface Env { BUCKET: R2Bucket; }
+export const onRequest: PagesFunction<Env> = async (ctx) => {
+ const obj = await ctx.env.BUCKET.get('file.txt');
+ if (!obj) return new Response('Not found', { status: 404 });
+ await ctx.env.BUCKET.put('file.txt', ctx.request.body);
+ return new Response(obj.body);
+};
+```
+
+### Durable Objects
+
+```typescript
+interface Env { COUNTER: DurableObjectNamespace; }
+export const onRequest: PagesFunction<Env> = async (ctx) => {
+ const stub = ctx.env.COUNTER.get(ctx.env.COUNTER.idFromName('global'));
+ return stub.fetch(ctx.request);
+};
+```
+
+### Workers AI
+
+```typescript
+interface Env { AI: Ai; }
+export const onRequest: PagesFunction<Env> = async (ctx) => {
+ const resp = await ctx.env.AI.run('@cf/meta/llama-3.1-8b-instruct', { prompt: 'Hello' });
+ return Response.json(resp);
+};
+```
+
+### Service Bindings & Env Vars
+
+```typescript
+interface Env { AUTH: Fetcher; API_KEY: string; }
+export const onRequest: PagesFunction<Env> = async (ctx) => {
+ // Service binding: forward to another Worker
+ return ctx.env.AUTH.fetch(ctx.request);
+
+ // Environment variable
+ return Response.json({ key: ctx.env.API_KEY });
+};
+```
+
+## Advanced Mode (env.ASSETS)
+
+When using `_worker.js`, access static assets via `env.ASSETS.fetch()`:
+
+```typescript
+interface Env { ASSETS: Fetcher; KV: KVNamespace; }
+
+export default {
+  async fetch(request: Request, env: Env): Promise<Response> {
+ const url = new URL(request.url);
+ if (url.pathname.startsWith('/api/')) {
+ return Response.json({ data: await env.KV.get('key') });
+ }
+ return env.ASSETS.fetch(request); // Fallback to static
+ }
+} satisfies ExportedHandler;
+```
+
+**See also:** [configuration.md](./configuration.md) for TypeScript setup and wrangler.jsonc | [patterns.md](./patterns.md) for middleware and auth patterns
diff --git a/.agents/skills/cloudflare-deploy/references/pages-functions/configuration.md b/.agents/skills/cloudflare-deploy/references/pages-functions/configuration.md
new file mode 100644
index 0000000..62ba298
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages-functions/configuration.md
@@ -0,0 +1,122 @@
+# Configuration
+
+## TypeScript Setup
+
+**Generate types from wrangler.jsonc** (replaces deprecated `@cloudflare/workers-types`):
+
+```bash
+npx wrangler types
+```
+
+Creates `worker-configuration.d.ts` with typed `Env` interface based on your bindings.
+
+```typescript
+// functions/api.ts
+export const onRequest: PagesFunction = async (ctx) => {
+ // ctx.env.KV, ctx.env.DB, etc. are fully typed
+ return Response.json({ ok: true });
+};
+```
+
+**Manual types** (if not using wrangler types):
+
+```typescript
+interface Env {
+ KV: KVNamespace;
+ DB: D1Database;
+ API_KEY: string;
+}
+export const onRequest: PagesFunction<Env> = async (ctx) => { /* ... */ };
+```
+
+## wrangler.jsonc
+
+```jsonc
+{
+ "$schema": "./node_modules/wrangler/config-schema.json",
+ "name": "my-pages-app",
+ "pages_build_output_dir": "./dist",
+ "compatibility_date": "2025-01-01",
+ "compatibility_flags": ["nodejs_compat"],
+
+ "vars": { "API_URL": "https://api.example.com" },
+ "kv_namespaces": [{ "binding": "KV", "id": "abc123" }],
+ "d1_databases": [{ "binding": "DB", "database_name": "prod-db", "database_id": "xyz789" }],
+ "r2_buckets": [{ "binding": "BUCKET", "bucket_name": "my-bucket" }],
+ "durable_objects": { "bindings": [{ "name": "COUNTER", "class_name": "Counter", "script_name": "counter-worker" }] },
+ "services": [{ "binding": "AUTH", "service": "auth-worker" }],
+ "ai": { "binding": "AI" },
+ "vectorize": [{ "binding": "VECTORIZE", "index_name": "my-index" }],
+ "analytics_engine_datasets": [{ "binding": "ANALYTICS" }]
+}
+```
+
+## Environment Overrides
+
+Top-level → local dev, `env.preview` → preview, `env.production` → production
+
+```jsonc
+{
+ "vars": { "API_URL": "http://localhost:8787" },
+ "env": {
+ "production": { "vars": { "API_URL": "https://api.example.com" } }
+ }
+}
+```
+
+**Note:** If overriding `vars`, `kv_namespaces`, `d1_databases`, etc., ALL must be redefined (non-inheritable)
+
+## Local Secrets (.dev.vars)
+
+**Local dev only** - NOT deployed:
+
+```bash
+# .dev.vars (add to .gitignore)
+SECRET_KEY="my-secret-value"
+```
+
+Accessed via `ctx.env.SECRET_KEY`. Set production secrets:
+```bash
+echo "value" | npx wrangler pages secret put SECRET_KEY --project-name=my-app
+```
+
+## Static Config Files
+
+**_routes.json** - Custom routing:
+```json
+{ "version": 1, "include": ["/api/*"], "exclude": ["/static/*"] }
+```
+
+**_headers** - Static headers:
+```
+/static/*
+ Cache-Control: public, max-age=31536000
+```
+
+**_redirects** - Redirects:
+```
+/old /new 301
+```
+
+## Local Dev & Deployment
+
+```bash
+# Dev server
+npx wrangler pages dev ./dist
+
+# With bindings
+npx wrangler pages dev ./dist --kv=KV --d1=DB=db-id --r2=BUCKET
+
+# Durable Objects (2 terminals)
+cd do-worker && npx wrangler dev
+cd pages-project && npx wrangler pages dev ./dist --do COUNTER=Counter@do-worker
+
+# Deploy
+npx wrangler pages deploy ./dist
+npx wrangler pages deploy ./dist --branch preview
+
+# Download config
+npx wrangler pages download config my-project
+```
+
+**See also:** [api.md](./api.md) for binding usage examples
diff --git a/.agents/skills/cloudflare-deploy/references/pages-functions/gotchas.md b/.agents/skills/cloudflare-deploy/references/pages-functions/gotchas.md
new file mode 100644
index 0000000..f63e608
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages-functions/gotchas.md
@@ -0,0 +1,94 @@
+# Gotchas & Debugging
+
+## Error Diagnosis
+
+| Symptom | Likely Cause | Solution |
+|---------|--------------|----------|
+| **Function not invoking** | Wrong `/functions` location, wrong extension, or `_routes.json` excludes path | Check `pages_build_output_dir`, use `.js`/`.ts`, verify `_routes.json` |
+| **`ctx.env.BINDING` undefined** | Binding not configured or name mismatch | Add to `wrangler.jsonc`, verify exact name (case-sensitive), redeploy |
+| **TypeScript errors on `ctx.env`** | Missing type definition | Run `wrangler types` or define `interface Env {}` |
+| **Middleware not running** | Wrong filename/location or missing `ctx.next()` | Name exactly `_middleware.js`, export `onRequest`, call `ctx.next()` |
+| **Secrets missing in production** | `.dev.vars` not deployed | `.dev.vars` is local only - set production secrets via dashboard or `wrangler secret put` |
+| **Type mismatch on binding** | Wrong interface type | See [api.md](./api.md) bindings table for correct types |
+| **"KV key not found" but exists** | Key in wrong namespace or env | Verify namespace binding, check preview vs production env |
+| **Function times out** | Synchronous wait or missing `await` | All I/O must be async/await, use `ctx.waitUntil()` for background tasks |
+
+## Common Errors
+
+### TypeScript type errors
+
+**Problem:** `ctx.env.MY_BINDING` shows type error
+**Cause:** No type definition for `Env`
+**Solution:** Run `npx wrangler types` or manually define:
+```typescript
+interface Env { MY_BINDING: KVNamespace; }
+export const onRequest: PagesFunction<Env> = async (ctx) => { /* ... */ };
+```
+
+### Secrets not available in production
+
+**Problem:** `ctx.env.SECRET_KEY` is undefined in production
+**Cause:** `.dev.vars` is local-only, not deployed
+**Solution:** Set production secrets:
+```bash
+echo "value" | npx wrangler pages secret put SECRET_KEY --project-name=my-app
+```
+
+## Debugging
+
+```typescript
+// Console logging
+export async function onRequest(ctx) {
+ console.log('Request:', ctx.request.method, ctx.request.url);
+ const res = await ctx.next();
+ console.log('Status:', res.status);
+ return res;
+}
+```
+
+```bash
+# Stream real-time logs
+npx wrangler pages deployment tail
+npx wrangler pages deployment tail --status error
+```
+
+```jsonc
+// Source maps (wrangler.jsonc)
+{ "upload_source_maps": true }
+```
+
+## Limits
+
+| Resource | Free | Paid |
+|----------|------|------|
+| CPU time | 10ms | 50ms |
+| Memory | 128 MB | 128 MB |
+| Script size | 10 MB compressed | 10 MB compressed |
+| Env vars | 5 KB per var, 64 max | 5 KB per var, 64 max |
+| Requests | 100k/day | Unlimited ($0.50/million) |
+
+## Best Practices
+
+**Performance:** Minimize deps (cold start), use KV for cache/D1 for relational/R2 for large files, set `Cache-Control` headers, batch DB ops, handle errors gracefully
+
+**Security:** Never commit secrets (use `.dev.vars` + gitignore), validate input, sanitize before DB, implement auth middleware, set CORS headers, rate limit per-IP
+
+## Migration
+
+**Workers → Pages Functions:**
+- `export default { fetch(req, env) {} }` → `export function onRequest(ctx) { const { request, env } = ctx; }`
+- Use `_worker.js` for complex routing: `env.ASSETS.fetch(request)` for static files
+
+**Other platforms → Pages:**
+- File-based routing: `/functions/api/users.js` → `/api/users`
+- Dynamic routes: `[param]` not `:param`
+- Replace Node.js deps with Workers APIs or add `nodejs_compat` flag
+
+## Resources
+
+- [Official Docs](https://developers.cloudflare.com/pages/functions/)
+- [Workers APIs](https://developers.cloudflare.com/workers/runtime-apis/)
+- [Examples](https://github.com/cloudflare/pages-example-projects)
+- [Discord](https://discord.gg/cloudflaredev)
+
+**See also:** [configuration.md](./configuration.md) for TypeScript setup | [patterns.md](./patterns.md) for middleware/auth | [api.md](./api.md) for bindings
diff --git a/.agents/skills/cloudflare-deploy/references/pages-functions/patterns.md b/.agents/skills/cloudflare-deploy/references/pages-functions/patterns.md
new file mode 100644
index 0000000..22289e8
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages-functions/patterns.md
@@ -0,0 +1,137 @@
+# Common Patterns
+
+## Background Tasks (waitUntil)
+
+Non-blocking tasks after response sent (analytics, cleanup, webhooks):
+
+```typescript
+export async function onRequest(ctx: EventContext) {
+ const res = Response.json({ success: true });
+
+ ctx.waitUntil(ctx.env.KV.put('last-visit', new Date().toISOString()));
+ ctx.waitUntil(Promise.all([
+ ctx.env.ANALYTICS.writeDataPoint({ event: 'view' }),
+ fetch('https://webhook.site/...', { method: 'POST' })
+ ]));
+
+ return res; // Returned immediately
+}
+```
+
+## Middleware & Auth
+
+```typescript
+// functions/_middleware.js (global) or functions/users/_middleware.js (scoped)
+export async function onRequest(ctx) {
+ try { return await ctx.next(); }
+ catch (err) { return new Response(err.message, { status: 500 }); }
+}
+
+// Chained: export const onRequest = [errorHandler, auth, logger];
+
+// Auth
+async function auth(ctx: EventContext) {
+ const token = ctx.request.headers.get('authorization')?.replace('Bearer ', '');
+ if (!token) return new Response('Unauthorized', { status: 401 });
+ const session = await ctx.env.KV.get(`session:${token}`);
+ if (!session) return new Response('Invalid', { status: 401 });
+ ctx.data.user = JSON.parse(session);
+ return ctx.next();
+}
+```
+
+## CORS & Rate Limiting
+
+```typescript
+// CORS middleware
+const cors = { 'Access-Control-Allow-Origin': '*', 'Access-Control-Allow-Methods': 'GET, POST' };
+export async function onRequestOptions() { return new Response(null, { headers: cors }); }
+export async function onRequest(ctx) {
+ const res = await ctx.next();
+ Object.entries(cors).forEach(([k, v]) => res.headers.set(k, v));
+ return res;
+}
+
+// Rate limiting (KV-based)
+async function rateLimit(ctx: EventContext) {
+ const ip = ctx.request.headers.get('CF-Connecting-IP') || 'unknown';
+ const count = parseInt(await ctx.env.KV.get(`rate:${ip}`) || '0');
+ if (count >= 100) return new Response('Rate limited', { status: 429 });
+ await ctx.env.KV.put(`rate:${ip}`, (count + 1).toString(), { expirationTtl: 3600 });
+ return ctx.next();
+}
+```
+
+## Forms, Caching, Redirects
+
+```typescript
+// JSON & file upload
+export async function onRequestPost(ctx) {
+ const ct = ctx.request.headers.get('content-type') || '';
+ if (ct.includes('application/json')) return Response.json(await ctx.request.json());
+ if (ct.includes('multipart/form-data')) {
+ const file = (await ctx.request.formData()).get('file') as File;
+ await ctx.env.BUCKET.put(file.name, file.stream());
+ return Response.json({ uploaded: file.name });
+ }
+}
+
+// Cache API
+export async function onRequest(ctx) {
+ let res = await caches.default.match(ctx.request);
+ if (!res) {
+ res = new Response('Data');
+ res.headers.set('Cache-Control', 'public, max-age=3600');
+ ctx.waitUntil(caches.default.put(ctx.request, res.clone()));
+ }
+ return res;
+}
+
+// Redirects
+export async function onRequest(ctx) {
+ if (new URL(ctx.request.url).pathname === '/old') {
+ return Response.redirect(new URL('/new', ctx.request.url), 301);
+ }
+ return ctx.next();
+}
+```
+
+## Testing
+
+**Unit tests** (Vitest + cloudflare:test):
+```typescript
+import { env } from 'cloudflare:test';
+import { it, expect } from 'vitest';
+import { onRequest } from '../functions/api';
+
+it('returns JSON', async () => {
+ const req = new Request('http://localhost/api');
+ const ctx = { request: req, env, params: {}, data: {} } as EventContext;
+ const res = await onRequest(ctx);
+ expect(res.status).toBe(200);
+});
+```
+
+**Integration:** `wrangler pages dev` + Playwright/Cypress
+
+## Advanced Mode (_worker.js)
+
+Use `_worker.js` for complex routing (replaces `/functions`):
+
+```typescript
+interface Env { ASSETS: Fetcher; KV: KVNamespace; }
+
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const url = new URL(request.url);
+ if (url.pathname.startsWith('/api/')) {
+ return Response.json({ data: await env.KV.get('key') });
+ }
+ return env.ASSETS.fetch(request); // Static files
+ }
+} satisfies ExportedHandler;
+```
+
+**When:** Existing Worker, framework-generated (Next.js/SvelteKit), custom routing logic
+
+**See also:** [api.md](./api.md) for `env.ASSETS.fetch()` | [gotchas.md](./gotchas.md) for debugging
diff --git a/.agents/skills/cloudflare-deploy/references/pages/README.md b/.agents/skills/cloudflare-deploy/references/pages/README.md
new file mode 100644
index 0000000..bf0546f
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages/README.md
@@ -0,0 +1,88 @@
+# Cloudflare Pages
+
+JAMstack platform for full-stack apps on Cloudflare's global network.
+
+## Key Features
+
+- **Git-based deploys**: Auto-deploy from GitHub/GitLab
+- **Preview deployments**: Unique URL per branch/PR
+- **Pages Functions**: File-based serverless routing (Workers runtime)
+- **Static + dynamic**: Smart asset caching + edge compute
+- **Smart Placement**: Automatic function optimization based on traffic patterns
+- **Framework optimized**: SvelteKit, Astro, Nuxt, Qwik, Solid Start
+
+## Deployment Methods
+
+### 1. Git Integration (Production)
+Dashboard → Workers & Pages → Create → Connect to Git → Configure build
+
+### 2. Direct Upload
+```bash
+npx wrangler pages deploy ./dist --project-name=my-project
+npx wrangler pages deploy ./dist --project-name=my-project --branch=staging
+```
+
+### 3. C3 CLI
+```bash
+npm create cloudflare@latest my-app
+# Select framework → auto-setup + deploy
+```
+
+## vs Workers
+
+- **Pages**: Static sites, JAMstack, frameworks, git workflow, file-based routing
+- **Workers**: Pure APIs, complex routing, WebSockets, scheduled tasks, email handlers
+- **Combine**: Pages Functions use Workers runtime, can bind to Workers
+
+## Quick Start
+
+```bash
+# Create
+npm create cloudflare@latest
+
+# Local dev
+npx wrangler pages dev ./dist
+
+# Deploy
+npx wrangler pages deploy ./dist --project-name=my-project
+
+# Types
+npx wrangler types --path='./functions/types.d.ts'
+
+# Secrets
+echo "value" | npx wrangler pages secret put KEY --project-name=my-project
+
+# Logs
+npx wrangler pages deployment tail --project-name=my-project
+```
+
+## Resources
+
+- [Pages Docs](https://developers.cloudflare.com/pages/)
+- [Functions API](https://developers.cloudflare.com/pages/functions/api-reference/)
+- [Framework Guides](https://developers.cloudflare.com/pages/framework-guides/)
+- [Discord #functions](https://discord.com/channels/595317990191398933/910978223968518144)
+
+## Reading Order
+
+**New to Pages?** Start here:
+1. README.md (you are here) - Overview & quick start
+2. [configuration.md](./configuration.md) - Project setup, wrangler.jsonc, bindings
+3. [api.md](./api.md) - Functions API, routing, context
+4. [patterns.md](./patterns.md) - Common implementations
+5. [gotchas.md](./gotchas.md) - Troubleshooting & pitfalls
+
+**Quick reference?** Jump to relevant file above.
+
+## In This Reference
+
+- [configuration.md](./configuration.md) - wrangler.jsonc, build, env vars, Smart Placement
+- [api.md](./api.md) - Functions API, bindings, context, advanced mode
+- [patterns.md](./patterns.md) - Full-stack patterns, framework integration
+- [gotchas.md](./gotchas.md) - Build issues, limits, debugging, framework warnings
+
+## See Also
+
+- [pages-functions](../pages-functions/) - File-based routing, middleware
+- [d1](../d1/) - SQL database for Pages Functions
+- [kv](../kv/) - Key-value storage for caching/state
diff --git a/.agents/skills/cloudflare-deploy/references/pages/api.md b/.agents/skills/cloudflare-deploy/references/pages/api.md
new file mode 100644
index 0000000..a719585
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages/api.md
@@ -0,0 +1,204 @@
+# Functions API
+
+## File-Based Routing
+
+```
+/functions/index.ts → example.com/
+/functions/api/users.ts → example.com/api/users
+/functions/api/users/[id].ts → example.com/api/users/:id
+/functions/api/users/[[path]].ts → example.com/api/users/* (catchall)
+/functions/_middleware.ts → Runs before all routes
+```
+
+**Rules**: `[param]` = single segment, `[[param]]` = multi-segment catchall, more specific wins.
+
+## Request Handlers
+
+```typescript
+import type { PagesFunction } from '@cloudflare/workers-types';
+
+interface Env {
+ DB: D1Database;
+ KV: KVNamespace;
+}
+
+// All methods
+export const onRequest: PagesFunction<Env> = async (context) => {
+ return new Response('All methods');
+};
+
+// Method-specific
+export const onRequestGet: PagesFunction<Env> = async (context) => {
+ const { request, env, params, data } = context;
+
+ const user = await env.DB.prepare(
+ 'SELECT * FROM users WHERE id = ?'
+ ).bind(params.id).first();
+
+ return Response.json(user);
+};
+
+export const onRequestPost: PagesFunction<Env> = async (context) => {
+ const body = await context.request.json();
+ return Response.json({ success: true });
+};
+
+// Also: onRequestPut, onRequestPatch, onRequestDelete, onRequestHead, onRequestOptions
+```
+
+## Context Object
+
+```typescript
+interface EventContext {
+ request: Request; // HTTP request
+ env: Env; // Bindings (KV, D1, R2, etc.)
+ params: Params; // Route parameters
+ data: Data; // Middleware-shared data
+ waitUntil: (promise: Promise<any>) => void; // Background tasks
+ next: () => Promise<Response>; // Next handler
+ passThroughOnException: () => void; // Error fallback (not in advanced mode)
+}
+```
+
+## Dynamic Routes
+
+```typescript
+// Single segment: functions/users/[id].ts
+export const onRequestGet: PagesFunction = async ({ params }) => {
+ // /users/123 → params.id = "123"
+ return Response.json({ userId: params.id });
+};
+
+// Multi-segment: functions/files/[[path]].ts
+export const onRequestGet: PagesFunction = async ({ params }) => {
+ // /files/docs/api/v1.md → params.path = ["docs", "api", "v1.md"]
+ const filePath = (params.path as string[]).join('/');
+ return new Response(filePath);
+};
+```
+
+## Middleware
+
+```typescript
+// functions/_middleware.ts
+// Single
+export const onRequest: PagesFunction = async (context) => {
+ const response = await context.next();
+ response.headers.set('X-Custom-Header', 'value');
+ return response;
+};
+
+// Chained (runs in order)
+const errorHandler: PagesFunction = async (context) => {
+ try {
+ return await context.next();
+ } catch (err) {
+ return new Response(err.message, { status: 500 });
+ }
+};
+
+const auth: PagesFunction = async (context) => {
+ const token = context.request.headers.get('Authorization');
+ if (!token) return new Response('Unauthorized', { status: 401 });
+ context.data.userId = await verifyToken(token);
+ return context.next();
+};
+
+export const onRequest = [errorHandler, auth];
+```
+
+**Scope**: `functions/_middleware.ts` → all; `functions/api/_middleware.ts` → `/api/*` only
+
+## Bindings Usage
+
+```typescript
+export const onRequestGet: PagesFunction<Env> = async ({ env }) => {
+ // KV
+ const cached = await env.KV.get('key', 'json');
+ await env.KV.put('key', JSON.stringify({data: 'value'}), {expirationTtl: 3600});
+
+ // D1
+ const result = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
+
+ // R2, Queue, AI - see respective reference docs
+
+ return Response.json({success: true});
+};
+```
+
+## Advanced Mode
+
+Full Workers API, bypasses file-based routing:
+
+```javascript
+// functions/_worker.js
+export default {
+ async fetch(request, env, ctx) {
+ const url = new URL(request.url);
+
+ // Custom routing
+ if (url.pathname.startsWith('/api/')) {
+ return new Response('API response');
+ }
+
+ // REQUIRED: Serve static assets
+ return env.ASSETS.fetch(request);
+ }
+};
+```
+
+**When to use**: WebSockets, complex routing, scheduled handlers, email handlers.
+
+## Smart Placement
+
+Automatically optimizes function execution location based on traffic patterns.
+
+**Configuration** (in wrangler.jsonc):
+```jsonc
+{
+ "placement": {
+ "mode": "smart" // Enables optimization (default: off)
+ }
+}
+```
+
+**How it works**: Analyzes traffic patterns over time and places functions closer to users or data sources (e.g., D1 databases). Requires no code changes.
+
+**Trade-offs**: Initial requests may see slightly higher latency during learning period (hours-days). Performance improves as system optimizes.
+
+**When to use**: Global apps with centralized databases or geographically concentrated traffic sources.
+
+## Accessing Bindings (Framework SSR)
+
+Access bindings in framework code:
+
+```typescript
+// SvelteKit
+import type { RequestEvent } from '@sveltejs/kit';
+export async function load({ platform }: RequestEvent) {
+ const data = await platform.env.DB.prepare('SELECT * FROM users').all();
+ return { users: data.results };
+}
+
+// Astro
+const { DB } = Astro.locals.runtime.env;
+const data = await DB.prepare('SELECT * FROM users').all();
+
+// Solid Start (server function)
+import { getRequestEvent } from 'solid-js/web';
+const event = getRequestEvent();
+const data = await event.locals.runtime.env.DB.prepare('SELECT * FROM users').all();
+```
+
+**✅ Supported adapters** (2026):
+- **SvelteKit**: `@sveltejs/adapter-cloudflare`
+- **Astro**: Built-in Cloudflare adapter
+- **Nuxt**: Set `nitro.preset: 'cloudflare-pages'` in `nuxt.config.ts`
+- **Qwik**: Built-in Cloudflare adapter
+- **Solid Start**: `@solidjs/start-cloudflare-pages`
+
+**❌ Deprecated/Unsupported**:
+- **Next.js**: Official adapter (`@cloudflare/next-on-pages`) deprecated. Use Vercel or self-host on Workers.
+- **Remix**: Official adapter (`@remix-run/cloudflare-pages`) deprecated. Migrate to supported frameworks.
+
+See [gotchas.md](./gotchas.md#framework-specific) for migration guidance.
diff --git a/.agents/skills/cloudflare-deploy/references/pages/configuration.md b/.agents/skills/cloudflare-deploy/references/pages/configuration.md
new file mode 100644
index 0000000..30ada89
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages/configuration.md
@@ -0,0 +1,201 @@
+# Configuration
+
+## wrangler.jsonc
+
+```jsonc
+{
+ "name": "my-pages-project",
+ "pages_build_output_dir": "./dist",
+ "compatibility_date": "2026-01-01", // Use current date for new projects
+ "compatibility_flags": ["nodejs_compat"],
+ "placement": {
+ "mode": "smart" // Optional: Enable Smart Placement
+ },
+ "kv_namespaces": [{"binding": "KV", "id": "abcd1234..."}],
+ "d1_databases": [{"binding": "DB", "database_id": "xxxx-xxxx", "database_name": "production-db"}],
+ "r2_buckets": [{"binding": "BUCKET", "bucket_name": "my-bucket"}],
+ "durable_objects": {"bindings": [{"name": "COUNTER", "class_name": "Counter", "script_name": "counter-worker"}]},
+ "services": [{"binding": "API", "service": "api-worker"}],
+ "queues": {"producers": [{"binding": "QUEUE", "queue": "my-queue"}]},
+ "vectorize": [{"binding": "VECTORIZE", "index_name": "my-index"}],
+ "ai": {"binding": "AI"},
+ "analytics_engine_datasets": [{"binding": "ANALYTICS"}],
+ "vars": {"API_URL": "https://api.example.com", "ENVIRONMENT": "production"},
+ "env": {
+ "preview": {
+ "vars": {"API_URL": "https://staging-api.example.com"},
+ "kv_namespaces": [{"binding": "KV", "id": "preview-namespace-id"}]
+ }
+ }
+}
+```
+
+## Build Config
+
+**Git deployment**: Dashboard → Project → Settings → Build settings
+Set build command, output dir, env vars. Framework auto-detection configures automatically.
+
+## Environment Variables
+
+### Local (.dev.vars)
+```bash
+# .dev.vars (never commit)
+SECRET_KEY="local-secret-key"
+API_TOKEN="dev-token-123"
+```
+
+### Production
+```bash
+echo "secret-value" | npx wrangler pages secret put SECRET_KEY --project-name=my-project
+npx wrangler pages secret list --project-name=my-project
+npx wrangler pages secret delete SECRET_KEY --project-name=my-project
+```
+
+Access: `env.SECRET_KEY`
+
+## Static Config Files
+
+### _redirects
+Place in build output (e.g., `dist/_redirects`):
+
+```txt
+/old-page /new-page 301 # 301 redirect
+/blog/* /news/:splat 301 # Splat wildcard
+/users/:id /members/:id 301 # Placeholders
+/api/* /api-v2/:splat 200 # Proxy (no redirect)
+```
+
+**Limits**: 2,100 total (2,000 static + 100 dynamic), 1,000 char/line
+**Note**: Functions take precedence
+
+### _headers
+```txt
+/secure/*
+ X-Frame-Options: DENY
+ X-Content-Type-Options: nosniff
+
+/api/*
+ Access-Control-Allow-Origin: *
+
+/static/*
+ Cache-Control: public, max-age=31536000, immutable
+```
+
+**Limits**: 100 rules, 2,000 char/line
+**Note**: Only static assets; Functions set headers in Response
+
+### _routes.json
+Controls which requests invoke Functions (auto-generated for most frameworks):
+
+```json
+{
+ "version": 1,
+ "include": ["/*"],
+ "exclude": ["/build/*", "/static/*", "/assets/*", "/*.{ico,png,jpg,css,js}"]
+}
+```
+
+**Purpose**: Functions are metered; static requests are free. `exclude` takes precedence. Max 100 rules, 100 char/rule.
+
+## TypeScript
+
+```bash
+npx wrangler types --path='./functions/types.d.ts'
+```
+
+Point `types` in `functions/tsconfig.json` to generated file.
+
+## Smart Placement
+
+Automatically optimizes function execution location based on request patterns.
+
+```jsonc
+{
+ "placement": {
+ "mode": "smart" // Enable optimization (default: off)
+ }
+}
+```
+
+**How it works**: System analyzes traffic over hours/days and places function execution closer to:
+- User clusters (e.g., regional traffic)
+- Data sources (e.g., D1 database primary location)
+
+**Benefits**:
+- Lower latency for read-heavy apps with centralized databases
+- Better performance for apps with regional traffic patterns
+
+**Trade-offs**:
+- Initial learning period: First requests may be slower while system optimizes
+- Optimization time: Performance improves over 24-48 hours
+
+**When to enable**: Global apps with D1/Durable Objects in specific regions, or apps with concentrated geographic traffic.
+
+**When to skip**: Evenly distributed global traffic with no data locality constraints.
+
+## Remote Bindings (Local Dev)
+
+Connect local dev server to production bindings instead of local mocks:
+
+```bash
+# All bindings remote
+npx wrangler pages dev ./dist --remote
+
+# Specific bindings remote (others local)
+npx wrangler pages dev ./dist --remote --kv=KV --d1=DB
+```
+
+**Use cases**:
+- Test against production data (read-only operations)
+- Debug binding-specific behavior
+- Validate changes before deployment
+
+**⚠️ Warning**:
+- Writes affect **real production data**
+- Use only for read-heavy debugging or with non-production accounts
+- Consider creating separate preview environments instead
+
+**Requirements**: Must be logged in (`npx wrangler login`) with access to bindings.
+
+## Local Dev
+
+```bash
+# Basic
+npx wrangler pages dev ./dist
+
+# With bindings
+npx wrangler pages dev ./dist --kv KV --d1 DB=local-db-id
+
+# Remote bindings (production data)
+npx wrangler pages dev ./dist --remote
+
+# Persistence
+npx wrangler pages dev ./dist --persist-to=./.wrangler/state/v3
+
+# Proxy mode (SSR frameworks)
+npx wrangler pages dev -- npm run dev
+```
+
+## Limits (as of Jan 2026)
+
+| Resource | Free | Paid |
+|----------|------|------|
+| **Functions Requests** | 100k/day | Unlimited (metered) |
+| **Function CPU Time** | 10ms/req | 30ms/req (Workers Paid) |
+| **Function Memory** | 128MB | 128MB |
+| **Script Size** | 1MB compressed | 10MB compressed |
+| **Deployments** | 500/month | 5,000/month |
+| **Files per Deploy** | 20,000 | 20,000 |
+| **File Size** | 25MB | 25MB |
+| **Build Time** | 20min | 20min |
+| **Redirects** | 2,100 (2k static + 100 dynamic) | Same |
+| **Header Rules** | 100 | 100 |
+| **Route Rules** | 100 | 100 |
+| **Subrequests** | 50/request | 1,000/request (Workers Paid) |
+
+**Notes**:
+- Functions use Workers runtime; Workers Paid plan increases limits
+- Free plan sufficient for most projects
+- Static requests always free (not counted toward limits)
+
+[Full limits](https://developers.cloudflare.com/pages/platform/limits/)
diff --git a/.agents/skills/cloudflare-deploy/references/pages/gotchas.md b/.agents/skills/cloudflare-deploy/references/pages/gotchas.md
new file mode 100644
index 0000000..943c2d3
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages/gotchas.md
@@ -0,0 +1,203 @@
+# Gotchas
+
+## Functions Not Running
+
+**Problem**: Function endpoints return 404 or don't execute
+**Causes**: `_routes.json` excludes path; wrong file extension (`.jsx`/`.tsx`); Functions dir not at output root
+**Solution**: Check `_routes.json`, rename to `.ts`/`.js`, verify build output structure
+
+## 404 on Static Assets
+
+**Problem**: Static files not serving
+**Causes**: Build output dir misconfigured; Functions catching requests; Advanced mode missing `env.ASSETS.fetch()`
+**Solution**: Verify output dir, add exclusions to `_routes.json`, call `env.ASSETS.fetch()` in `_worker.js`
+
+## Bindings Not Working
+
+**Problem**: `env.BINDING` undefined or errors
+**Causes**: wrangler.jsonc syntax error; wrong binding IDs; missing `.dev.vars`; out-of-sync types
+**Solution**: Validate config, verify IDs, create `.dev.vars`, run `npx wrangler types`
+
+## Build Failures
+
+**Problem**: Deployment fails during build
+**Causes**: Wrong build command/output dir; Node version incompatibility; missing env vars; 20min timeout; OOM
+**Solution**: Check Dashboard → Deployments → Build log; verify settings; add `.nvmrc`; optimize build
+
+## Middleware Not Running
+
+**Problem**: Middleware doesn't execute
+**Causes**: Wrong filename (not `_middleware.ts`); missing `onRequest` export; didn't call `next()`
+**Solution**: Rename file with underscore prefix; export handler; call `next()` or return Response
+
+## Headers/Redirects Not Working
+
+**Problem**: `_headers` or `_redirects` not applying
+**Causes**: Only work for static assets; Functions override; syntax errors; exceeded limits
+**Solution**: Set headers in Response object for Functions; verify syntax; check limits (100 headers, 2,100 redirects)
+
+## TypeScript Errors
+
+**Problem**: Type errors in Functions code
+**Causes**: Types not generated; Env interface doesn't match wrangler.jsonc
+**Solution**: Run `npx wrangler types --path='./functions/types.d.ts'`; update Env interface
+
+## Local Dev Issues
+
+**Problem**: Dev server errors or bindings don't work
+**Causes**: Port conflict; bindings not passed; local vs HTTPS differences
+**Solution**: Use `--port=3000`; pass bindings via CLI or wrangler.jsonc; account for HTTP/HTTPS differences
+
+## Performance Issues
+
+**Problem**: Slow responses or CPU limit errors
+**Causes**: Functions invoked for static assets; cold starts; 10ms CPU limit; large bundle
+**Solution**: Exclude static via `_routes.json`; optimize hot paths; keep bundle < 1MB
+
+## Framework-Specific
+
+### ⚠️ Deprecated Frameworks
+
+**Next.js**: Official adapter (`@cloudflare/next-on-pages`) **deprecated** and unmaintained.
+- **Problem**: No updates since 2024; incompatible with Next.js 15+; missing App Router features
+- **Cause**: Cloudflare discontinued official support; community fork exists but limited
+- **Solutions**:
+ 1. **Recommended**: Use Vercel (official Next.js host)
+ 2. **Advanced**: Self-host on Workers using custom adapter (complex, unsupported)
+ 3. **Migration**: Switch to SvelteKit/Nuxt (similar DX, full Pages support)
+
+**Remix**: Official adapter (`@remix-run/cloudflare-pages`) **deprecated**.
+- **Problem**: No maintenance from Remix team; compatibility issues with Remix v2+
+- **Cause**: Remix team deprecated all framework adapters
+- **Solutions**:
+ 1. **Recommended**: Migrate to SvelteKit (similar file-based routing, better DX)
+ 2. **Alternative**: Use Astro (static-first with optional SSR)
+ 3. **Workaround**: Continue using deprecated adapter (no future support)
+
+### ✅ Supported Frameworks
+
+**SvelteKit**:
+- Use `@sveltejs/adapter-cloudflare`
+- Access bindings via `platform.env` in server load functions
+- Set `platform: 'cloudflare'` in `svelte.config.js`
+
+**Astro**:
+- Built-in Cloudflare adapter
+- Access bindings via `Astro.locals.runtime.env`
+
+**Nuxt**:
+- Set `nitro.preset: 'cloudflare-pages'` in `nuxt.config.ts`
+- Access bindings via `event.context.cloudflare.env`
+
+**Qwik, Solid Start**:
+- Built-in or official Cloudflare adapters available
+- Check respective framework docs for binding access
+
+## Debugging
+
+```typescript
+// Log request details
+console.log('Request:', { method: request.method, url: request.url });
+console.log('Env:', Object.keys(env));
+console.log('Params:', params);
+```
+
+**View logs**: `npx wrangler pages deployment tail --project-name=my-project`
+
+## Smart Placement Issues
+
+### Increased Cold Start Latency
+
+**Problem**: First requests slower after enabling Smart Placement
+**Cause**: Initial optimization period while system learns traffic patterns
+**Solution**: Expected behavior during first 24-48 hours; monitor latency trends over time
+
+### Inconsistent Response Times
+
+**Problem**: Latency varies significantly across requests during initial deployment
+**Cause**: Smart Placement testing different execution locations to find optimal placement
+**Solution**: Normal during learning phase; stabilizes after traffic patterns emerge (1-2 days)
+
+### No Performance Improvement
+
+**Problem**: Smart Placement enabled but no latency reduction observed
+**Cause**: Traffic evenly distributed globally, or no data locality constraints
+**Solution**: Smart Placement most effective with centralized data (D1/DO) or regional traffic; disable if no benefit
+
+## Remote Bindings Issues
+
+### Accidentally Modified Production Data
+
+**Problem**: Local dev with `--remote` altered production database/KV
+**Cause**: Remote bindings connect directly to production resources; writes are real
+**Solution**:
+- Use `--remote` only for read-heavy debugging
+- Create separate preview environments for testing
+- Never use `--remote` for write operations during development
+
+### Remote Binding Auth Errors
+
+**Problem**: `npx wrangler pages dev --remote` fails with "Unauthorized" or auth error
+**Cause**: Not logged in, session expired, or insufficient account permissions
+**Solution**:
+1. Run `npx wrangler login` to re-authenticate
+2. Verify account has access to project and bindings
+3. Check binding IDs match production configuration
+
+### Slow Local Dev with Remote Bindings
+
+**Problem**: Local dev server slow when using `--remote`
+**Cause**: Every request makes network calls to production bindings
+**Solution**: Use local bindings for development; reserve `--remote` for final validation
+
+## Common Errors
+
+### "Module not found"
+**Cause**: Dependencies not bundled or build output incorrect
+**Solution**: Check build output directory, ensure dependencies bundled
+
+### "Binding not found"
+**Cause**: Binding not configured or types out of sync
+**Solution**: Verify wrangler.jsonc, run `npx wrangler types`
+
+### "Request exceeded CPU limit"
+**Cause**: Code execution too slow or heavy compute
+**Solution**: Optimize hot paths, upgrade to Workers Paid
+
+### "Script too large"
+**Cause**: Bundle size exceeds limit
+**Solution**: Tree-shake, use dynamic imports, code-split
+
+### "Too many subrequests"
+**Cause**: Exceeded 50 subrequest limit
+**Solution**: Batch or reduce fetch calls
+
+### "KV key not found"
+**Cause**: Key doesn't exist or wrong namespace
+**Solution**: Check namespace matches environment
+
+### "D1 error"
+**Cause**: Wrong database_id or missing migrations
+**Solution**: Verify config, run `wrangler d1 migrations list`
+
+## Limits Reference (Jan 2026)
+
+| Resource | Free | Paid |
+|----------|------|------|
+| Functions Requests | 100k/day | Unlimited |
+| CPU Time | 10ms/req | 30ms/req |
+| Memory | 128MB | 128MB |
+| Script Size | 1MB | 10MB |
+| Subrequests | 50/req | 1,000/req |
+| Deployments | 500/month | 5,000/month |
+
+**Tip**: Hitting CPU limit? Optimize hot paths or upgrade to Workers Paid plan.
+
+[Full limits](https://developers.cloudflare.com/pages/platform/limits/)
+
+## Getting Help
+
+1. Check [Pages Docs](https://developers.cloudflare.com/pages/)
+2. Search [Discord #functions](https://discord.com/channels/595317990191398933/910978223968518144)
+3. Review [Workers Examples](https://developers.cloudflare.com/workers/examples/)
+4. Check framework-specific docs/adapters
diff --git a/.agents/skills/cloudflare-deploy/references/pages/patterns.md b/.agents/skills/cloudflare-deploy/references/pages/patterns.md
new file mode 100644
index 0000000..883c4da
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pages/patterns.md
@@ -0,0 +1,204 @@
+# Patterns
+
+## API Routes
+
+```typescript
+// functions/api/todos/[id].ts
+export const onRequestGet: PagesFunction = async ({ env, params }) => {
+ const todo = await env.DB.prepare('SELECT * FROM todos WHERE id = ?').bind(params.id).first();
+ if (!todo) return new Response('Not found', { status: 404 });
+ return Response.json(todo);
+};
+
+export const onRequestPut: PagesFunction = async ({ env, params, request }) => {
+ const body = await request.json();
+ await env.DB.prepare('UPDATE todos SET title = ?, completed = ? WHERE id = ?')
+ .bind(body.title, body.completed, params.id).run();
+ return Response.json({ success: true });
+};
+// Also: onRequestDelete, onRequestPost
+```
+
+## Auth Middleware
+
+```typescript
+// functions/_middleware.ts
+const auth: PagesFunction = async (context) => {
+ if (context.request.url.includes('/public/')) return context.next();
+ const authHeader = context.request.headers.get('Authorization');
+ if (!authHeader?.startsWith('Bearer ')) {
+ return new Response('Unauthorized', { status: 401 });
+ }
+
+ try {
+ const payload = await verifyJWT(authHeader.substring(7), context.env.JWT_SECRET);
+ context.data.user = payload;
+ return context.next();
+ } catch (err) {
+ return new Response('Invalid token', { status: 401 });
+ }
+};
+export const onRequest = [auth];
+```
+
+## CORS
+
+```typescript
+// functions/api/_middleware.ts
+const corsHeaders = {
+ 'Access-Control-Allow-Origin': '*',
+ 'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
+ 'Access-Control-Allow-Headers': 'Content-Type, Authorization'
+};
+
+export const onRequest: PagesFunction = async (context) => {
+ if (context.request.method === 'OPTIONS') {
+ return new Response(null, {headers: corsHeaders});
+ }
+ const response = await context.next();
+ Object.entries(corsHeaders).forEach(([k, v]) => response.headers.set(k, v));
+ return response;
+};
+```
+
+## Form Handling
+
+```typescript
+// functions/api/contact.ts
+export const onRequestPost: PagesFunction = async ({ request, env }) => {
+ const formData = await request.formData();
+ await env.QUEUE.send({name: formData.get('name'), email: formData.get('email')});
+ return new Response('<h1>Thanks!</h1>', { headers: { 'Content-Type': 'text/html' } });
+};
+```
+
+## Background Tasks
+
+```typescript
+export const onRequestPost: PagesFunction = async ({ request, waitUntil }) => {
+ const data = await request.json();
+ waitUntil(fetch('https://api.example.com/webhook', {
+ method: 'POST', body: JSON.stringify(data)
+ }));
+ return Response.json({ queued: true });
+};
+```
+
+## Error Handling
+
+```typescript
+// functions/_middleware.ts
+const errorHandler: PagesFunction = async (context) => {
+ try {
+ return await context.next();
+ } catch (error) {
+ console.error('Error:', error);
+ if (context.request.url.includes('/api/')) {
+ return Response.json({ error: error.message }, { status: 500 });
+ }
+ return new Response(`<h1>Error</h1><p>${error.message}</p>`, {
+ status: 500, headers: { 'Content-Type': 'text/html' }
+ });
+ }
+};
+export const onRequest = [errorHandler];
+```
+
+## Caching
+
+```typescript
+// functions/api/data.ts
+export const onRequestGet: PagesFunction = async ({ env, request }) => {
+ const cacheKey = `data:${new URL(request.url).pathname}`;
+ const cached = await env.KV.get(cacheKey, 'json');
+ if (cached) return Response.json(cached, { headers: { 'X-Cache': 'HIT' } });
+
+ const data = await env.DB.prepare('SELECT * FROM data').first();
+ await env.KV.put(cacheKey, JSON.stringify(data), {expirationTtl: 3600});
+ return Response.json(data, {headers: {'X-Cache': 'MISS'}});
+};
+```
+
+## Smart Placement for Database Apps
+
+Enable Smart Placement for apps with D1 or centralized data sources:
+
+```jsonc
+// wrangler.jsonc
+{
+ "name": "global-app",
+ "placement": {
+ "mode": "smart"
+ },
+ "d1_databases": [{
+ "binding": "DB",
+ "database_id": "your-db-id"
+ }]
+}
+```
+
+```typescript
+// functions/api/data.ts
+export const onRequestGet: PagesFunction = async ({ env }) => {
+ // Smart Placement optimizes execution location over time
+ // Balances user location vs database location
+ const data = await env.DB.prepare('SELECT * FROM products LIMIT 10').all();
+ return Response.json(data);
+};
+```
+
+**Best for**: Read-heavy apps with D1/Durable Objects in specific regions.
+**Not needed**: Apps without data locality constraints or with evenly distributed traffic.
+
+## Framework Integration
+
+**Supported** (2026): SvelteKit, Astro, Nuxt, Qwik, Solid Start
+
+```bash
+npm create cloudflare@latest my-app -- --framework=svelte
+```
+
+### SvelteKit
+```typescript
+// src/routes/+page.server.ts
+export const load = async ({ platform }) => {
+ const todos = await platform.env.DB.prepare('SELECT * FROM todos').all();
+ return { todos: todos.results };
+};
+```
+
+### Astro
+```astro
+---
+const { DB } = Astro.locals.runtime.env;
+const todos = await DB.prepare('SELECT * FROM todos').all();
+---
+<ul>{todos.results.map(t => <li>{t.title}</li>)}</ul>
+```
+
+### Nuxt
+```typescript
+// server/api/todos.get.ts
+export default defineEventHandler(async (event) => {
+ const { DB } = event.context.cloudflare.env;
+ return await DB.prepare('SELECT * FROM todos').all();
+});
+```
+
+**⚠️ Framework Status** (2026):
+- ✅ **Supported**: SvelteKit, Astro, Nuxt, Qwik, Solid Start
+- ❌ **Deprecated**: Next.js (`@cloudflare/next-on-pages`), Remix (`@remix-run/cloudflare-pages`)
+
+For deprecated frameworks, see [gotchas.md](./gotchas.md#framework-specific) for migration options.
+
+[Framework Guides](https://developers.cloudflare.com/pages/framework-guides/)
+
+## Monorepo
+
+Dashboard → Settings → Build → Root directory. Set to subproject (e.g., `apps/web`).
+
+## Best Practices
+
+**Performance**: Exclude static via `_routes.json`; cache with KV; keep bundle < 1MB
+**Security**: Use secrets (not vars); validate inputs; rate limit with KV/DO
+**Workflow**: Preview per branch; local dev with `wrangler pages dev`; instant rollbacks in Dashboard
diff --git a/.agents/skills/cloudflare-deploy/references/pipelines/README.md b/.agents/skills/cloudflare-deploy/references/pipelines/README.md
new file mode 100644
index 0000000..2724485
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pipelines/README.md
@@ -0,0 +1,105 @@
+# Cloudflare Pipelines
+
+ETL streaming platform for ingesting, transforming, and loading data into R2 with SQL transformations.
+
+## Overview
+
+Pipelines provides:
+- **Streams**: Durable event buffers (HTTP/Workers ingestion)
+- **Pipelines**: SQL-based transformations
+- **Sinks**: R2 destinations (Iceberg tables or Parquet/JSON files)
+
+**Status**: Open beta (Workers Paid plan)
+**Pricing**: No charge beyond standard R2 storage/operations
+
+## Architecture
+
+```
+Data Sources → Streams → Pipelines (SQL) → Sinks → R2
+ ↑ ↓ ↓
+ HTTP/Workers Transform Iceberg/Parquet
+```
+
+| Component | Purpose | Key Feature |
+|-----------|---------|-------------|
+| Streams | Event ingestion | Structured (validated) or unstructured |
+| Pipelines | Transform with SQL | Immutable after creation |
+| Sinks | Write to R2 | Exactly-once delivery |
+
+## Quick Start
+
+```bash
+# Interactive setup (recommended)
+npx wrangler pipelines setup
+```
+
+**Minimal Worker example:**
+```typescript
+interface Env {
+ STREAM: Pipeline;
+}
+
+export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ const event = { user_id: "123", event_type: "purchase", amount: 29.99 };
+
+ // Fire-and-forget pattern
+ ctx.waitUntil(env.STREAM.send([event]));
+
+ return new Response('OK');
+ }
+} satisfies ExportedHandler<Env>;
+```
+
+## Which Sink Type?
+
+```
+Need SQL queries on data?
+ → R2 Data Catalog (Iceberg)
+ ✅ ACID transactions, time-travel, schema evolution
+ ❌ More setup complexity (namespace, table, catalog token)
+
+Just file storage/archival?
+ → R2 Storage (Parquet)
+ ✅ Simple, direct file access
+ ❌ No built-in SQL queries
+
+Using external tools (Spark/Athena)?
+ → R2 Storage (Parquet with partitioning)
+ ✅ Standard format, partition pruning for performance
+ ❌ Must manage schema compatibility yourself
+```
+
+## Common Use Cases
+
+- **Analytics pipelines**: Clickstream, telemetry, server logs
+- **Data warehousing**: ETL into queryable Iceberg tables
+- **Event processing**: Mobile/IoT with enrichment
+- **Ecommerce analytics**: User events, purchases, views
+
+## Reading Order
+
+**New to Pipelines?** Start here:
+1. [configuration.md](./configuration.md) - Setup streams, sinks, pipelines
+2. [api.md](./api.md) - Send events, TypeScript types, SQL functions
+3. [patterns.md](./patterns.md) - Best practices, integrations, complete example
+4. [gotchas.md](./gotchas.md) - Critical warnings, troubleshooting
+
+**Task-based routing:**
+- Setup pipeline → [configuration.md](./configuration.md)
+- Send/query data → [api.md](./api.md)
+- Implement pattern → [patterns.md](./patterns.md)
+- Debug issue → [gotchas.md](./gotchas.md)
+
+## In This Reference
+
+- [configuration.md](./configuration.md) - wrangler.jsonc bindings, schema definition, sink options, CLI commands
+- [api.md](./api.md) - Pipeline binding interface, send() method, HTTP ingest, SQL function reference
+- [patterns.md](./patterns.md) - Fire-and-forget, schema validation with Zod, integrations, performance tuning
+- [gotchas.md](./gotchas.md) - Silent validation failures, immutable pipelines, latency expectations, limits
+
+## See Also
+
+- [r2](../r2/) - R2 storage backend for sinks
+- [queues](../queues/) - Compare with Queues for async processing
+- [workers](../workers/) - Worker runtime for event ingestion
diff --git a/.agents/skills/cloudflare-deploy/references/pipelines/api.md b/.agents/skills/cloudflare-deploy/references/pipelines/api.md
new file mode 100644
index 0000000..ff302c7
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pipelines/api.md
@@ -0,0 +1,208 @@
+# Pipelines API Reference
+
+## Pipeline Binding Interface
+
+```typescript
+// From @cloudflare/workers-types
+interface Pipeline {
+ send(data: object | object[]): Promise<void>;
+}
+
+interface Env {
+ STREAM: Pipeline;
+}
+
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ // send() returns Promise<void> - no result data
+ await env.STREAM.send([event]);
+ return new Response('OK');
+ }
+} satisfies ExportedHandler<Env>;
+```
+
+**Key points:**
+- `send()` accepts single object or array
+- Always returns `Promise<void>` (no confirmation data)
+- Throws on network/validation errors (wrap in try/catch)
+- Use `ctx.waitUntil()` for fire-and-forget pattern
+
+## Writing Events
+
+### Single Event
+
+```typescript
+await env.STREAM.send([{
+ user_id: "12345",
+ event_type: "purchase",
+ product_id: "widget-001",
+ amount: 29.99
+}]);
+```
+
+### Batch Events
+
+```typescript
+const events = [
+ { user_id: "user1", event_type: "view" },
+ { user_id: "user2", event_type: "purchase", amount: 50 }
+];
+await env.STREAM.send(events);
+```
+
+**Limits:**
+- Max 1 MB per request
+- 5 MB/s per stream
+
+### Fire-and-Forget Pattern
+
+```typescript
+export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ const event = { /* ... */ };
+
+ // Don't block response on send
+ ctx.waitUntil(env.STREAM.send([event]));
+
+ return new Response('OK');
+ }
+};
+```
+
+### Error Handling
+
+```typescript
+try {
+ await env.STREAM.send([event]);
+} catch (error) {
+ console.error('Pipeline send failed:', error);
+ // Log to another system, retry, or return error response
+ return new Response('Failed to track event', { status: 500 });
+}
+```
+
+## HTTP Ingest API
+
+### Endpoint Format
+
+```
+https://{stream-id}.ingest.cloudflare.com
+```
+
+Get `{stream-id}` from: `npx wrangler pipelines streams list`
+
+### Request Format
+
+**CRITICAL:** Must send array, not single object
+
+```bash
+# ✅ Correct
+curl -X POST https://{stream-id}.ingest.cloudflare.com \
+ -H "Content-Type: application/json" \
+ -d '[{"user_id": "123", "event_type": "purchase"}]'
+
+# ❌ Wrong - will fail
+curl -X POST https://{stream-id}.ingest.cloudflare.com \
+ -H "Content-Type: application/json" \
+ -d '{"user_id": "123", "event_type": "purchase"}'
+```
+
+### Authentication
+
+```bash
+curl -X POST https://{stream-id}.ingest.cloudflare.com \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer YOUR_API_TOKEN" \
+ -d '[{"event": "data"}]'
+```
+
+**Required permission:** Workers Pipeline Send
+
+Create token: Dashboard → Workers → API tokens → Create with Pipeline Send permission
+
+### Response Codes
+
+| Code | Meaning | Action |
+|------|---------|--------|
+| 200 | Accepted | Success |
+| 400 | Invalid format | Check JSON array, schema match |
+| 401 | Auth failed | Verify token valid |
+| 413 | Payload too large | Split into smaller batches (<1 MB) |
+| 429 | Rate limited | Back off, retry with delay |
+| 5xx | Server error | Retry with exponential backoff |
+
+## SQL Functions Quick Reference
+
+Available in `INSERT INTO sink SELECT ... FROM stream` transformations:
+
+| Function | Example | Use Case |
+|----------|---------|----------|
+| `UPPER(s)` | `UPPER(event_type)` | Normalize strings |
+| `LOWER(s)` | `LOWER(email)` | Case-insensitive matching |
+| `CONCAT(...)` | `CONCAT(user_id, '_', product_id)` | Generate composite keys |
+| `CASE WHEN ... THEN ... END` | `CASE WHEN amount > 100 THEN 'high' ELSE 'low' END` | Conditional enrichment |
+| `CAST(x AS type)` | `CAST(timestamp AS string)` | Type conversion |
+| `COALESCE(x, y)` | `COALESCE(amount, 0.0)` | Default values |
+| Math operators | `amount * 1.1`, `price / quantity` | Calculations |
+| Comparison | `amount > 100`, `status IN ('active', 'pending')` | Filtering |
+
+**Type names for CAST:** `string`, `int32`, `int64`, `float32`, `float64`, `bool`, `timestamp`
+
+Full reference: [Pipelines SQL Reference](https://developers.cloudflare.com/pipelines/sql-reference/)
+
+## SQL Transform Examples
+
+### Filter Events
+
+```sql
+INSERT INTO my_sink
+SELECT * FROM my_stream
+WHERE event_type = 'purchase' AND amount > 100
+```
+
+### Select Specific Fields
+
+```sql
+INSERT INTO my_sink
+SELECT user_id, event_type, timestamp, amount
+FROM my_stream
+```
+
+### Transform and Enrich
+
+```sql
+INSERT INTO my_sink
+SELECT
+ user_id,
+ UPPER(event_type) as event_type,
+ timestamp,
+ amount * 1.1 as amount_with_tax,
+ CONCAT(user_id, '_', product_id) as unique_key,
+ CASE
+ WHEN amount > 1000 THEN 'high_value'
+ WHEN amount > 100 THEN 'medium_value'
+ ELSE 'low_value'
+ END as customer_tier
+FROM my_stream
+WHERE event_type IN ('purchase', 'refund')
+```
+
+## Querying Results (R2 Data Catalog)
+
+```bash
+export WRANGLER_R2_SQL_AUTH_TOKEN=YOUR_CATALOG_TOKEN
+
+npx wrangler r2 sql query "warehouse_name" "
+SELECT
+ event_type,
+ COUNT(*) as event_count,
+ SUM(amount) as total_revenue
+FROM default.my_table
+WHERE event_type = 'purchase'
+ AND timestamp >= '2025-01-01'
+GROUP BY event_type
+ORDER BY total_revenue DESC
+LIMIT 100"
+```
+
+**Note:** Iceberg tables support standard SQL queries with GROUP BY, JOINs, WHERE, ORDER BY, etc.
diff --git a/.agents/skills/cloudflare-deploy/references/pipelines/configuration.md b/.agents/skills/cloudflare-deploy/references/pipelines/configuration.md
new file mode 100644
index 0000000..75e65f5
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pipelines/configuration.md
@@ -0,0 +1,98 @@
+# Pipelines Configuration
+
+## Worker Binding
+
+```jsonc
+// wrangler.jsonc
+{
+ "pipelines": [
+ { "pipeline": "<stream-id>", "binding": "STREAM" }
+ ]
+}
+```
+
+Get stream ID: `npx wrangler pipelines streams list`
+
+## Schema (Structured Streams)
+
+```json
+{
+ "fields": [
+ { "name": "user_id", "type": "string", "required": true },
+ { "name": "event_type", "type": "string", "required": true },
+ { "name": "amount", "type": "float64", "required": false },
+ { "name": "timestamp", "type": "timestamp", "required": true }
+ ]
+}
+```
+
+**Types:** `string`, `int32`, `int64`, `float32`, `float64`, `bool`, `timestamp`, `json`, `binary`, `list`, `struct`
+
+## Stream Setup
+
+```bash
+# With schema
+npx wrangler pipelines streams create my-stream --schema-file schema.json
+
+# Unstructured (no validation)
+npx wrangler pipelines streams create my-stream
+
+# List/get/delete
+npx wrangler pipelines streams list
+npx wrangler pipelines streams get <stream-id>
+npx wrangler pipelines streams delete <stream-id>
+```
+
+## Sink Configuration
+
+**R2 Data Catalog (Iceberg):**
+```bash
+npx wrangler pipelines sinks create my-sink \
+ --type r2-data-catalog \
+ --bucket my-bucket --namespace default --table events \
+ --catalog-token $TOKEN \
+ --compression zstd --roll-interval 60
+```
+
+**R2 Raw (Parquet):**
+```bash
+npx wrangler pipelines sinks create my-sink \
+ --type r2 --bucket my-bucket --format parquet \
+ --path analytics/events \
+ --partitioning "year=%Y/month=%m/day=%d" \
+ --access-key-id $KEY --secret-access-key $SECRET
+```
+
+| Option | Values | Guidance |
+|--------|--------|----------|
+| `--compression` | `zstd`, `snappy`, `gzip` | `zstd` best ratio, `snappy` fastest |
+| `--roll-interval` | Seconds | Low latency: 10-60, Query perf: 300 |
+| `--roll-size` | MB | Larger = better compression |
+
+## Pipeline Creation
+
+```bash
+npx wrangler pipelines create my-pipeline \
+ --sql "INSERT INTO my_sink SELECT * FROM my_stream WHERE event_type = 'purchase'"
+```
+
+**⚠️ Pipelines are immutable** - cannot modify SQL. Must delete/recreate.
+
+## Credentials
+
+| Type | Permission | Get From |
+|------|------------|----------|
+| Catalog token | R2 Admin Read & Write | Dashboard → R2 → API tokens |
+| R2 credentials | Object Read & Write | `wrangler r2 bucket create` output |
+| HTTP ingest token | Workers Pipeline Send | Dashboard → Workers → API tokens |
+
+## Complete Example
+
+```bash
+npx wrangler r2 bucket create my-bucket
+npx wrangler r2 bucket catalog enable my-bucket
+npx wrangler pipelines streams create my-stream --schema-file schema.json
+npx wrangler pipelines sinks create my-sink --type r2-data-catalog --bucket my-bucket ...
+npx wrangler pipelines create my-pipeline --sql "INSERT INTO my_sink SELECT * FROM my_stream"
+npx wrangler deploy
+```
diff --git a/.agents/skills/cloudflare-deploy/references/pipelines/gotchas.md b/.agents/skills/cloudflare-deploy/references/pipelines/gotchas.md
new file mode 100644
index 0000000..2a2a75f
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pipelines/gotchas.md
@@ -0,0 +1,80 @@
+# Pipelines Gotchas
+
+## Critical Issues
+
+### Events Silently Dropped
+
+**Most common issue.** Events accepted (HTTP 200) but never appear in sink.
+
+**Causes:**
+1. Schema validation fails - structured streams drop invalid events silently
+2. Waiting for roll interval (10-300s) - expected behavior
+
+**Solution:** Validate client-side with Zod:
+```typescript
+const EventSchema = z.object({ user_id: z.string(), amount: z.number() });
+try {
+ const validated = EventSchema.parse(rawEvent);
+ await env.STREAM.send([validated]);
+} catch (e) { /* get immediate feedback */ }
+```
+
+### Pipelines Are Immutable
+
+Cannot modify SQL after creation. Must delete and recreate.
+
+```bash
+npx wrangler pipelines delete old-pipeline
+npx wrangler pipelines create new-pipeline --sql "..."
+```
+
+**Tip:** Use version naming (`events-pipeline-v1`) and keep SQL in version control.
+
+### Worker Binding Not Found
+
+**`env.STREAM is undefined`**
+
+1. Use **stream ID** (not pipeline ID) in `wrangler.jsonc`
+2. Redeploy after adding binding
+
+```bash
+npx wrangler pipelines streams list # Get stream ID
+npx wrangler deploy
+```
+
+## Common Errors
+
+| Error | Cause | Fix |
+|-------|-------|-----|
+| Events not in R2 | Roll interval not elapsed | Wait 10-300s, check `roll_interval` |
+| Schema validation failures | Type mismatch, missing fields | Validate client-side |
+| Rate limit (429) | >5 MB/s per stream | Batch events, request increase |
+| Payload too large (413) | >1 MB request | Split into smaller batches |
+| Cannot delete stream | Pipeline references it | Delete pipelines first |
+| Sink credential errors | Token expired | Recreate sink with new credentials |
+
+## Limits (Open Beta)
+
+| Resource | Limit |
+|----------|-------|
+| Streams/Sinks/Pipelines per account | 20 each |
+| Payload size | 1 MB |
+| Ingest rate per stream | 5 MB/s |
+| Event retention | 24 hours |
+| Recommended batch size | 100 events |
+
+## SQL Limitations
+
+- **No JOINs** - single stream per pipeline
+- **No window functions** - basic SQL only
+- **No subqueries** - must use `INSERT INTO ... SELECT ... FROM`
+- **No schema evolution** - cannot modify after creation
+
+## Debug Checklist
+
+- [ ] Stream exists: `npx wrangler pipelines streams list`
+- [ ] Pipeline healthy: `npx wrangler pipelines get <pipeline-name>`
+- [ ] SQL syntax matches schema
+- [ ] Worker redeployed after binding added
+- [ ] Waited for roll interval
+- [ ] Accepted vs processed count matches (no validation drops)
diff --git a/.agents/skills/cloudflare-deploy/references/pipelines/patterns.md b/.agents/skills/cloudflare-deploy/references/pipelines/patterns.md
new file mode 100644
index 0000000..186b6a2
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pipelines/patterns.md
@@ -0,0 +1,87 @@
+# Pipelines Patterns
+
+## Fire-and-Forget
+
+```typescript
+export default {
+ async fetch(request, env, ctx) {
+ const event = { user_id: '...', event_type: 'page_view', timestamp: new Date().toISOString() };
+ ctx.waitUntil(env.STREAM.send([event])); // Don't block response
+ return new Response('OK');
+ }
+};
+```
+
+## Schema Validation with Zod
+
+```typescript
+import { z } from 'zod';
+
+const EventSchema = z.object({
+ user_id: z.string(),
+ event_type: z.enum(['purchase', 'view']),
+ amount: z.number().positive().optional()
+});
+
+const validated = EventSchema.parse(rawEvent); // Throws on invalid
+await env.STREAM.send([validated]);
+```
+
+**Why:** Structured streams drop invalid events silently. Client validation gives immediate feedback.
+
+## SQL Transform Patterns
+
+```sql
+-- Filter early (reduce storage)
+INSERT INTO my_sink
+SELECT user_id, event_type, amount
+FROM my_stream
+WHERE event_type = 'purchase' AND amount > 10
+
+-- Select only needed fields
+INSERT INTO my_sink
+SELECT user_id, event_type, timestamp FROM my_stream
+
+-- Enrich with CASE
+INSERT INTO my_sink
+SELECT user_id, amount,
+ CASE WHEN amount > 1000 THEN 'vip' ELSE 'standard' END as tier
+FROM my_stream
+```
+
+## Pipelines + Queues Fan-out
+
+```typescript
+await Promise.all([
+ env.ANALYTICS_STREAM.send([event]), // Long-term storage
+ env.PROCESS_QUEUE.send(event) // Immediate processing
+]);
+```
+
+| Need | Use |
+|------|-----|
+| Long-term storage, SQL queries | Pipelines |
+| Immediate processing, retries | Queues |
+| Both | Fan-out pattern |
+
+## Performance Tuning
+
+| Goal | Config |
+|------|--------|
+| Low latency | `--roll-interval 10` |
+| Query performance | `--roll-interval 300 --roll-size 100` |
+| Cost optimal | `--compression zstd --roll-interval 300` |
+
+## Schema Evolution
+
+Pipelines are immutable. Use versioning:
+
+```bash
+# Create v2 stream/sink/pipeline
+npx wrangler pipelines streams create events-v2 --schema-file v2.json
+
+# Dual-write during transition
+await Promise.all([env.EVENTS_V1.send([event]), env.EVENTS_V2.send([event])]);
+
+# Query across versions with UNION ALL
+```
diff --git a/.agents/skills/cloudflare-deploy/references/pulumi/README.md b/.agents/skills/cloudflare-deploy/references/pulumi/README.md
new file mode 100644
index 0000000..e78d807
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pulumi/README.md
@@ -0,0 +1,100 @@
+# Cloudflare Pulumi Provider
+
+Expert guidance for Cloudflare Pulumi Provider (@pulumi/cloudflare).
+
+## Overview
+
+Programmatic management of Cloudflare resources: Workers, Pages, D1, KV, R2, DNS, Queues, etc.
+
+**Packages:**
+- TypeScript/JS: `@pulumi/cloudflare`
+- Python: `pulumi-cloudflare`
+- Go: `github.com/pulumi/pulumi-cloudflare/sdk/v6/go/cloudflare`
+- .NET: `Pulumi.Cloudflare`
+
+**Version:** v6.x
+
+## Core Principles
+
+1. Use API tokens (not legacy API keys)
+2. Store accountId in stack config
+3. Match binding names across code/config
+4. Use `module: true` for ES modules
+5. Set `compatibilityDate` to lock behavior
+
+## Authentication
+
+```typescript
+import * as cloudflare from "@pulumi/cloudflare";
+
+// API Token (recommended): CLOUDFLARE_API_TOKEN env
+const provider = new cloudflare.Provider("cf", { apiToken: process.env.CLOUDFLARE_API_TOKEN });
+
+// API Key (legacy): CLOUDFLARE_API_KEY + CLOUDFLARE_EMAIL env
+const provider = new cloudflare.Provider("cf", { apiKey: process.env.CLOUDFLARE_API_KEY, email: process.env.CLOUDFLARE_EMAIL });
+
+// API User Service Key: CLOUDFLARE_API_USER_SERVICE_KEY env
+const provider = new cloudflare.Provider("cf", { apiUserServiceKey: process.env.CLOUDFLARE_API_USER_SERVICE_KEY });
+```
+
+## Setup
+
+**Pulumi.yaml:**
+```yaml
+name: my-cloudflare-app
+runtime: nodejs
+config:
+ cloudflare:apiToken:
+ value: ${CLOUDFLARE_API_TOKEN}
+```
+
+**Pulumi.<stack>.yaml:**
+```yaml
+config:
+ cloudflare:accountId: "abc123..."
+```
+
+**index.ts:**
+```typescript
+import * as pulumi from "@pulumi/pulumi";
+import * as cloudflare from "@pulumi/cloudflare";
+const accountId = new pulumi.Config("cloudflare").require("accountId");
+```
+
+## Common Resource Types
+- `Provider` - Provider config
+- `WorkerScript` - Worker
+- `WorkersKvNamespace` - KV
+- `R2Bucket` - R2
+- `D1Database` - D1
+- `Queue` - Queue
+- `PagesProject` - Pages
+- `DnsRecord` - DNS
+- `WorkerRoute` - Worker route
+- `WorkersDomain` - Custom domain
+
+## Key Properties
+- `accountId` - Required for most resources
+- `zoneId` - Required for DNS/domain
+- `name`/`title` - Resource identifier
+- `*Bindings` - Connect resources to Workers
+
+## Reading Order
+
+| Order | File | What | When to Read |
+|-------|------|------|--------------|
+| 1 | [configuration.md](./configuration.md) | Resource config for Workers/KV/D1/R2/Queues/Pages | First time setup, resource reference |
+| 2 | [patterns.md](./patterns.md) | Architecture patterns, multi-env, component resources | Building complex apps, best practices |
+| 3 | [api.md](./api.md) | Outputs, dependencies, imports, dynamic providers | Advanced features, integrations |
+| 4 | [gotchas.md](./gotchas.md) | Common errors, troubleshooting, limits | Debugging, deployment issues |
+
+## In This Reference
+- [configuration.md](./configuration.md) - Provider config, stack setup, Workers/bindings
+- [api.md](./api.md) - Resource types, Workers script, KV/D1/R2/queues/Pages
+- [patterns.md](./patterns.md) - Multi-env, secrets, CI/CD, stack management
+- [gotchas.md](./gotchas.md) - State issues, deployment failures, limits
+
+## See Also
+- [terraform](../terraform/) - Alternative IaC for Cloudflare
+- [wrangler](../wrangler/) - CLI deployment alternative
+- [workers](../workers/) - Worker runtime documentation
diff --git a/.agents/skills/cloudflare-deploy/references/pulumi/api.md b/.agents/skills/cloudflare-deploy/references/pulumi/api.md
new file mode 100644
index 0000000..332cfef
--- /dev/null
+++ b/.agents/skills/cloudflare-deploy/references/pulumi/api.md
@@ -0,0 +1,200 @@
+# API & Data Sources
+
+## Outputs and Exports
+
+Export resource identifiers:
+
+```typescript
+export const kvId = kv.id;
+export const bucketName = bucket.name;
+export const workerUrl = worker.subdomain;
+export const dbId = db.id;
+```
+
+## Resource Dependencies
+
+Implicit dependencies via outputs:
+
+```typescript
+const kv = new cloudflare.WorkersKvNamespace("kv", {
+ accountId: accountId,
+ title: "my-kv",
+});
+
+// Worker depends on KV (implicit via kv.id)
+const worker = new cloudflare.WorkerScript("worker", {
+ accountId: accountId,
+ name: "my-worker",
+ content: code,
+ kvNamespaceBindings: [{name: "MY_KV", namespaceId: kv.id}], // Creates dependency
+});
+```
+
+Explicit dependencies:
+
+```typescript
+const migration = new command.local.Command("migration", {
+ create: pulumi.interpolate`wrangler d1 execute ${db.name} --file ./schema.sql`,
+}, {dependsOn: [db]});
+
+const worker = new cloudflare.WorkerScript("worker", {
+ accountId: accountId,
+ name: "worker",
+ content: code,
+ d1DatabaseBindings: [{name: "DB", databaseId: db.id}],
+}, {dependsOn: [migration]}); // Ensure migrations run first
+```
+
+## Using Outputs with API Calls
+
+```typescript
+const db = new cloudflare.D1Database("db", {accountId, name: "my-db"});
+
+db.id.apply(async (dbId) => {
+ const response = await fetch(
+ `https://api.cloudflare.com/client/v4/accounts/${accountId}/d1/database/${dbId}/query`,
+ {method: "POST", headers: {"Authorization": `Bearer ${apiToken}`, "Content-Type": "application/json"},
+ body: JSON.stringify({sql: "CREATE TABLE users (id INT)"})}
+ );
+ return response.json();
+});
+```
+
+## Custom Dynamic Providers
+
+For resources not in provider:
+
+```typescript
+import * as pulumi from "@pulumi/pulumi";
+
+class D1MigrationProvider implements pulumi.dynamic.ResourceProvider {
+ async create(inputs: any): Promise {
+ const response = await fetch(
+ `https://api.cloudflare.com/client/v4/accounts/${inputs.accountId}/d1/database/${inputs.databaseId}/query`,
+ {method: "POST", headers: {"Authorization": `Bearer ${inputs.apiToken}`, "Content-Type": "application/json"},
+ body: JSON.stringify({sql: inputs.sql})}
+ );
+ return {id: `${inputs.databaseId}-${Date.now()}`, outs: await response.json()};
+ }
+ async update(id: string, olds: any, news: any): Promise<pulumi.dynamic.UpdateResult> {
+ if (olds.sql !== news.sql) await this.create(news);
+ return {};
+ }
+ async delete(id: string, props: any): Promise<void> {}
+}
+
+class D1Migration extends pulumi.dynamic.Resource {
+ constructor(name: string, args: any, opts?: pulumi.CustomResourceOptions) {
+ super(new D1MigrationProvider(), name, args, opts);
+ }
+}
+
+const migration = new D1Migration("migration", {
+ accountId, databaseId: db.id, apiToken, sql: "CREATE TABLE users (id INT)",
+}, {dependsOn: [db]});
+```
+
+## Data Sources
+
+**Get Zone:**
+```typescript
+const zone = cloudflare.getZone({name: "example.com"});
+const zoneId = zone.then(z => z.id);
+```
+
+**Get Accounts (via API):**
+Use Cloudflare API directly or custom dynamic resources.
+
+## Import Existing Resources
+
+```bash
+# Import worker
+pulumi import cloudflare:index/workerScript:WorkerScript my-worker <account-id>/<script-name>
+
+# Import KV namespace
+pulumi import cloudflare:index/workersKvNamespace:WorkersKvNamespace my-kv <account-id>/<namespace-id>
+
+# Import R2 bucket
+pulumi import cloudflare:index/r2Bucket:R2Bucket my-bucket <account-id>/<bucket-name>
+
+# Import D1 database
+pulumi import cloudflare:index/d1Database:D1Database my-db <account-id>/<database-id>
+
+# Import DNS record
+pulumi import cloudflare:index/dnsRecord:DnsRecord my-record <zone-id>/<record-id>