diff --git a/.github/workflows/docker-build-test.yml b/.github/workflows/docker-build-test.yml
new file mode 100644
index 000000000..d2c118ffb
--- /dev/null
+++ b/.github/workflows/docker-build-test.yml
@@ -0,0 +1,59 @@
+name: Build and Push Docker Image (Branch)
+
+on:
+ push:
+ branches-ignore:
+ - main
+
+jobs:
+ build-and-push:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Clean up before build
+ run: |
+ # Clean up unused packages to avoid disk space issues
+ sudo rm -rf /usr/local/.ghcup
+ sudo rm -rf /opt/hostedtoolcache/CodeQL
+ sudo rm -rf /usr/local/lib/android/sdk/ndk
+ sudo rm -rf /usr/share/dotnet
+ sudo rm -rf /opt/ghc
+ sudo rm -rf /usr/local/share/boost
+
+ # Show disk usage before build
+ echo "Disk usage before build:"
+ df -h
+
+ - name: Log in to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: daydreamlive
+ password: ${{ secrets.DOCKER_HUB_TOKEN }}
+
+ - name: Generate Docker tags
+ id: tag
+ run: |
+          # Replace / with - for valid Docker tag (e.g., feature/foo -> feature-foo); NOTE(review): prefer passing github.ref_name via an env: var instead of interpolating it into the script — branch names are attacker-controllable and this is a shell-injection vector
+ BRANCH_TAG=$(echo "${{ github.ref_name }}" | sed 's/\//-/g')
+ # Short SHA (first 7 characters)
+ SHORT_SHA=$(echo "${{ github.sha }}" | cut -c1-7)
+          echo "branch_tag=$BRANCH_TAG" >> "$GITHUB_OUTPUT"
+          echo "sha_tag=$SHORT_SHA" >> "$GITHUB_OUTPUT"
+
+ - name: Build and push Docker image
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ platforms: linux/amd64
+ push: true
+ tags: |
+ daydreamlive/scope:${{ steps.tag.outputs.branch_tag }}
+ daydreamlive/scope:${{ steps.tag.outputs.sha_tag }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
diff --git a/FAL_DEPLOYMENT.md b/FAL_DEPLOYMENT.md
new file mode 100644
index 000000000..c14d1fb1a
--- /dev/null
+++ b/FAL_DEPLOYMENT.md
@@ -0,0 +1,188 @@
+# Deploying Scope to fal.ai
+
+This guide explains how to deploy the Scope backend to fal.ai serverless.
+
+## Architecture
+
+```
+┌──────────────────────────────────────────────────────────────────┐
+│ fal.ai Runner │
+│ ┌─────────────────────────────────────────────────────────────┐ │
+│ │ fal_app.py │ │
+│ │ ┌─────────────────┐ ┌─────────────────────────────┐│ │
+│ │ │ WebSocket │ ──────► │ Scope Backend ││ │
+│ │ │ Endpoint │ HTTP │ (uv run daydream-scope) ││ │
+│ │ │ /ws │ Proxy │ localhost:8000 ││ │
+│ │ └────────┬────────┘ └──────────────┬──────────────┘│ │
+│ │ │ │ │ │
+│ └───────────┼─────────────────────────────────┼────────────────┘ │
+└──────────────┼─────────────────────────────────┼─────────────────┘
+ │ │
+ WebSocket │ WebRTC │
+ (signaling │ (video) │
+ + API) │ │
+ ▼ ▼
+┌──────────────────────────────────────────────────────────────────┐
+│ Browser │
+│ ┌─────────────────────────────────────────────────────────────┐ │
+│ │ Frontend with FalAdapter │ │
+│ │ - API calls go through WebSocket │ │
+│ │ - WebRTC signaling goes through WebSocket │ │
+│ │ - Video frames flow directly via WebRTC │ │
+│ └─────────────────────────────────────────────────────────────┘ │
+└──────────────────────────────────────────────────────────────────┘
+```
+
+## How It Works
+
+1. **Single WebSocket Connection**: All communication (API calls + WebRTC signaling) goes through one WebSocket connection to prevent fal from spawning new runner instances.
+
+2. **Scope Runs as Subprocess**: The Scope backend runs inside the fal container via `uv run daydream-scope --no-browser`.
+
+3. **WebRTC Video Flows Directly**: Once signaling is complete, video frames flow directly via WebRTC (UDP/RTP) between browser and fal runner.
+
+## Deployment
+
+### 1. Deploy to fal.ai
+
+```bash
+cd scope
+fal deploy fal_app.py
+```
+
+This will output a URL like: `https://fal.run/your-username/scope-app`
+
+### 2. Update Frontend to Use FalAdapter
+
+In your frontend initialization (e.g., `main.tsx` or `App.tsx`):
+
+```typescript
+import { initFalAdapter } from "./lib/falAdapter";
+
+// Initialize when running on fal
+const FAL_WS_URL = "wss://fal.run/your-username/scope-app/ws";
+
+async function initApp() {
+ // Check if we should use fal mode
+ const useFal = import.meta.env.VITE_USE_FAL === "true";
+
+ if (useFal) {
+ const adapter = initFalAdapter(FAL_WS_URL);
+ await adapter.connect();
+ console.log("Connected to fal.ai backend");
+ }
+}
+
+initApp();
+```
+
+### 3. Use the FalAdapter in Components
+
+For API calls, use the adapter's API methods:
+
+```typescript
+import { getFalAdapter, isFalMode } from "./lib/falAdapter";
+import { getPipelineStatus } from "./lib/api";
+
+async function fetchStatus() {
+ if (isFalMode()) {
+ const adapter = getFalAdapter()!;
+ return adapter.api.getPipelineStatus();
+ } else {
+ return getPipelineStatus();
+ }
+}
+```
+
+For WebRTC, use the `useWebRTCFal` hook:
+
+```typescript
+import { useWebRTC } from "./hooks/useWebRTC";
+import { useWebRTCFal } from "./hooks/useWebRTCFal";
+import { getFalAdapter, isFalMode } from "./lib/falAdapter";
+
+function VideoStream() {
+ // Choose the right hook based on deployment mode
+ const adapter = getFalAdapter();
+
+ const webrtc = isFalMode() && adapter
+ ? useWebRTCFal({ adapter })
+ : useWebRTC();
+
+ // Use webrtc.startStream, webrtc.stopStream, etc.
+}
+```
+
+## WebSocket Protocol
+
+All messages are JSON with a `type` field.
+
+### WebRTC Signaling
+
+```typescript
+// Get ICE servers
+{ "type": "get_ice_servers" }
+// Response: { "type": "ice_servers", "data": { "iceServers": [...] } }
+
+// Send SDP offer
+{ "type": "offer", "sdp": "...", "sdp_type": "offer", "initialParameters": {...} }
+// Response: { "type": "answer", "sdp": "...", "sdp_type": "answer", "sessionId": "..." }
+
+// Send ICE candidate
+{ "type": "icecandidate", "sessionId": "...", "candidate": { "candidate": "...", "sdpMid": "...", "sdpMLineIndex": 0 } }
+// Response: { "type": "icecandidate_ack", "status": "ok" }
+```
+
+### API Proxy
+
+```typescript
+// Make API request
+{
+ "type": "api",
+ "method": "GET", // or POST, PATCH, DELETE
+ "path": "/api/v1/pipeline/status",
+ "body": null, // for POST/PATCH
+ "request_id": "req_123" // for correlating responses
+}
+
+// Response
+{
+ "type": "api_response",
+ "request_id": "req_123",
+ "status": 200,
+ "data": { ... }
+}
+```
+
+### Keepalive
+
+```typescript
+{ "type": "ping" }
+// Response: { "type": "pong" }
+```
+
+## Environment Variables
+
+The fal container inherits environment variables. Set these in your fal deployment:
+
+- `HF_TOKEN` - Hugging Face token for TURN server access
+- `PIPELINE` - Default pipeline to pre-warm (optional)
+
+## Limitations
+
+1. **File Downloads**: Binary file downloads (recordings, logs) need special handling. The adapter provides URLs that the browser can fetch directly.
+
+2. **File Uploads**: Files are base64-encoded when sent through WebSocket, which increases size by ~33%.
+
+3. **Connection Persistence**: The WebSocket connection must stay open to keep the runner alive. If it disconnects, you may get a new runner.
+
+## Troubleshooting
+
+### "WebSocket not connected"
+Make sure `adapter.connect()` completes before making API calls.
+
+### WebRTC connection fails
+Check that TURN servers are configured. The fal runner needs a public IP or TURN relay for WebRTC to work.
+
+### New runner spawned for each request
+Make sure ALL API calls go through the FalAdapter WebSocket, not direct HTTP fetch.
diff --git a/docs/fal-server-integration-plan.md b/docs/fal-server-integration-plan.md
new file mode 100644
index 000000000..68055a745
--- /dev/null
+++ b/docs/fal-server-integration-plan.md
@@ -0,0 +1,2190 @@
+# Plan: Move fal.ai Integration from Frontend to Server
+
+## Overview
+
+Move the fal serverless SDK integration from the frontend to the scope server, enabling:
+1. Local input sources (webcam via WebRTC, Spout) to be sent to fal cloud for GPU inference
+2. Inference results returned to scope server
+3. Output sent via Spout to other applications
+
+## Reference Implementation Analysis
+
+**Source:** [fal-ai-community/fal-demos/yolo_webcam_webrtc](https://github.com/fal-ai-community/fal-demos/tree/main/fal_demos/video/yolo_webcam_webrtc)
+
+### Key Patterns from Reference
+
+1. **fal.ai acts as WebRTC server** - accepts offers, sends answers
+2. **Client creates offers** - browser (or Scope server) initiates WebRTC connection
+3. **Simple signaling protocol:**
+ - Server sends `{"type": "ready"}` when WebSocket connects
+ - Client sends `{"type": "offer", "sdp": "..."}`
+ - Server responds `{"type": "answer", "sdp": "..."}`
+ - Both exchange `{"type": "icecandidate", "candidate": {...}}`
+
+4. **Token authentication:**
+ ```
+ POST https://rest.alpha.fal.ai/tokens/
+ Authorization: Key {api_key}
+ Body: {"allowed_apps": [alias], "token_expiration": 120}
+ ```
+ WebSocket URL: `wss://fal.run/{appId}?fal_jwt_token={token}`
+
+5. **Track processing pattern (YOLOTrack):**
+ ```python
+ @pc.on("track")
+ def on_track(track):
+ if track.kind == "video":
+ pc.addTrack(create_processing_track(track, model))
+ ```
+ The server wraps incoming track with a processing track that transforms each frame.
+
+### Differences from Our Use Case
+
+| Aspect | Reference (YOLO) | Our Use Case |
+|--------|------------------|--------------|
+| **Who is client?** | Browser | Scope Server |
+| **Who is server?** | fal.ai | fal.ai (same) |
+| **Input source** | Browser webcam | Spout/WebRTC from Scope |
+| **Processing** | YOLO detection | Video diffusion pipeline |
+| **Output destination** | Browser video element | Spout sender |
+
+**Key insight:** Since fal.ai always acts as WebRTC server, Scope server must act as WebRTC client (create offers, receive answers). This is the *opposite* of how Scope server handles browser WebRTC connections.
+
+---
+
+## Current Architecture (Frontend-based fal)
+
+```
+Browser ──WebRTC──► fal.ai ──proxy──► Scope Backend ──► GPU ──► Scope Backend ──► fal.ai ──WebRTC──► Browser
+ (WebSocket)
+```
+
+**Current Implementation:**
+- `fal_app.py`: fal serverless app that spawns Scope backend as subprocess
+- `falAdapter.ts`: WebSocket client for API proxying + WebRTC signaling
+- `falContext.tsx`: React context provider for fal mode
+- `useUnifiedWebRTC.ts`: Mode-agnostic WebRTC hook
+
+## Proposed Architecture (Server-based fal)
+
+### What Runs Where
+
+| Component | Local Machine | fal.ai Cloud |
+|-----------|---------------|--------------|
+| **Scope Server** | ✅ Runs in **CLOUD MODE** (FalClient enabled) | ✅ Runs in **LOCAL MODE** (normal operation) |
+| **WebRTC Role** | **Client** (creates offers via FalClient) | **Server** (accepts offers, same as browser) |
+| **Pipelines** | ❌ Disabled (frames sent to fal) | ✅ Enabled for GPU inference |
+| **Video Input** | Spout receiver, WebRTC from browser | WebRTC from local Scope's FalClient |
+| **Video Output** | Spout sender, WebRTC to browser | WebRTC to local Scope's FalClient |
+| **Parameter Source** | Browser UI → forwarded to fal | Received via WebRTC data channel |
+
+**Key terminology:**
+- **CLOUD MODE**: `fal_enabled=True` - frames routed to fal via FalClient, local pipelines disabled
+- **LOCAL MODE**: `fal_enabled=False` - normal operation, frames processed by local pipelines
+
+### Key Insight: No Changes Needed on fal Side
+
+The existing `fal_app.py` already:
+1. Starts the **full Scope server** as a subprocess
+2. Acts as a WebSocket proxy for WebRTC signaling
+3. Scope server on fal is already a **WebRTC server** (accepts offers, sends answers)
+
+The only change is **who connects as the WebRTC client** - instead of the browser, it's the local Scope server.
+
+### Detailed Architecture Diagram
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ LOCAL MACHINE │
+│ │
+│ ┌─────────────┐ ┌──────────────────────────────────────────────────────┐ │
+│ │ Browser │ │ Scope Server (CLOUD MODE: fal_enabled=True) │ │
+│ │ │ │ │ │
+│ │ - UI │ │ ┌─────────────────┐ ┌─────────────────────┐ │ │
+│ │ - Preview │◄────┼──│ WebRTC Server │ │ FalClient │ │ │
+│ │ - Params │────►│ │ (to browser) │ │ (WebRTC CLIENT) │────┼──┼─────┐
+│ │ │ │ └─────────────────┘ │ │ │ │ │
+│ └─────────────┘ │ │ │ - WebSocket conn │ │ │ │
+│ │ │ │ - Creates offers │ │ │ │
+│ ┌─────────────┐ │ ▼ │ - Sends frames │ │ │ │
+│ │ Spout Input │────►│ ┌─────────────────┐ │ - Receives frames │ │ │ │
+│ │ (e.g., OBS) │ │ │ FrameProcessor │◄────►│ - Forwards params │ │ │ │
+│ └─────────────┘ │ │ │ └─────────────────────┘ │ │ │
+│ │ │ fal_enabled=True│ │ │ │
+│ ┌─────────────┐ │ │ - Routes frames │ [Local pipelines DISABLED] │ │ │
+│ │ Spout Output│◄────┼──│ to FalClient │ │ │ │
+│ │ (e.g., VJ) │ │ │ - Routes params │ │ │ │
+│ └─────────────┘ │ │ to FalClient │ │ │ │
+│ │ └─────────────────┘ │ │ │
+│ └──────────────────────────────────────────────────────┘ │ │
+└─────────────────────────────────────────────────────────────────────────────────┘ │
+ │
+ WebRTC Video Stream (bidirectional) │
+ + WebRTC Data Channel (parameters) │
+ + WebSocket (signaling only) │
+ │
+┌───────────────────────────────────────────────────────────────────────────────────────┼─┐
+│ FAL.AI CLOUD (H100 GPU) │ │
+│ │ │
+│ ┌─────────────────────────────────────────────────────────────────────────────────┐ │ │
+│ │ fal_app.py (ScopeApp) │ │ │
+│ │ │ │ │
+│ │ WebSocket Endpoint (/ws) ◄───────────────────────────────────────────────────┼─┘ │
+│ │ │ │ │
+│ │ │ Proxies signaling to subprocess │ │
+│ │ ▼ │ │
+│ │ ┌─────────────────────────────────────────────────────────────────────────┐ │ │
+│ │ │ Scope Server (LOCAL MODE: fal_enabled=False, subprocess) │ │ │
+│ │ │ Command: uv run daydream-scope │ │ │
+│ │ │ │ │ │
+│ │ │ ┌───────────────────┐ ┌────────────────┐ ┌─────────────────┐ │ │ │
+│ │ │ │ WebRTC Server │───►│ FrameProcessor │───►│ Pipeline │ │ │ │
+│ │ │ │ (accepts offers │ │ │ │ (GPU inference) │ │ │ │
+│ │ │ │ from FalClient) │◄───│ fal_enabled= │◄───│ │ │ │ │
+│ │ │ │ │ │ False │ │ - LongLive │ │ │ │
+│ │ │ │ - Receives frames │ │ │ │ - VACE │ │ │ │
+│ │ │ │ - Sends frames │ │ Routes to │ │ - etc. │ │ │ │
+│ │ │ │ - Receives params │ │ local pipeline │ │ │ │ │ │
+│ │ │ └───────────────────┘ └────────────────┘ └─────────────────┘ │ │ │
+│ │ │ │ │ │
+│ │ │ [Local pipelines ENABLED - this is where GPU inference happens] │ │ │
+│ │ └─────────────────────────────────────────────────────────────────────────┘ │ │
+│ │ │ │
+│ └─────────────────────────────────────────────────────────────────────────────────┘ │
+│ │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+### Data Flow
+
+#### Video Frames
+```
+Spout Input ──► Local FrameProcessor ──► FalClient ══WebRTC══► fal Scope Server ──► Pipeline/GPU
+ │
+Spout Output ◄── Local FrameProcessor ◄── FalClient ◄══WebRTC══════════┘ (processed frames)
+```
+
+#### Parameters (prompts, noise_scale, etc.)
+```
+Browser UI ──► Local Scope Server ──► FalClient Data Channel ══WebRTC══► fal Scope Server ──► Pipeline
+ (WebRTC data channel) (forwards params) (applies to pipeline)
+```
+
+### Parameter Forwarding Detail
+
+Parameters flow through **two WebRTC data channels**:
+
+1. **Browser → Local Scope Server** (existing)
+ - Browser sends params via WebRTC data channel (same as local mode)
+ - Local FrameProcessor receives params
+
+2. **Local Scope Server → fal Cloud** (new)
+ - When `fal_enabled=True`, FrameProcessor forwards params to FalClient
+ - FalClient sends params via its own WebRTC data channel to fal
+ - fal Scope Server applies params to pipeline
+
+```python
+# In FrameProcessor.update_parameters():
+def update_parameters(self, params: dict):
+ # Handle local-only params (Spout config)
+ if "spout_sender" in params:
+ self._update_spout_sender(params.pop("spout_sender"))
+
+ # Route remaining params based on mode
+ if self.fal_enabled and self.fal_client:
+ # Forward to fal cloud
+ self.fal_client.send_parameters(params)
+ else:
+ # Apply locally
+ for processor in self.pipeline_processors:
+ processor.update_parameters(params)
+```
+
+### Summary
+
+| Aspect | Before (Browser → fal) | After (Local Scope → fal) |
+|--------|------------------------|---------------------------|
+| **WebRTC Client** | Browser | Local Scope Server (FalClient) |
+| **WebRTC Server** | fal Scope Server | fal Scope Server (unchanged) |
+| **Video Source** | Browser webcam | Spout / local WebRTC |
+| **Video Destination** | Browser video element | Spout / local WebRTC |
+| **Parameter Source** | Browser UI | Browser UI → forwarded via FalClient |
+| **fal_app.py Changes** | N/A | **None required** |
+
+---
+
+## Implementation Plan
+
+### Phase 1: Create Server-Side fal Client Module
+
+**New file: `src/scope/server/fal_client.py`**
+
+This module handles WebSocket and WebRTC connection to fal.ai from the server. Based on the reference implementation, Scope acts as the WebRTC *client* (creates offers).
+
+```python
+import asyncio
+import json
+import logging
+from typing import Callable
+
+import websockets
+from aiortc import RTCConfiguration, RTCIceServer, RTCPeerConnection, RTCSessionDescription
+from aiortc.sdp import candidate_from_sdp
+from av import VideoFrame
+
+logger = logging.getLogger(__name__)
+
+TOKEN_EXPIRATION_SECONDS = 120
+
+
+class FalClient:
+ """WebSocket + WebRTC client for connecting to fal.ai cloud.
+
+ Based on fal-demos/yolo_webcam_webrtc reference implementation.
+ Scope acts as WebRTC client (creates offers), fal.ai acts as server.
+ """
+
+ def __init__(
+ self,
+ app_id: str,
+ api_key: str,
+ on_frame_received: Callable[[VideoFrame], None] | None = None,
+ ):
+ self.app_id = app_id # e.g., "owner/app-name/webrtc"
+ self.api_key = api_key
+ self.on_frame_received = on_frame_received
+
+ self.ws: websockets.WebSocketClientProtocol | None = None
+ self.pc: RTCPeerConnection | None = None
+ self.output_track: "FalOutputTrack | None" = None
+ self.stop_event = asyncio.Event()
+ self._receive_task: asyncio.Task | None = None
+
+ async def _get_temporary_token(self) -> str:
+ """Get temporary JWT token from fal API (mirrors frontend pattern)."""
+ import aiohttp
+
+ # Extract alias from app_id (e.g., "owner/app-name/webrtc" -> "app-name")
+ parts = self.app_id.split("/")
+ alias = parts[1] if len(parts) >= 2 else self.app_id
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ "https://rest.alpha.fal.ai/tokens/",
+ headers={
+ "Authorization": f"Key {self.api_key}",
+ "Content-Type": "application/json",
+ },
+ json={
+ "allowed_apps": [alias],
+ "token_expiration": TOKEN_EXPIRATION_SECONDS,
+ },
+ ) as resp:
+ if not resp.ok:
+ error_body = await resp.text()
+ raise RuntimeError(f"Token request failed: {resp.status} {error_body}")
+ token = await resp.json()
+ # Handle both string and object responses
+ if isinstance(token, dict) and "detail" in token:
+ return token["detail"]
+ return token
+
+ def _build_ws_url(self, token: str) -> str:
+ """Build WebSocket URL with JWT token (mirrors frontend pattern)."""
+ app_id = self.app_id.strip("/")
+ return f"wss://fal.run/{app_id}?fal_jwt_token={token}"
+
+ async def connect(self) -> None:
+ """Connect to fal WebSocket and establish WebRTC connection."""
+ # Get temporary token
+ token = await self._get_temporary_token()
+ ws_url = self._build_ws_url(token)
+
+ logger.info(f"Connecting to fal WebSocket: {ws_url[:50]}...")
+ self.ws = await websockets.connect(ws_url)
+
+ # Wait for "ready" message from server
+ ready_msg = await self.ws.recv()
+ ready_data = json.loads(ready_msg)
+ if ready_data.get("type") != "ready":
+ raise RuntimeError(f"Expected 'ready' message, got: {ready_data}")
+ logger.info("fal server ready")
+
+ # Create peer connection
+        self.pc = RTCPeerConnection(
+            configuration=RTCConfiguration(iceServers=[RTCIceServer(urls="stun:stun.l.google.com:19302")])  # aiortc requires an RTCConfiguration object, not a plain dict
+        )
+
+ # Set up event handlers
+ self._setup_pc_handlers()
+
+ # Add output track (for sending frames to fal)
+ from scope.server.fal_tracks import FalOutputTrack
+ self.output_track = FalOutputTrack()
+ self.pc.addTrack(self.output_track)
+
+ # Create and send offer (we are the client)
+ offer = await self.pc.createOffer()
+ await self.pc.setLocalDescription(offer)
+ await self.ws.send(json.dumps({
+ "type": "offer",
+ "sdp": self.pc.localDescription.sdp,
+ }))
+ logger.info("Sent WebRTC offer")
+
+ # Start message receive loop
+ self._receive_task = asyncio.create_task(self._receive_loop())
+
+ def _setup_pc_handlers(self):
+ """Set up RTCPeerConnection event handlers."""
+
+        @self.pc.on("icecandidate")  # NOTE(review): aiortc does not emit "icecandidate" — ICE gathering completes inside setLocalDescription() and candidates ship in the SDP; confirm whether trickle ICE is actually needed here
+ async def on_icecandidate(candidate):
+ if self.ws is None:
+ return
+ if candidate is None:
+ await self.ws.send(json.dumps({
+ "type": "icecandidate",
+ "candidate": None,
+ }))
+ else:
+ await self.ws.send(json.dumps({
+ "type": "icecandidate",
+ "candidate": {
+ "candidate": candidate.candidate,
+ "sdpMid": candidate.sdpMid,
+ "sdpMLineIndex": candidate.sdpMLineIndex,
+ },
+ }))
+
+ @self.pc.on("connectionstatechange")
+ async def on_connectionstatechange():
+ logger.info(f"Connection state: {self.pc.connectionState}")
+ if self.pc.connectionState in ("failed", "closed", "disconnected"):
+ self.stop_event.set()
+
+ @self.pc.on("track")
+ def on_track(track):
+ """Handle incoming track (processed frames from fal)."""
+ if track.kind == "video":
+ logger.info("Received video track from fal")
+ asyncio.create_task(self._consume_track(track))
+
+ async def _consume_track(self, track):
+ """Consume frames from the incoming track."""
+ while not self.stop_event.is_set():
+ try:
+ frame = await track.recv()
+ if self.on_frame_received:
+ self.on_frame_received(frame)
+ except Exception as e:
+ logger.error(f"Error receiving frame: {e}")
+ break
+
+ async def _receive_loop(self):
+ """Receive and handle WebSocket messages."""
+ try:
+ while not self.stop_event.is_set():
+ try:
+ message = await asyncio.wait_for(
+ self.ws.recv(),
+ timeout=1.0,
+ )
+ except asyncio.TimeoutError:
+ continue
+
+ try:
+ data = json.loads(message)
+ except json.JSONDecodeError:
+ logger.warning(f"Non-JSON message: {message}")
+ continue
+
+ msg_type = data.get("type")
+
+ if msg_type == "answer":
+ # Set remote description from server's answer
+ answer = RTCSessionDescription(
+ sdp=data["sdp"],
+ type="answer",
+ )
+ await self.pc.setRemoteDescription(answer)
+ logger.info("Set remote description from answer")
+
+ elif msg_type == "icecandidate":
+ candidate_data = data.get("candidate")
+ if candidate_data is None:
+ await self.pc.addIceCandidate(None)
+ else:
+                        candidate = candidate_from_sdp(candidate_data.get("candidate", "").removeprefix("candidate:"))  # strip the "candidate:" prefix browsers send; aiortc's parser expects the bare SDP fragment
+ candidate.sdpMid = candidate_data.get("sdpMid")
+ candidate.sdpMLineIndex = candidate_data.get("sdpMLineIndex")
+ await self.pc.addIceCandidate(candidate)
+
+ elif msg_type == "error":
+ logger.error(f"Server error: {data.get('error')}")
+
+ else:
+ logger.debug(f"Unknown message type: {msg_type}")
+
+ except websockets.exceptions.ConnectionClosed:
+ logger.info("WebSocket connection closed")
+ except Exception as e:
+ logger.error(f"Receive loop error: {e}")
+ finally:
+ self.stop_event.set()
+
+ async def send_frame(self, frame: VideoFrame) -> None:
+ """Send a frame to fal for processing."""
+ if self.output_track:
+ await self.output_track.put_frame(frame)
+
+ async def disconnect(self) -> None:
+ """Close WebRTC and WebSocket connections."""
+ self.stop_event.set()
+
+ if self._receive_task:
+ self._receive_task.cancel()
+ try:
+ await self._receive_task
+ except asyncio.CancelledError:
+ pass
+
+ if self.pc:
+ await self.pc.close()
+ self.pc = None
+
+ if self.ws:
+ await self.ws.close()
+ self.ws = None
+
+ logger.info("Disconnected from fal")
+```
+
+### Phase 2: Create fal Video Track for Sending Frames
+
+**New file: `src/scope/server/fal_tracks.py`**
+
+Custom aiortc MediaStreamTrack for sending frames to fal. This follows the same pattern as `YOLOTrack` in the reference, but for outbound frames.
+
+```python
+import asyncio
+import fractions
+import time
+
+from aiortc.mediastreams import MediaStreamTrack
+from av import VideoFrame
+
+
+class FalOutputTrack(MediaStreamTrack):
+ """Sends frames from queue to fal via WebRTC.
+
+ This is the outbound track - frames are put into the queue
+ and sent to fal.ai for processing.
+ """
+
+ kind = "video"
+
+ def __init__(self, target_fps: int = 30):
+ super().__init__()
+ self.frame_queue: asyncio.Queue[VideoFrame] = asyncio.Queue(maxsize=30)
+ self.target_fps = target_fps
+ self._start_time = time.time()
+ self._frame_count = 0
+
+ async def recv(self) -> VideoFrame:
+ """Called by aiortc to get next frame to send.
+
+ This method is called by the WebRTC stack when it needs
+ the next frame to encode and send.
+ """
+ frame = await self.frame_queue.get()
+
+ # Set pts (presentation timestamp) and time_base
+ self._frame_count += 1
+ frame.pts = self._frame_count
+ frame.time_base = fractions.Fraction(1, self.target_fps)
+
+ return frame
+
+ async def put_frame(self, frame: VideoFrame) -> bool:
+ """Add frame to be sent to fal.
+
+ Returns True if frame was queued, False if queue was full (frame dropped).
+ """
+ try:
+ self.frame_queue.put_nowait(frame)
+ return True
+ except asyncio.QueueFull:
+ # Drop oldest frame and add new one
+ try:
+ self.frame_queue.get_nowait()
+ self.frame_queue.put_nowait(frame)
+ return True
+ except asyncio.QueueEmpty:
+ return False
+
+ def put_frame_sync(self, frame: VideoFrame) -> bool:
+ """Synchronous version for use from non-async contexts."""
+ return self.put_frame_nowait(frame)
+
+ def put_frame_nowait(self, frame: VideoFrame) -> bool:
+ """Non-blocking frame put."""
+ try:
+ self.frame_queue.put_nowait(frame)
+ return True
+ except asyncio.QueueFull:
+ return False
+
+
+class FalInputTrack(MediaStreamTrack):
+ """Receives processed frames from fal via WebRTC.
+
+ This wraps an incoming track and makes frames available via a queue.
+ Similar pattern to YOLOTrack in reference, but stores frames instead
+ of processing them.
+ """
+
+ kind = "video"
+
+ def __init__(self, source_track: MediaStreamTrack):
+ super().__init__()
+ self.source_track = source_track
+ self.frame_queue: asyncio.Queue[VideoFrame] = asyncio.Queue(maxsize=30)
+ self._consume_task: asyncio.Task | None = None
+
+ def start_consuming(self):
+ """Start consuming frames from source track."""
+ self._consume_task = asyncio.create_task(self._consume_loop())
+
+ async def _consume_loop(self):
+ """Continuously receive frames from source and queue them."""
+ while True:
+ try:
+ frame = await self.source_track.recv()
+ try:
+ self.frame_queue.put_nowait(frame)
+ except asyncio.QueueFull:
+ # Drop oldest frame
+ try:
+ self.frame_queue.get_nowait()
+ self.frame_queue.put_nowait(frame)
+ except asyncio.QueueEmpty:
+ pass
+ except Exception:
+ break
+
+ async def recv(self) -> VideoFrame:
+ """Get next received frame."""
+ return await self.frame_queue.get()
+
+ def get_frame_nowait(self) -> VideoFrame | None:
+ """Non-blocking frame get."""
+ try:
+ return self.frame_queue.get_nowait()
+ except asyncio.QueueEmpty:
+ return None
+
+ async def stop(self):
+ """Stop consuming frames."""
+ if self._consume_task:
+ self._consume_task.cancel()
+ try:
+ await self._consume_task
+ except asyncio.CancelledError:
+ pass
+```
+
+### Phase 3: Integrate fal Client with FrameProcessor
+
+**Modify: `src/scope/server/frame_processor.py`**
+
+Add fal cloud processing mode alongside existing local pipeline processing:
+
+```python
+class FrameProcessor:
+ def __init__(self, ...):
+ # Existing attributes...
+
+ # fal cloud integration
+ self.fal_client: FalClient | None = None
+ self.fal_enabled = False
+ self._fal_received_frames: asyncio.Queue[VideoFrame] = asyncio.Queue(maxsize=30)
+
+ def _on_fal_frame_received(self, frame: VideoFrame):
+ """Callback when frame is received from fal."""
+ try:
+ self._fal_received_frames.put_nowait(frame)
+ except asyncio.QueueFull:
+ # Drop oldest
+ try:
+ self._fal_received_frames.get_nowait()
+ self._fal_received_frames.put_nowait(frame)
+ except asyncio.QueueEmpty:
+ pass
+
+ async def connect_to_fal(self, app_id: str, api_key: str) -> None:
+ """Connect to fal.ai cloud for remote processing."""
+ if self.fal_client:
+ await self.fal_client.disconnect()
+
+ self.fal_client = FalClient(
+ app_id=app_id,
+ api_key=api_key,
+ on_frame_received=self._on_fal_frame_received,
+ )
+ await self.fal_client.connect()
+ self.fal_enabled = True
+
+ async def disconnect_from_fal(self) -> None:
+ """Disconnect from fal.ai cloud."""
+ if self.fal_client:
+ await self.fal_client.disconnect()
+ self.fal_client = None
+ self.fal_enabled = False
+
+ def put(self, frame: VideoFrame) -> bool:
+ """Put frame for processing."""
+ if self.fal_enabled and self.fal_client:
+ # Send to fal cloud via WebRTC
+ return self.fal_client.output_track.put_frame_nowait(frame)
+ else:
+ # Existing local processing
+ ...
+
+ def get(self) -> VideoFrame | None:
+ """Get processed frame."""
+ if self.fal_enabled:
+ # Get from fal cloud
+ try:
+ return self._fal_received_frames.get_nowait()
+ except asyncio.QueueEmpty:
+ return None
+ else:
+ # Existing local processing
+ ...
+```
+
+### Phase 4: Add API Endpoints for fal Configuration
+
+**Modify: `src/scope/server/app.py`**
+
+Add REST endpoints to configure fal mode:
+
+```python
+from pydantic import BaseModel
+
+
+class FalConnectRequest(BaseModel):
+ app_id: str # e.g., "owner/scope-fal/webrtc"
+ api_key: str
+
+
+class FalStatusResponse(BaseModel):
+ connected: bool
+ app_id: str | None = None
+
+
+@app.post("/api/v1/fal/connect")
+async def connect_to_fal(request: FalConnectRequest) -> FalStatusResponse:
+ """Connect to fal.ai cloud for remote GPU inference."""
+ await frame_processor.connect_to_fal(
+ app_id=request.app_id,
+ api_key=request.api_key,
+ )
+ return FalStatusResponse(connected=True, app_id=request.app_id)
+
+
+@app.post("/api/v1/fal/disconnect")
+async def disconnect_from_fal() -> FalStatusResponse:
+ """Disconnect from fal.ai cloud."""
+ await frame_processor.disconnect_from_fal()
+ return FalStatusResponse(connected=False)
+
+
+@app.get("/api/v1/fal/status")
+async def get_fal_status() -> FalStatusResponse:
+ """Get current fal connection status."""
+ if frame_processor.fal_enabled and frame_processor.fal_client:
+ return FalStatusResponse(
+ connected=True,
+ app_id=frame_processor.fal_client.app_id,
+ )
+ return FalStatusResponse(connected=False)
+```
+
+### Phase 5: Fix Spout Receiver to Route Through fal
+
+**Problem:** The current `_spout_receiver_loop` bypasses `put()` and writes directly to the local pipeline processor queue. This means Spout input doesn't route through fal even when fal is enabled.
+
+**What already works:**
+- ✅ Browser WebRTC input → fal (browser frames go through `put()`)
+- ✅ fal output → Browser WebRTC (via `get()`)
+- ✅ fal output → Spout sender (handled in `get()`)
+
+**What needs fixing:**
+- ❌ Spout input → fal: `_spout_receiver_loop` must route through `put()` when fal is enabled
+
+#### Data Flow Diagram (all inputs/outputs)
+
+```
+┌─────────────────────────────────────────────────────────────────────────────┐
+│ LOCAL SCOPE SERVER │
+│ │
+│ INPUTS ROUTING OUTPUTS │
+│ ─────── ─────── ─────── │
+│ │
+│ Browser ──WebRTC──►┐ ┌──►WebRTC──► Browser │
+│ │ │ (preview) │
+│ Spout ───────────►├──► put() ──► [fal or local] │ │
+│ Receiver │ │ ├──► Spout ──► External │
+│ (external app) │ │ │ Sender App │
+│ │ ▼ │ │
+│ │ ┌──────────────┐ │ │
+│ │ │ fal_enabled? │──► get() ─┘ │
+│ │ └──────┬───────┘ │
+│ │ YES │ NO │
+│ │ ▼ │
+│ │ ┌─────────────────┐ ┌──────────────┐ │
+│ │ │ FalClient │ │ Local │ │
+│ │ │ (WebRTC to fal) │ │ Pipeline │ │
+│ │ └────────┬────────┘ └──────────────┘ │
+│ │ │ │
+│ │ ▼ │
+│ │ ┌─────────────────┐ │
+│ │ │ fal.ai GPU │ │
+│ │ │ (H100 cloud) │ │
+│ │ └────────┬────────┘ │
+│ │ │ │
+│ │ ▼ │
+│ │ _fal_received_frames │
+│ │ │
+└─────────────────────────────────────────────────────────────────────────────┘
+
+Note: Browser connection required - it drives get() loop which feeds both
+ WebRTC output (preview) and Spout sender.
+```
+
+#### Implementation
+
+**File:** `src/scope/server/frame_processor.py`
+
+**Change:** Modify `_spout_receiver_loop` to route through `put()` when fal is enabled
+
+```python
+def _spout_receiver_loop(self):
+ """Background thread that receives frames from Spout and adds to buffer."""
+ ...
+ while self.running and self.spout_receiver_enabled and self.spout_receiver is not None:
+ ...
+ rgb_frame = self.spout_receiver.receive(as_rgb=True)
+ if rgb_frame is not None:
+ last_frame_time = time.time()
+
+ # Route based on fal mode
+ if self.fal_enabled and self.fal_client:
+ # Convert numpy to av.VideoFrame for WebRTC and route through put()
+ from av import VideoFrame
+ video_frame = VideoFrame.from_ndarray(rgb_frame, format='rgb24')
+ self.put(video_frame)
+ elif self.pipeline_processors:
+ # Local processing: put directly into pipeline (existing behavior)
+ first_processor = self.pipeline_processors[0]
+ frame_tensor = torch.from_numpy(rgb_frame)
+ frame_tensor = frame_tensor.unsqueeze(0)
+ try:
+ first_processor.input_queue.put_nowait(frame_tensor)
+ except queue.Full:
+ logger.debug("First processor input queue full, dropping Spout frame")
+
+ frame_count += 1
+ ...
+```
+
+#### Supported Configurations
+
+| Input | Output | Works? |
+|-------|--------|--------|
+| Browser WebRTC | Browser WebRTC | ✅ Yes |
+| Browser WebRTC | Browser + Spout | ✅ Yes |
+| Spout Receiver | Browser WebRTC | ✅ Yes (after fix) |
+| Spout Receiver | Browser + Spout | ✅ Yes (after fix) |
+| Spout Receiver | Spout only (no browser) | ❌ No (browser required to drive get()) |
+
+#### Tests
+
+**Unit tests** (add to `tests/server/test_frame_processor_fal.py`):
+- `test_spout_receiver_routes_to_fal_when_enabled`
+- `test_spout_receiver_routes_to_local_when_fal_disabled`
+
+**Manual tests:**
+1. Start Scope server with fal connected
+2. Enable Spout receiver (connect to OBS or similar)
+3. Verify frames appear in browser preview (proves Spout → fal → browser works)
+4. Enable Spout sender, verify external app receives processed frames
+
+### Phase 6: Parameter Forwarding and UI Integration
+
+#### Current Parameter Flow (Local Mode)
+
+```
+Browser UI ─── WebRTC Data Channel ──► Scope Server ──► FrameProcessor.update_parameters()
+ (JSON messages) │
+ ▼
+ Pipeline Processors
+```
+
+**Key insight:** Parameters are sent via WebRTC data channel as JSON messages, NOT via HTTP/REST. This includes:
+- `prompts`, `noise_scale`, `denoising_step_list`
+- `kv_cache_attention_bias`, `paused`
+- `spout_sender`, `spout_receiver`
+- `vace_ref_images`, `vace_context_scale`
+- `transition` (prompt interpolation)
+- `ctrl_input` (controller input)
+- `lora_scales`
+
+#### Required: Parameter Forwarding to fal Cloud
+
+When cloud mode is enabled, the FalClient must forward parameter updates to fal.ai via its own data channel:
+
+```
+Browser UI ─── WebRTC Data Channel ──► Scope Server ──► FalClient Data Channel ──► fal.ai
+ (JSON messages) │ (JSON messages)
+ │
+ ▼
+ Also stored locally
+ (for UI state sync)
+```
+
+#### FalClient Data Channel Implementation
+
+Add data channel support to `FalClient`:
+
+```python
+class FalClient:
+ def __init__(self, ...):
+ # ... existing attributes ...
+ self.data_channel: RTCDataChannel | None = None
+ self._pending_parameters: dict = {}
+
+ async def connect(self, initial_parameters: dict | None = None) -> None:
+ """Connect to fal with optional initial parameters."""
+ # ... token and WebSocket setup ...
+
+ # Create peer connection
+ self.pc = RTCPeerConnection(...)
+
+ # Create data channel for parameter updates (BEFORE creating offer)
+ self.data_channel = self.pc.createDataChannel(
+ "parameters",
+ ordered=True, # Ensure parameter order is preserved
+ )
+
+ @self.data_channel.on("open")
+ def on_data_channel_open():
+ logger.info("Data channel to fal opened")
+ # Send any pending parameters
+ if self._pending_parameters:
+ self._send_parameters(self._pending_parameters)
+ self._pending_parameters = {}
+
+ # ... rest of connection setup ...
+
+ # Include initial parameters in offer message
+ await self.ws.send(json.dumps({
+ "type": "offer",
+ "sdp": self.pc.localDescription.sdp,
+ "initialParameters": initial_parameters, # Sent with offer
+ }))
+
+ def send_parameters(self, parameters: dict) -> bool:
+ """Forward parameter update to fal.ai via data channel."""
+ if self.data_channel and self.data_channel.readyState == "open":
+ return self._send_parameters(parameters)
+ else:
+ # Queue for when channel opens
+ self._pending_parameters.update(parameters)
+ return False
+
+ def _send_parameters(self, parameters: dict) -> bool:
+ """Internal: send parameters over data channel."""
+ try:
+ # Filter out None values (same as frontend)
+ filtered = {k: v for k, v in parameters.items() if v is not None}
+ message = json.dumps(filtered)
+ self.data_channel.send(message)
+ logger.debug(f"Sent parameters to fal: {filtered}")
+ return True
+ except Exception as e:
+ logger.error(f"Failed to send parameters: {e}")
+ return False
+```
+
+#### FrameProcessor Parameter Routing
+
+Modify `update_parameters()` to route to fal when cloud mode is active:
+
+```python
+def update_parameters(self, parameters: dict[str, Any]):
+ """Update parameters - routes to local pipelines OR fal cloud."""
+
+ # Handle Spout config locally (always)
+ if "spout_sender" in parameters:
+ self._update_spout_sender(parameters.pop("spout_sender"))
+ if "spout_receiver" in parameters:
+ self._update_spout_receiver(parameters.pop("spout_receiver"))
+
+ # Route remaining parameters based on mode
+ if self.fal_enabled and self.fal_client:
+ # Forward to fal cloud
+ self.fal_client.send_parameters(parameters)
+ else:
+ # Local processing
+ for processor in self.pipeline_processors:
+ processor.update_parameters(parameters)
+
+ # Always store locally for state tracking
+ self.parameters = {**self.parameters, **parameters}
+```
+
+#### Parameters That Stay Local vs Forward to fal
+
+| Parameter | Local | Forward to fal | Notes |
+|-----------|-------|----------------|-------|
+| `spout_sender` | ✓ | ✗ | Output is always local |
+| `spout_receiver` | ✓ | ✗ | Input is always local |
+| `paused` | ✓ | ✓ | Both need to know |
+| `prompts` | ✗ | ✓ | Pipeline parameter |
+| `noise_scale` | ✗ | ✓ | Pipeline parameter |
+| `denoising_step_list` | ✗ | ✓ | Pipeline parameter |
+| `kv_cache_attention_bias` | ✗ | ✓ | Pipeline parameter |
+| `transition` | ✗ | ✓ | Pipeline parameter |
+| `vace_*` | ✗ | ✓ | Pipeline parameter |
+| `ctrl_input` | ✗ | ✓ | Pipeline parameter |
+| `lora_scales` | ✗ | ✓ | Pipeline parameter |
+
+#### UI Toggle: Cloud vs Local Mode
+
+Design goals:
+1. **Single UI toggle** to switch between local GPU and fal cloud
+2. **Seamless switching** - same parameter controls work in both modes
+3. **Clear status indication** - user knows which mode is active
+4. **Graceful fallback** - if cloud fails, can switch back to local
+
+#### Frontend Changes
+
+**New State in Settings Context**
+
+Modify `frontend/src/context/SettingsContext.tsx`:
+
+```typescript
+interface Settings {
+ // ... existing settings ...
+
+ // Cloud inference settings
+ cloudMode: {
+ enabled: boolean;
+ appId: string; // e.g., "username/scope-fal/webrtc"
+ apiKey: string; // fal API key
+ status: "disconnected" | "connecting" | "connected" | "error";
+ errorMessage?: string;
+ };
+}
+
+const defaultSettings: Settings = {
+ // ... existing defaults ...
+ cloudMode: {
+ enabled: false,
+ appId: "",
+ apiKey: "",
+ status: "disconnected",
+ },
+};
+```
+
+**Cloud Mode Toggle Component**
+
+New file: `frontend/src/components/CloudModeToggle.tsx`
+
+```typescript
+export function CloudModeToggle() {
+ const { settings, updateSettings } = useSettings();
+ const { cloudMode } = settings;
+
+ const handleToggle = async (enabled: boolean) => {
+ if (enabled) {
+ // Connect to fal cloud
+ updateSettings({ cloudMode: { ...cloudMode, status: "connecting" } });
+ try {
+ await fetch("/api/v1/fal/connect", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ app_id: cloudMode.appId,
+ api_key: cloudMode.apiKey,
+ initial_parameters: {
+ pipeline_ids: settings.pipelineIds,
+ prompts: settings.prompts,
+ // ... other current parameters
+ },
+ }),
+ });
+ updateSettings({ cloudMode: { ...cloudMode, enabled: true, status: "connected" } });
+ } catch (error) {
+ updateSettings({
+ cloudMode: {
+ ...cloudMode,
+ enabled: false,
+ status: "error",
+ errorMessage: error.message,
+ },
+ });
+ }
+ } else {
+ // Disconnect from fal cloud
+ await fetch("/api/v1/fal/disconnect", { method: "POST" });
+ updateSettings({ cloudMode: { ...cloudMode, enabled: false, status: "disconnected" } });
+ }
+ };
+
+  return (
+    <div className="cloud-mode-toggle">
+      <Switch checked={cloudMode.enabled} onCheckedChange={handleToggle} />
+      <span>
+        {cloudMode.enabled ? "☁️ Cloud GPU" : "💻 Local GPU"}
+      </span>
+      {cloudMode.status === "connecting" && <Spinner />}
+      {cloudMode.status === "error" && (
+        <span className="error">{cloudMode.errorMessage}</span>
+      )}
+    </div>
+  );
+}
+```
+
+**Settings Panel for Cloud Credentials**
+
+Modify `frontend/src/components/SettingsPanel.tsx` to add a section for cloud configuration:
+
+```typescript
+<Section title="Cloud Inference">
+  <Input
+    label="fal App ID"
+    value={settings.cloudMode.appId}
+    onChange={(appId) => updateSettings({
+      cloudMode: { ...settings.cloudMode, appId }
+    })}
+    placeholder="username/scope-fal/webrtc"
+  />
+  <Input
+    label="fal API Key"
+    type="password"
+    value={settings.cloudMode.apiKey}
+    onChange={(apiKey) => updateSettings({
+      cloudMode: { ...settings.cloudMode, apiKey }
+    })}
+    placeholder="Enter your fal API key"
+  />
+  <CloudModeToggle />
+</Section>
+```
+
+#### Backend API Changes
+
+Modify `src/scope/server/app.py` to update connect endpoint to accept initial parameters:
+
+```python
+class FalConnectRequest(BaseModel):
+ app_id: str
+ api_key: str
+ initial_parameters: dict | None = None # Pipeline params at connect time
+
+
+@app.post("/api/v1/fal/connect")
+async def connect_to_fal(request: FalConnectRequest) -> FalStatusResponse:
+ """Connect to fal.ai cloud for remote GPU inference."""
+ await frame_processor.connect_to_fal(
+ app_id=request.app_id,
+ api_key=request.api_key,
+ initial_parameters=request.initial_parameters,
+ )
+ return FalStatusResponse(connected=True, app_id=request.app_id)
+```
+
+#### Data Flow with Cloud Mode Toggle
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────┐
+│ Frontend UI │
+│ │
+│ ┌─────────────────┐ ┌──────────────────────────────────────────────────┐ │
+│ │ Cloud Toggle │ │ Parameter Controls │ │
+│ │ [OFF] Local GPU │ │ Prompts | Noise | Steps | VACE | LoRA | etc │ │
+│ │ [ON] Cloud GPU │ │ │ │
+│ └────────┬────────┘ └──────────────────────┬───────────────────────────┘ │
+│ │ │ │
+│ │ POST /api/v1/fal/connect │ WebRTC Data Channel │
+│ │ POST /api/v1/fal/disconnect │ (same as before) │
+│ ▼ ▼ │
+└───────────┼─────────────────────────────────────┼───────────────────────────────┘
+ │ │
+ ▼ ▼
+┌───────────────────────────────────────────────────────────────────────────────┐
+│ Scope Server │
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────┐ │
+│ │ FrameProcessor │ │
+│ │ │ │
+│ │ fal_enabled: bool ◄─── Set by /api/v1/fal/connect │ │
+│ │ │ │
+│ │ update_parameters(params): │ │
+│ │ if fal_enabled: │ │
+│ │ fal_client.send_parameters(params) ──► To fal cloud │ │
+│ │ else: │ │
+│ │ pipeline_processors.update(params) ──► Local processing │ │
+│ │ │ │
+│ │ put(frame): │ │
+│ │ if fal_enabled: │ │
+│ │ fal_client.send_frame(frame) ──► To fal cloud │ │
+│ │ else: │ │
+│ │ local_queue.put(frame) ──► Local processing │ │
+│ └─────────────────────────────────────────────────────────────────────────┘ │
+│ │
+└───────────────────────────────────────────────────────────────────────────────┘
+```
+
+#### Persistence
+
+Store cloud credentials in localStorage (frontend) so users don't have to re-enter them:
+
+```typescript
+// In SettingsContext
+useEffect(() => {
+ const saved = localStorage.getItem("cloudModeSettings");
+ if (saved) {
+ const { appId, apiKey } = JSON.parse(saved);
+ updateSettings({ cloudMode: { ...settings.cloudMode, appId, apiKey } });
+ }
+}, []);
+
+useEffect(() => {
+ // Don't persist the enabled state, only credentials
+ localStorage.setItem("cloudModeSettings", JSON.stringify({
+ appId: settings.cloudMode.appId,
+ apiKey: settings.cloudMode.apiKey,
+ }));
+}, [settings.cloudMode.appId, settings.cloudMode.apiKey]);
+```
+
+---
+
+## Files to Create
+
+| File | Purpose |
+|------|---------|
+| `src/scope/server/fal_client.py` | WebSocket + WebRTC client for fal.ai (with data channel) |
+| `src/scope/server/fal_tracks.py` | Custom MediaStreamTrack classes for frame I/O |
+| `frontend/src/components/CloudModeToggle.tsx` | UI toggle for cloud/local mode |
+
+## Files to Modify
+
+| File | Changes |
+|------|---------|
+| `src/scope/server/frame_processor.py` | Add fal cloud mode, parameter routing, connect/disconnect logic |
+| `src/scope/server/app.py` | Add fal configuration endpoints |
+| `src/scope/server/schema.py` | Add fal configuration schemas |
+| `frontend/src/context/SettingsContext.tsx` | Add cloudMode state |
+| `frontend/src/components/SettingsPanel.tsx` | Add cloud credentials UI |
+| `pyproject.toml` | Add `aiohttp` dependency for token API |
+
+---
+
+## Configuration
+
+### API Usage
+
+```bash
+# Connect to fal
+curl -X POST http://localhost:8000/api/v1/fal/connect \
+ -H "Content-Type: application/json" \
+ -d '{
+ "app_id": "your-username/scope-fal/webrtc",
+ "api_key": "your-fal-api-key"
+ }'
+
+# Check status
+curl http://localhost:8000/api/v1/fal/status
+
+# Disconnect
+curl -X POST http://localhost:8000/api/v1/fal/disconnect
+```
+
+### Environment Variables
+
+```bash
+# Optional: Set default fal credentials
+FAL_APP_ID=your-username/scope-fal/webrtc
+FAL_API_KEY=your-key
+```
+
+---
+
+## Dependencies to Add
+
+```toml
+# pyproject.toml
+dependencies = [
+ "aiohttp>=3.9.0", # For token API requests
+ # websockets and aiortc already included
+]
+```
+
+---
+
+## Data Flow Diagram
+
+```
+┌─────────────────────────────────────────────────────────────────────────────┐
+│ Scope Server │
+│ │
+│ ┌──────────────┐ ┌─────────────────┐ ┌─────────────────┐ │
+│ │ Spout │────►│ FrameProcessor │────►│ FalClient │ │
+│ │ Receiver │ │ │ │ │ │
+│ └──────────────┘ │ - put() │ │ - WebSocket │ │
+│ │ - get() │ │ - RTCPeerConn │ │
+│ ┌──────────────┐ │ - fal_enabled │ │ - FalOutputTrack│ │
+│ │ Spout │◄────│ │◄────│ │ │
+│ │ Sender │ └─────────────────┘ └────────┬────────┘ │
+│ └──────────────┘ │ │
+│ │ WebRTC │
+│ ┌──────────────┐ │ (client mode) │
+│ │ Browser │◄─── WebRTC (server mode) ◄───────────┤ │
+│ │ Preview │ │ │
+│ └──────────────┘ │ │
+└────────────────────────────────────────────────────────┼────────────────────┘
+ │
+ ▼
+ ┌──────────────────────┐
+ │ fal.ai Cloud │
+ │ │
+ │ WebRTC Endpoint │
+ │ (/webrtc) │
+ │ │ │
+ │ ▼ │
+ │ ┌────────────────┐ │
+ │ │ Scope Pipeline │ │
+ │ │ (GPU Inference)│ │
+ │ └────────────────┘ │
+ │ │
+ └──────────────────────┘
+```
+
+---
+
+## Signaling Protocol (from Reference)
+
+```
+┌─────────────┐ ┌─────────────┐
+│ Scope Server│ │ fal.ai │
+│ (Client) │ │ (Server) │
+└──────┬──────┘ └──────┬──────┘
+ │ │
+ │──── WebSocket Connect ─────────────────►│
+ │ │
+ │◄──── {"type": "ready"} ─────────────────│
+ │ │
+ │──── {"type": "offer", "sdp": "..."} ───►│
+ │ │
+ │◄──── {"type": "answer", "sdp": "..."} ──│
+ │ │
+ │◄───► ICE Candidates (bidirectional) ───►│
+ │ │
+ │═════ WebRTC Media Stream ══════════════►│
+ │ │
+ │◄════ WebRTC Media Stream (processed) ═══│
+ │ │
+```
+
+---
+
+## Phase-by-Phase Testing
+
+This section provides verification steps for each phase. Complete all tests for a phase before proceeding to the next.
+
+### Phase 1 Testing: FalClient Module
+
+**Prerequisites:** None (first phase)
+
+**Files Created:**
+- `src/scope/server/fal_client.py` - Main FalClient class
+- `tests/server/__init__.py` - Test package
+- `tests/server/test_fal_client.py` - Unit tests
+
+**Dependencies Added to `pyproject.toml`:**
+- `aiohttp>=3.9.0`
+- `websockets>=12.0`
+- `pytest-asyncio>=0.24.0` (dev)
+
+---
+
+#### Automatic Tests (Unit Tests)
+
+**Location:** `tests/server/test_fal_client.py`
+
+**Test List (9 tests):**
+
+| Test Name | What It Tests |
+|-----------|---------------|
+| `test_get_temporary_token_success` | Token acquisition returns token from `{"detail": "..."}` response |
+| `test_get_temporary_token_string_response` | Token acquisition handles plain string response |
+| `test_get_temporary_token_failure` | Token acquisition raises `RuntimeError` on HTTP error |
+| `test_get_temporary_token_extracts_alias` | Alias extracted correctly from app_id (e.g., `owner/my-app/webrtc` → `my-app`) |
+| `test_build_ws_url` | WebSocket URL constructed correctly |
+| `test_build_ws_url_strips_slashes` | Leading/trailing slashes stripped from app_id |
+| `test_fal_client_initialization` | Client initializes with correct default state |
+| `test_fal_client_with_callback` | Client accepts and stores frame callback |
+| `test_disconnect_when_not_connected` | Disconnect works cleanly when not connected |
+
+**Run All Phase 1 Tests:**
+```bash
+uv run pytest tests/server/test_fal_client.py -v
+```
+
+**Expected Output:**
+```
+tests/server/test_fal_client.py::test_get_temporary_token_success PASSED
+tests/server/test_fal_client.py::test_get_temporary_token_string_response PASSED
+tests/server/test_fal_client.py::test_get_temporary_token_failure PASSED
+tests/server/test_fal_client.py::test_get_temporary_token_extracts_alias PASSED
+tests/server/test_fal_client.py::test_build_ws_url PASSED
+tests/server/test_fal_client.py::test_build_ws_url_strips_slashes PASSED
+tests/server/test_fal_client.py::test_fal_client_initialization PASSED
+tests/server/test_fal_client.py::test_fal_client_with_callback PASSED
+tests/server/test_fal_client.py::test_disconnect_when_not_connected PASSED
+
+============================== 9 passed ===============================
+```
+
+**Run All Tests (ensure no regressions):**
+```bash
+uv run pytest tests/ -v
+```
+
+---
+
+#### Manual Tests
+
+##### 1. Module Import Test
+
+**Purpose:** Verify the module can be imported without errors
+
+```bash
+uv run python -c "from scope.server.fal_client import FalClient; print('FalClient imported successfully')"
+```
+
+**Expected Output:**
+```
+FalClient imported successfully
+```
+
+**What to check if it fails:**
+- Missing dependencies: Run `uv sync --group dev`
+- Import errors: Check that `aiohttp` and `websockets` are installed
+
+---
+
+##### 2. Server Startup Test
+
+**Purpose:** Verify the server starts without import errors from the new module
+
+```bash
+timeout 5 uv run daydream-scope 2>&1 || true
+```
+
+**Expected Output:**
+```
+ - scope.core.pipelines.registry - INFO - GPU detected with X.X GB VRAM
+```
+
+**What to check if it fails:**
+- Import errors in fal_client.py
+- Missing dependencies
+
+---
+
+##### 3. Token API Test (requires FAL_API_KEY)
+
+**Purpose:** Verify real token acquisition from fal.ai API
+
+**Step 1:** Set your API key
+```bash
+export FAL_API_KEY="your-fal-api-key-here"
+```
+
+**Step 2:** Test token endpoint directly with curl
+```bash
+curl -X POST https://rest.alpha.fal.ai/tokens/ \
+ -H "Authorization: Key $FAL_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{"allowed_apps": ["scope-fal"], "token_expiration": 120}'
+```
+
+**Expected Output:**
+```json
+{"detail": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9..."}
+```
+
+**Step 3:** Test via Python (optional)
+```bash
+uv run python -c "
+import asyncio
+from scope.server.fal_client import FalClient
+import os
+
+async def test():
+ client = FalClient(
+ app_id='your-username/scope-fal/webrtc',
+ api_key=os.environ.get('FAL_API_KEY', '')
+ )
+ try:
+ token = await client._get_temporary_token()
+ print(f'Token acquired: {token[:50]}...')
+ except Exception as e:
+ print(f'Error: {e}')
+
+asyncio.run(test())
+"
+```
+
+**What to check if it fails:**
+- Invalid API key: Verify FAL_API_KEY is set correctly
+- Network issues: Check internet connectivity
+- App alias mismatch: The `allowed_apps` must match your fal app name
+
+---
+
+##### 4. FalClient Instantiation Test
+
+**Purpose:** Verify FalClient can be created with different configurations
+
+```bash
+uv run python -c "
+from scope.server.fal_client import FalClient
+
+# Test basic initialization
+client1 = FalClient(app_id='owner/app/webrtc', api_key='test-key')
+print(f'Client 1: app_id={client1.app_id}, has_callback={client1.on_frame_received is not None}')
+
+# Test with callback
+client2 = FalClient(
+ app_id='owner/app/webrtc',
+ api_key='test-key',
+ on_frame_received=lambda f: print('Frame received')
+)
+print(f'Client 2: app_id={client2.app_id}, has_callback={client2.on_frame_received is not None}')
+
+# Test URL building
+url = client1._build_ws_url('test-token')
+print(f'WebSocket URL: {url}')
+"
+```
+
+**Expected Output:**
+```
+Client 1: app_id=owner/app/webrtc, has_callback=False
+Client 2: app_id=owner/app/webrtc, has_callback=True
+WebSocket URL: wss://fal.run/owner/app/webrtc?fal_jwt_token=test-token
+```
+
+---
+
+#### Phase 1 Completion Checklist
+
+| Test | Type | Status |
+|------|------|--------|
+| All 9 unit tests pass | Automatic | ⬜ |
+| Module imports without errors | Manual | ⬜ |
+| Server starts without errors | Manual | ⬜ |
+| Token API works with real key | Manual (optional) | ⬜ |
+| FalClient instantiation works | Manual | ⬜ |
+
+**To mark Phase 1 complete, all "Automatic" and "Manual" tests must pass. The "Manual (optional)" test requires a real fal API key.**
+
+---
+
+### Phase 2 Testing: FalOutputTrack and FalInputTrack
+
+**Prerequisites:** Phase 1 complete
+
+**Files Created:**
+- `src/scope/server/fal_tracks.py` - FalOutputTrack and FalInputTrack classes
+- `tests/server/test_fal_tracks.py` - Unit tests
+
+---
+
+#### Automatic Tests (Unit Tests)
+
+**Location:** `tests/server/test_fal_tracks.py`
+
+**Test List (18 tests):**
+
+##### FalOutputTrack Tests (9 tests)
+
+| Test Name | What It Tests |
+|-----------|---------------|
+| `test_initialization` | Track initializes with kind="video", target_fps=30, frame_count=0, maxsize=30 |
+| `test_initialization_custom_fps` | Track accepts custom FPS parameter |
+| `test_recv_returns_frame_with_pts` | recv() returns frame with correct pts and time_base |
+| `test_recv_increments_frame_count` | recv() increments frame count with each call |
+| `test_put_frame_success` | put_frame() successfully queues frame |
+| `test_put_frame_drops_oldest_when_full` | put_frame() drops oldest frame when queue is full |
+| `test_put_frame_nowait_success` | put_frame_nowait() successfully queues frame |
+| `test_put_frame_nowait_returns_false_when_full` | put_frame_nowait() returns False when queue is full |
+| `test_put_frame_sync_calls_nowait` | put_frame_sync() uses put_frame_nowait() |
+
+##### FalInputTrack Tests (9 tests)
+
+| Test Name | What It Tests |
+|-----------|---------------|
+| `test_initialization` | Track initializes with source_track and empty queue |
+| `test_start_consuming_creates_task` | start_consuming() creates asyncio task |
+| `test_recv_returns_frame_from_queue` | recv() returns frame from queue |
+| `test_get_frame_nowait_returns_frame` | get_frame_nowait() returns frame when available |
+| `test_get_frame_nowait_returns_none_when_empty` | get_frame_nowait() returns None when queue is empty |
+| `test_stop_cancels_consume_task` | stop() cancels the consume task |
+| `test_stop_handles_no_task` | stop() handles case when no task exists |
+| `test_consume_loop_queues_frames` | _consume_loop() receives and queues frames |
+| `test_consume_loop_drops_oldest_when_full` | _consume_loop() drops oldest frame when queue is full |
+
+**Run All Phase 2 Tests:**
+```bash
+uv run pytest tests/server/test_fal_tracks.py -v
+```
+
+**Expected Output:**
+```
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_initialization PASSED
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_initialization_custom_fps PASSED
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_recv_returns_frame_with_pts PASSED
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_recv_increments_frame_count PASSED
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_put_frame_success PASSED
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_put_frame_drops_oldest_when_full PASSED
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_put_frame_nowait_success PASSED
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_put_frame_nowait_returns_false_when_full PASSED
+tests/server/test_fal_tracks.py::TestFalOutputTrack::test_put_frame_sync_calls_nowait PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_initialization PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_start_consuming_creates_task PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_recv_returns_frame_from_queue PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_get_frame_nowait_returns_frame PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_get_frame_nowait_returns_none_when_empty PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_stop_cancels_consume_task PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_stop_handles_no_task PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_consume_loop_queues_frames PASSED
+tests/server/test_fal_tracks.py::TestFalInputTrack::test_consume_loop_drops_oldest_when_full PASSED
+
+============================== 18 passed ===============================
+```
+
+**Run All fal Tests (Phase 1 + Phase 2):**
+```bash
+uv run pytest tests/server/test_fal_client.py tests/server/test_fal_tracks.py -v
+```
+
+**Expected:** 27 passed (9 from Phase 1 + 18 from Phase 2)
+
+---
+
+#### Manual Tests
+
+##### 1. Module Import Test
+
+**Purpose:** Verify the module can be imported without errors
+
+```bash
+uv run python -c "from scope.server.fal_tracks import FalOutputTrack, FalInputTrack; print('fal_tracks imported successfully')"
+```
+
+**Expected Output:**
+```
+fal_tracks imported successfully
+```
+
+**What to check if it fails:**
+- Import errors: Check that `aiortc` is installed
+- Missing dependencies: Run `uv sync --group dev`
+
+---
+
+##### 2. FalOutputTrack Creation Test
+
+**Purpose:** Verify FalOutputTrack initializes correctly
+
+```bash
+uv run python -c "
+from scope.server.fal_tracks import FalOutputTrack
+
+track = FalOutputTrack()
+print(f'Track kind: {track.kind}')
+print(f'Target FPS: {track.target_fps}')
+print(f'Queue maxsize: {track.frame_queue.maxsize}')
+print(f'Initial frame count: {track._frame_count}')
+print('FalOutputTrack created successfully')
+"
+```
+
+**Expected Output:**
+```
+Track kind: video
+Target FPS: 30
+Queue maxsize: 30
+Initial frame count: 0
+FalOutputTrack created successfully
+```
+
+---
+
+##### 3. FalInputTrack Creation Test
+
+**Purpose:** Verify FalInputTrack initializes correctly
+
+```bash
+uv run python -c "
+from unittest.mock import MagicMock
+from scope.server.fal_tracks import FalInputTrack
+
+mock_source = MagicMock()
+track = FalInputTrack(mock_source)
+print(f'Track kind: {track.kind}')
+print(f'Source track set: {track.source_track is not None}')
+print(f'Queue maxsize: {track.frame_queue.maxsize}')
+print(f'Consume task (before start): {track._consume_task}')
+print('FalInputTrack created successfully')
+"
+```
+
+**Expected Output:**
+```
+Track kind: video
+Source track set: True
+Queue maxsize: 30
+Consume task (before start): None
+FalInputTrack created successfully
+```
+
+---
+
+##### 4. Frame Queue Test
+
+**Purpose:** Verify frames can be queued and retrieved
+
+```bash
+uv run python -c "
+import asyncio
+from unittest.mock import MagicMock
+from scope.server.fal_tracks import FalOutputTrack
+
+async def test():
+ track = FalOutputTrack(target_fps=30)
+
+ # Create mock frame
+ mock_frame = MagicMock()
+ mock_frame.pts = None
+ mock_frame.time_base = None
+
+ # Test put
+ result = await track.put_frame(mock_frame)
+ print(f'Put frame result: {result}')
+ print(f'Queue size after put: {track.frame_queue.qsize()}')
+
+ # Test recv
+ received = await track.recv()
+ print(f'Frame pts after recv: {received.pts}')
+ print(f'Frame time_base: {received.time_base}')
+ print(f'Queue size after recv: {track.frame_queue.qsize()}')
+
+asyncio.run(test())
+"
+```
+
+**Expected Output:**
+```
+Put frame result: True
+Queue size after put: 1
+Frame pts after recv: 1
+Frame time_base: 1/30
+Queue size after recv: 0
+```
+
+---
+
+##### 5. Server Startup Test
+
+**Purpose:** Verify the server starts without import errors from the new module
+
+```bash
+timeout 5 uv run daydream-scope 2>&1 || true
+```
+
+**Expected Output:**
+```
+ - scope.core.pipelines.registry - INFO - GPU detected with X.X GB VRAM
+```
+
+**What to check if it fails:**
+- Import errors in fal_tracks.py
+- Circular import issues between fal_client.py and fal_tracks.py
+
+---
+
+#### Phase 2 Completion Checklist
+
+| Test | Type | Status |
+|------|------|--------|
+| All 18 unit tests pass | Automatic | ⬜ |
+| Module imports without errors | Manual | ⬜ |
+| FalOutputTrack creation works | Manual | ⬜ |
+| FalInputTrack creation works | Manual | ⬜ |
+| Frame queue put/recv works | Manual | ⬜ |
+| Server starts without errors | Manual | ⬜ |
+
+**To mark Phase 2 complete, all "Automatic" and "Manual" tests must pass.**
+
+---
+
+### Phase 3 Testing: FrameProcessor Integration
+
+**Prerequisites:** Phases 1-2 complete
+
+#### Unit Tests
+
+Add to `tests/server/test_frame_processor.py`:
+
+```python
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+@pytest.mark.asyncio
+async def test_connect_to_fal():
+ """Test fal connection initialization."""
+ from scope.server.frame_processor import FrameProcessor
+
+ processor = FrameProcessor(...) # Use appropriate constructor
+
+ with patch("scope.server.fal_client.FalClient") as MockFalClient:
+ mock_client = AsyncMock()
+ MockFalClient.return_value = mock_client
+
+ await processor.connect_to_fal(
+ app_id="owner/app/webrtc",
+ api_key="test-key"
+ )
+
+ assert processor.fal_enabled is True
+ assert processor.fal_client is not None
+ mock_client.connect.assert_called_once()
+
+@pytest.mark.asyncio
+async def test_disconnect_from_fal():
+ """Test fal disconnection cleanup."""
+ from scope.server.frame_processor import FrameProcessor
+
+ processor = FrameProcessor(...)
+ processor.fal_client = AsyncMock()
+ processor.fal_enabled = True
+
+ await processor.disconnect_from_fal()
+
+ assert processor.fal_enabled is False
+ assert processor.fal_client is None
+
+def test_put_routes_to_fal_when_enabled():
+ """Test frame routing to fal when cloud mode enabled."""
+ from scope.server.frame_processor import FrameProcessor
+
+ processor = FrameProcessor(...)
+ processor.fal_enabled = True
+ processor.fal_client = MagicMock()
+ processor.fal_client.output_track = MagicMock()
+ processor.fal_client.output_track.put_frame_nowait = MagicMock(return_value=True)
+
+ frame = MagicMock()
+ result = processor.put(frame)
+
+ processor.fal_client.output_track.put_frame_nowait.assert_called_once_with(frame)
+```
+
+Run with:
+```bash
+uv run pytest tests/server/test_frame_processor.py -v -k fal
+```
+
+#### Manual Tests
+
+1. **Server Startup Test** (no fal connection):
+ ```bash
+ uv run daydream-scope
+ # Server should start without errors
+ # fal_enabled should be False by default
+ ```
+
+2. **FrameProcessor State Test**:
+ ```bash
+ uv run python -c "
+ from scope.server.frame_processor import FrameProcessor
+ # Check that fal attributes exist
+ import inspect
+ source = inspect.getsource(FrameProcessor.__init__)
+ assert 'fal_client' in source or hasattr(FrameProcessor, 'fal_client')
+ print('FrameProcessor has fal integration attributes')
+ "
+ ```
+
+#### Phase 3 Completion Criteria
+- [ ] All unit tests pass
+- [ ] Server starts without errors
+- [ ] FrameProcessor has fal_client and fal_enabled attributes
+- [ ] Local processing still works (no regression)
+
+---
+
+### Phase 4 Testing: API Endpoints
+
+**Prerequisites:** Phases 1-3 complete
+
+#### Unit Tests
+
+Add to `tests/server/test_app.py`:
+
+```python
+import pytest
+from fastapi.testclient import TestClient
+from unittest.mock import AsyncMock, patch
+
+def test_fal_connect_endpoint():
+ """Test /api/v1/fal/connect endpoint."""
+ from scope.server.app import app
+
+ with patch("scope.server.app.frame_processor") as mock_fp:
+ mock_fp.connect_to_fal = AsyncMock()
+
+ client = TestClient(app)
+ response = client.post(
+ "/api/v1/fal/connect",
+ json={"app_id": "owner/app/webrtc", "api_key": "test-key"}
+ )
+
+ assert response.status_code == 200
+ assert response.json()["connected"] is True
+ assert response.json()["app_id"] == "owner/app/webrtc"
+
+def test_fal_disconnect_endpoint():
+ """Test /api/v1/fal/disconnect endpoint."""
+ from scope.server.app import app
+
+ with patch("scope.server.app.frame_processor") as mock_fp:
+ mock_fp.disconnect_from_fal = AsyncMock()
+
+ client = TestClient(app)
+ response = client.post("/api/v1/fal/disconnect")
+
+ assert response.status_code == 200
+ assert response.json()["connected"] is False
+
+def test_fal_status_endpoint_disconnected():
+ """Test /api/v1/fal/status when disconnected."""
+ from scope.server.app import app
+
+ with patch("scope.server.app.frame_processor") as mock_fp:
+ mock_fp.fal_enabled = False
+ mock_fp.fal_client = None
+
+ client = TestClient(app)
+ response = client.get("/api/v1/fal/status")
+
+ assert response.status_code == 200
+ assert response.json()["connected"] is False
+
+def test_fal_status_endpoint_connected():
+ """Test /api/v1/fal/status when connected."""
+ from scope.server.app import app
+
+ with patch("scope.server.app.frame_processor") as mock_fp:
+ mock_fp.fal_enabled = True
+ mock_fp.fal_client.app_id = "owner/app/webrtc"
+
+ client = TestClient(app)
+ response = client.get("/api/v1/fal/status")
+
+ assert response.status_code == 200
+ assert response.json()["connected"] is True
+ assert response.json()["app_id"] == "owner/app/webrtc"
+```
+
+Run with:
+```bash
+uv run pytest tests/server/test_app.py -v -k fal
+```
+
+#### Manual Tests
+
+1. **API Endpoint Test** (server must be running):
+ ```bash
+ # Start server in one terminal
+ uv run daydream-scope
+
+ # In another terminal, test endpoints
+
+ # Test status (should be disconnected)
+ curl http://localhost:8000/api/v1/fal/status
+ # Expected: {"connected": false, "app_id": null}
+
+ # Test connect (will fail without valid credentials, but endpoint should respond)
+ curl -X POST http://localhost:8000/api/v1/fal/connect \
+ -H "Content-Type: application/json" \
+ -d '{"app_id": "test/app/webrtc", "api_key": "invalid"}'
+ # Expected: Error response (token fetch fails)
+
+ # Test disconnect
+ curl -X POST http://localhost:8000/api/v1/fal/disconnect
+ # Expected: {"connected": false, "app_id": null}
+ ```
+
+2. **Schema Validation Test**:
+ ```bash
+ # Test invalid request body
+ curl -X POST http://localhost:8000/api/v1/fal/connect \
+ -H "Content-Type: application/json" \
+ -d '{"invalid": "data"}'
+ # Expected: 422 Validation Error
+ ```
+
+#### Phase 4 Completion Criteria
+- [ ] All unit tests pass
+- [ ] /api/v1/fal/status returns correct disconnected state
+- [ ] /api/v1/fal/connect validates request body
+- [ ] /api/v1/fal/disconnect returns success
+
+---
+
+### Phase 5 Testing: Spout Integration
+
+**Prerequisites:** Phases 1-4 complete, fal app deployed
+
+#### Unit Tests
+
+```python
+@pytest.mark.asyncio
+async def test_fal_frame_callback_queues_frame():
+ """Test that received frames are queued."""
+ from scope.server.frame_processor import FrameProcessor
+
+ processor = FrameProcessor(...)
+
+ frame = MagicMock()
+ processor._on_fal_frame_received(frame)
+
+ assert processor._fal_received_frames.qsize() == 1
+
+def test_get_returns_fal_frame_when_enabled():
+ """Test that get() returns frames from fal queue."""
+ from scope.server.frame_processor import FrameProcessor
+
+ processor = FrameProcessor(...)
+ processor.fal_enabled = True
+
+ frame = MagicMock()
+ processor._fal_received_frames.put_nowait(frame)
+
+ result = processor.get()
+ assert result is frame
+```
+
+#### Manual Tests
+
+1. **End-to-End with Real fal** (requires deployed fal app):
+ ```bash
+ # Deploy fal app if not already
+ fal deploy fal_app.py
+
+ # Start scope server
+ uv run daydream-scope
+
+ # Connect to fal
+ curl -X POST http://localhost:8000/api/v1/fal/connect \
+ -H "Content-Type: application/json" \
+ -d "{\"app_id\": \"$FAL_APP_ID\", \"api_key\": \"$FAL_API_KEY\"}"
+
+ # Check connection status
+ curl http://localhost:8000/api/v1/fal/status
+ # Expected: {"connected": true, "app_id": "..."}
+ ```
+
+2. **Spout Flow Test** (Windows only, requires Spout-compatible apps):
+ ```
+ 1. Start a Spout sender app (e.g., OBS with Spout plugin)
+ 2. Start scope server with Spout receiver enabled
+ 3. Connect to fal via API
+ 4. Start a Spout receiver app (e.g., Resolume)
+ 5. Verify video flows through the entire pipeline
+ ```
+
+3. **WebRTC Connection Verification**:
+ ```bash
+ # Check server logs for:
+ # - "Connecting to fal WebSocket..."
+ # - "fal server ready"
+ # - "Sent WebRTC offer"
+ # - "Set remote description from answer"
+ # - "Connection state: connected"
+ # - "Received video track from fal"
+ ```
+
+#### Phase 5 Completion Criteria
+- [ ] All unit tests pass
+- [ ] Can connect to deployed fal app via API
+- [ ] Server logs show successful WebRTC connection
+- [ ] (If Spout available) Frames flow through full pipeline
+
+---
+
+### Phase 6 Testing: Parameter Forwarding and UI
+
+**Prerequisites:** Phases 1-5 complete
+
+#### Unit Tests
+
+```python
+def test_send_parameters_queues_when_channel_closed():
+ """Test parameters are queued when data channel not ready."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="test", api_key="test")
+ client.data_channel = None # Not connected
+
+ result = client.send_parameters({"prompt": "test"})
+
+ assert result is False
+ assert client._pending_parameters == {"prompt": "test"}
+
+def test_update_parameters_routes_to_fal():
+ """Test parameter routing when fal enabled."""
+ from scope.server.frame_processor import FrameProcessor
+
+ processor = FrameProcessor(...)
+ processor.fal_enabled = True
+ processor.fal_client = MagicMock()
+ processor.fal_client.send_parameters = MagicMock(return_value=True)
+
+ processor.update_parameters({"prompts": ["test prompt"]})
+
+ processor.fal_client.send_parameters.assert_called_once()
+
+def test_spout_params_stay_local():
+ """Test Spout parameters are not forwarded to fal."""
+ from scope.server.frame_processor import FrameProcessor
+
+ processor = FrameProcessor(...)
+ processor.fal_enabled = True
+ processor.fal_client = MagicMock()
+ processor._update_spout_sender = MagicMock()
+
+ processor.update_parameters({
+ "spout_sender": {"enabled": True},
+ "prompts": ["test"]
+ })
+
+ # Spout handled locally
+ processor._update_spout_sender.assert_called_once()
+ # Only prompts sent to fal
+ call_args = processor.fal_client.send_parameters.call_args[0][0]
+ assert "spout_sender" not in call_args
+```
+
+#### Manual Tests
+
+1. **Parameter Forwarding Test** (requires connected fal):
+ ```bash
+ # With fal connected, open browser to scope UI
+ # Change prompt in UI
+ # Check fal logs for received parameter update
+ ```
+
+2. **Data Channel Test**:
+ ```bash
+ # Check server logs for:
+ # - "Data channel to fal opened"
+ # - "Sent parameters to fal: {...}"
+ ```
+
+3. **UI Toggle Test** (requires frontend changes):
+ ```
+ 1. Open scope UI in browser
+ 2. Enter fal credentials in settings
+ 3. Toggle cloud mode ON
+ 4. Verify status shows "connected"
+ 5. Change parameters (prompt, noise, etc.)
+ 6. Check fal logs for parameter updates
+ 7. Toggle cloud mode OFF
+ 8. Verify local processing resumes
+ ```
+
+4. **Persistence Test**:
+ ```
+ 1. Enter fal credentials in UI
+ 2. Refresh browser page
+ 3. Open settings panel
+ 4. Verify credentials are still filled in
+ 5. Verify cloud mode is OFF (not auto-connected)
+ ```
+
+#### Phase 6 Completion Criteria
+- [ ] All unit tests pass
+- [ ] Parameters are forwarded to fal when connected
+- [ ] Spout parameters stay local
+- [ ] UI toggle connects/disconnects correctly
+- [ ] Credentials persist across page refresh
+- [ ] Mode switching works without errors
+
+---
+
+## Test Summary Checklist
+
+Use this checklist to track progress through all phases:
+
+| Phase | Unit Tests | Manual Tests | Status |
+|-------|------------|--------------|--------|
+| 1. FalClient Module | 9 tests | 4 tests | ✅ |
+| 2. FalOutputTrack/FalInputTrack | 18 tests | 5 tests | ✅ |
+| 3. FrameProcessor Integration | 14 tests | 2 tests | ✅ |
+| 4. API Endpoints | 11 tests | 2 tests | ✅ |
+| 5. Spout Integration | 2 tests | 3 tests | ✅ |
+| 6. Parameter Forwarding & UI | 10 tests | 4 tests | ✅ |
+
+**Total: 64 unit tests, 20 manual tests**
+
+---
+
+## Considerations
+
+### Thread Safety
+- FalClient runs in asyncio event loop
+- Spout threads communicate via queues
+- Use `asyncio.run_coroutine_threadsafe()` for cross-thread async calls
+
+### Error Handling
+- WebSocket disconnection: Auto-reconnect with exponential backoff
+- WebRTC ICE failures: Log and notify, allow manual retry
+- Frame timeouts: Drop frames and log warnings
+- Token expiration: Re-authenticate before 120s timeout
+
+### Latency
+- WebRTC adds ~50-100ms latency per direction
+- Total round-trip to fal cloud: ~200-400ms depending on network
+- Consider frame rate adjustment based on measured latency
+
+### Fallback
+- If fal connection fails, option to fall back to local processing (if GPU available)
+- Configuration flag: `fal_cloud.fallback_to_local: true`
diff --git a/fal_app.py b/fal_app.py
new file mode 100644
index 000000000..fc3567362
--- /dev/null
+++ b/fal_app.py
@@ -0,0 +1,428 @@
+"""
+fal.ai deployment for Scope.
+
+This runs the Scope backend and proxies WebRTC signaling + API calls through
+a single WebSocket connection to avoid fal spawning new runners for each request.
+
+Based on:
+- https://docs.fal.ai/examples/serverless/deploy-models-with-custom-containers
+- https://github.com/fal-ai-community/fal-demos/blob/main/fal_demos/video/yolo_webcam_webrtc/yolo.py
+"""
+
+import fal
+from fal.container import ContainerImage
+from fastapi import WebSocket
+
+# Configuration
+DOCKER_IMAGE = "daydreamlive/scope:d117be7"
+
+# Create a Dockerfile that uses your existing image as base
+dockerfile_str = f"""
+FROM {DOCKER_IMAGE}
+
+"""
+
+# Create container image from Dockerfile string
+custom_image = ContainerImage.from_dockerfile_str(
+ dockerfile_str,
+)
+
+
+class ScopeApp(fal.App, keep_alive=1800):
+ """
+ Scope server on fal.ai.
+
+ This runs the Scope backend as a subprocess and exposes a WebSocket endpoint
+ that handles:
+ 1. WebRTC signaling (SDP offer/answer, ICE candidates)
+ 2. REST API calls (proxied through WebSocket to avoid new runner instances)
+
+ The actual WebRTC video stream flows directly between browser and this runner
+ once the signaling is complete.
+
+ """
+
+ # Set custom Docker image
+ image = custom_image
+
+ # GPU configuration
+ machine_type = "GPU-H100"
+
+ # Additional requirements needed for the setup code
+ requirements = [
+ "requests",
+ "httpx", # For async HTTP requests
+ ]
+
+ # Public auth mode (no JWT token required)
+ auth_mode = "public"
+
+ def setup(self):
+ """
+ Start the Scope backend server as a background process.
+ """
+ import logging
+ import os
+ import subprocess
+ import threading
+ import time
+
+ logger = logging.getLogger(__name__)
+ print("Starting Scope container setup...")
+
+ # Verify GPU is available
+ try:
+ result = subprocess.run(
+ ["nvidia-smi"], capture_output=True, text=True, check=True
+ )
+ print(f"GPU Status:\n{result.stdout}")
+ except Exception as e:
+ logger.error(f"GPU check failed: {e}")
+ raise
+
+ # Environment for scope
+ scope_env = os.environ.copy()
+ # Use fal's /data directory for persistent storage (survives restarts)
+ scope_env["DAYDREAM_SCOPE_MODELS_DIR"] = "/data/models"
+ scope_env["DAYDREAM_SCOPE_LOGS_DIR"] = "/data/logs"
+ scope_env["DAYDREAM_SCOPE_ASSETS_DIR"] = "/data/assets"
+
+ # Start the scope server in a background thread
+ def start_server():
+ print("Starting Scope server...")
+ try:
+ subprocess.run(
+ ["uv", "run", "daydream-scope", "--no-browser", "--host", "0.0.0.0", "--port", "8000"],
+ check=True,
+ env=scope_env,
+ )
+ except Exception as e:
+ logger.error(f"Failed to start Scope server: {e}")
+ raise
+
+ server_thread = threading.Thread(target=start_server, daemon=True)
+ server_thread.start()
+
+ # Wait for the server to be ready
+ print("Waiting for Scope server to start...")
+ max_wait = 120 # seconds
+ start_time = time.time()
+
+ while time.time() - start_time < max_wait:
+ try:
+ import requests
+
+ response = requests.get("http://localhost:8000/health", timeout=2)
+ if response.status_code == 200:
+ print("✅ Scope server is running on port 8000")
+ break
+ except Exception:
+ pass
+ time.sleep(2)
+ else:
+ logger.warning(
+ f"Scope server health check timed out after {max_wait}s, continuing anyway..."
+ )
+
+ print("Scope container setup complete")
+
+ @fal.endpoint("/ws", is_websocket=True)
+ async def websocket_handler(self, ws: WebSocket) -> None:
+ """
+ Main WebSocket endpoint that handles:
+ 1. WebRTC signaling (offer/answer, ICE candidates)
+ 2. REST API call proxying
+
+ Protocol:
+ - All messages are JSON with a "type" field
+ - WebRTC signaling types: "get_ice_servers", "offer", "icecandidate"
+ - API proxy type: "api" with "method", "path", "body" fields
+
+ This keeps a persistent connection to prevent fal from spawning new runners.
+ """
+ import asyncio
+ import json
+ import logging
+
+ import httpx
+ from starlette.websockets import WebSocketDisconnect, WebSocketState
+
+ logger = logging.getLogger(__name__)
+ SCOPE_BASE_URL = "http://localhost:8000"
+
+ await ws.accept()
+ print("✅ WebSocket connection accepted")
+
+ # Send ready message
+ await ws.send_json({"type": "ready"})
+
+ # Track WebRTC session ID for ICE candidate routing
+ session_id = None
+
+ async def safe_send_json(payload: dict):
+ """Send JSON, handling connection errors gracefully."""
+ try:
+ if (
+ ws.client_state != WebSocketState.CONNECTED
+ or ws.application_state != WebSocketState.CONNECTED
+ ):
+ return
+ await ws.send_json(payload)
+ except (RuntimeError, WebSocketDisconnect):
+ pass
+
+ async def handle_get_ice_servers():
+ """Proxy GET /api/v1/webrtc/ice-servers"""
+ async with httpx.AsyncClient() as client:
+ response = await client.get(
+ f"{SCOPE_BASE_URL}/api/v1/webrtc/ice-servers"
+ )
+ return {
+ "type": "ice_servers",
+ "data": response.json(),
+ "status": response.status_code,
+ }
+
+ async def handle_offer(payload: dict):
+ """Proxy POST /api/v1/webrtc/offer
+
+ The backend's offer endpoint handles pipeline loading:
+ - If pipeline not loaded, it auto-loads from initialParameters
+ - The load is awaited, so it blocks until complete
+ - No pre-loading needed here
+ """
+ nonlocal session_id
+ request_id = payload.get("request_id")
+
+ async with httpx.AsyncClient() as client:
+ # Just proxy the offer - backend handles pipeline loading and waits for it
+ response = await client.post(
+ f"{SCOPE_BASE_URL}/api/v1/webrtc/offer",
+ json={
+ "sdp": payload.get("sdp"),
+ "type": payload.get("sdp_type", "offer"),
+ "initialParameters": payload.get("initialParameters"),
+ },
+ timeout=180.0, # Long timeout - backend may need to load pipeline first
+ )
+
+ if response.status_code == 200:
+ data = response.json()
+ session_id = data.get("sessionId")
+ return {
+ "type": "answer",
+ "request_id": request_id,
+ "sdp": data.get("sdp"),
+ "sdp_type": data.get("type"),
+ "sessionId": session_id,
+ }
+ else:
+ return {
+ "type": "error",
+ "request_id": request_id,
+ "error": f"Offer failed: {response.status_code}",
+ "detail": response.text,
+ }
+
+ async def handle_icecandidate(payload: dict):
+ """Proxy PATCH /api/v1/webrtc/offer/{session_id} for ICE candidates"""
+ nonlocal session_id
+ request_id = payload.get("request_id")
+
+ candidate = payload.get("candidate")
+ target_session = payload.get("sessionId") or session_id
+
+ if not target_session:
+ return {
+ "type": "error",
+ "request_id": request_id,
+ "error": "No session ID available for ICE candidate",
+ }
+
+ if candidate is None:
+ # End of candidates signal
+ return {"type": "icecandidate_ack", "request_id": request_id, "status": "end_of_candidates"}
+
+ async with httpx.AsyncClient() as client:
+ response = await client.patch(
+ f"{SCOPE_BASE_URL}/api/v1/webrtc/offer/{target_session}",
+ json={
+ "candidates": [
+ {
+ "candidate": candidate.get("candidate"),
+ "sdpMid": candidate.get("sdpMid"),
+ "sdpMLineIndex": candidate.get("sdpMLineIndex"),
+ }
+ ]
+ },
+ timeout=10.0,
+ )
+
+ if response.status_code == 204:
+ return {"type": "icecandidate_ack", "request_id": request_id, "status": "ok"}
+ else:
+ return {
+ "type": "error",
+ "request_id": request_id,
+ "error": f"ICE candidate failed: {response.status_code}",
+ "detail": response.text,
+ }
+
+ async def handle_api_request(payload: dict):
+ """
+ Proxy arbitrary API requests to Scope backend.
+
+ Expected payload:
+ {
+ "type": "api",
+ "method": "GET" | "POST" | "PATCH" | "DELETE",
+ "path": "/api/v1/...",
+ "body": {...} # optional, for POST/PATCH
+ "request_id": "..." # optional, for correlating responses
+ }
+
+ Special handling for file uploads:
+ If body contains "_base64_content", it's decoded and sent as binary.
+ """
+ import base64
+
+ method = payload.get("method", "GET").upper()
+ path = payload.get("path", "")
+ body = payload.get("body")
+ request_id = payload.get("request_id")
+
+ async with httpx.AsyncClient() as client:
+ try:
+ # Check if this is a base64-encoded file upload
+ is_binary_upload = (
+ body
+ and isinstance(body, dict)
+ and "_base64_content" in body
+ )
+
+ if method == "GET":
+ response = await client.get(
+ f"{SCOPE_BASE_URL}{path}", timeout=30.0
+ )
+ elif method == "POST":
+ if is_binary_upload:
+ # Decode base64 and send as binary
+ binary_content = base64.b64decode(body["_base64_content"])
+ content_type = body.get(
+ "_content_type", "application/octet-stream"
+ )
+ response = await client.post(
+ f"{SCOPE_BASE_URL}{path}",
+ content=binary_content,
+ headers={"Content-Type": content_type},
+ timeout=60.0, # Longer timeout for uploads
+ )
+ else:
+ response = await client.post(
+ f"{SCOPE_BASE_URL}{path}", json=body, timeout=30.0
+ )
+ elif method == "PATCH":
+ response = await client.patch(
+ f"{SCOPE_BASE_URL}{path}", json=body, timeout=30.0
+ )
+ elif method == "DELETE":
+ response = await client.delete(
+ f"{SCOPE_BASE_URL}{path}", timeout=30.0
+ )
+ else:
+ return {
+ "type": "api_response",
+ "request_id": request_id,
+ "status": 400,
+ "error": f"Unsupported method: {method}",
+ }
+
+ # Try to parse JSON response
+ try:
+ data = response.json()
+ except Exception:
+ data = response.text
+
+ return {
+ "type": "api_response",
+ "request_id": request_id,
+ "status": response.status_code,
+ "data": data,
+ }
+
+ except httpx.TimeoutException:
+ return {
+ "type": "api_response",
+ "request_id": request_id,
+ "status": 504,
+ "error": "Request timeout",
+ }
+ except Exception as e:
+ return {
+ "type": "api_response",
+ "request_id": request_id,
+ "status": 500,
+ "error": str(e),
+ }
+
+ async def handle_message(payload: dict) -> dict | None:
+ """Route message to appropriate handler based on type."""
+ msg_type = payload.get("type")
+ request_id = payload.get("request_id")
+
+ if msg_type == "get_ice_servers":
+ return await handle_get_ice_servers()
+ elif msg_type == "offer":
+ return await handle_offer(payload)
+ elif msg_type == "icecandidate":
+ return await handle_icecandidate(payload)
+ elif msg_type == "api":
+ return await handle_api_request(payload)
+ elif msg_type == "ping":
+ return {"type": "pong", "request_id": request_id}
+ else:
+ return {"type": "error", "request_id": request_id, "error": f"Unknown message type: {msg_type}"}
+
+ # Main message loop
+ try:
+ while True:
+ try:
+ message = await ws.receive_text()
+ except RuntimeError:
+ break
+
+ try:
+ payload = json.loads(message)
+ except json.JSONDecodeError as e:
+ await safe_send_json(
+ {"type": "error", "error": f"Invalid JSON: {e}"}
+ )
+ continue
+
+ # Handle the message
+ response = await handle_message(payload)
+ if response:
+ await safe_send_json(response)
+
+ except WebSocketDisconnect:
+ print("WebSocket disconnected")
+ except Exception as e:
+ logger.error(f"WebSocket error: {e}")
+ await safe_send_json({"type": "error", "error": str(e)})
+ finally:
+ print("WebSocket connection closed")
+
+
+# Deployment:
+# 1. Run: fal run fal_app.py (for local testing)
+# 2. Run: fal deploy fal_app.py (to deploy to fal.ai)
+# 3. fal.ai will provide you with a WebSocket URL
+#
+# Client usage:
+# 1. Connect to wss://<your-fal-app-url>/ws (URL provided by `fal deploy`)
+# 2. Wait for {"type": "ready"}
+# 3. Send {"type": "get_ice_servers"} to get ICE servers
+# 4. Send {"type": "offer", "sdp": "...", "sdp_type": "offer"} for WebRTC offer
+# 5. Receive {"type": "answer", "sdp": "...", "sessionId": "..."}
+# 6. Exchange ICE candidates via {"type": "icecandidate", "candidate": {...}}
+# 7. For API calls: {"type": "api", "method": "GET", "path": "/api/v1/pipeline/status"}
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index 89e3a9efd..6f6857e34 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -89,6 +89,7 @@
"integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==",
"dev": true,
"license": "MIT",
+ "peer": true,
"dependencies": {
"@babel/code-frame": "^7.27.1",
"@babel/generator": "^7.28.3",
@@ -2557,6 +2558,7 @@
"integrity": "sha512-hHkbU/eoO3EG5/MZkuFSKmYqPbSVk5byPFa3e7y/8TybHiLMACgI8seVYlicwk7H5K/rI2px9xrQp/C+AUDTiQ==",
"devOptional": true,
"license": "MIT",
+ "peer": true,
"dependencies": {
"csstype": "^3.0.2"
}
@@ -2567,6 +2569,7 @@
"integrity": "sha512-qXRuZaOsAdXKFyOhRBg6Lqqc0yay13vN7KrIg4L7N4aaHN68ma9OK3NE1BoDFgFOTfM7zg+3/8+2n8rLUH3OKQ==",
"devOptional": true,
"license": "MIT",
+ "peer": true,
"peerDependencies": {
"@types/react": "^19.0.0"
}
@@ -2617,6 +2620,7 @@
"integrity": "sha512-B7RIQiTsCBBmY+yW4+ILd6mF5h1FUwJsVvpqkrgpszYifetQ2Ke+Z4u6aZh0CblkUGIdR59iYVyXqqZGkZ3aBw==",
"dev": true,
"license": "MIT",
+ "peer": true,
"dependencies": {
"@typescript-eslint/scope-manager": "8.43.0",
"@typescript-eslint/types": "8.43.0",
@@ -2869,6 +2873,7 @@
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"dev": true,
"license": "MIT",
+ "peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -3037,6 +3042,7 @@
}
],
"license": "MIT",
+ "peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.8.2",
"caniuse-lite": "^1.0.30001741",
@@ -3319,6 +3325,7 @@
"integrity": "sha512-QePbBFMJFjgmlE+cXAlbHZbHpdFVS2E/6vzCy7aKlebddvl1vadiC4JFV5u/wqTkNUwEV8WrQi257jf5f06hrg==",
"dev": true,
"license": "MIT",
+ "peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -4455,6 +4462,7 @@
}
],
"license": "MIT",
+ "peer": true,
"dependencies": {
"nanoid": "^3.3.11",
"picocolors": "^1.1.1",
@@ -4533,6 +4541,7 @@
"resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz",
"integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==",
"license": "MIT",
+ "peer": true,
"engines": {
"node": ">=0.10.0"
}
@@ -4542,6 +4551,7 @@
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz",
"integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==",
"license": "MIT",
+ "peer": true,
"dependencies": {
"scheduler": "^0.26.0"
},
@@ -4899,6 +4909,7 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
+ "peer": true,
"engines": {
"node": ">=12"
},
@@ -4957,6 +4968,7 @@
"integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
"dev": true,
"license": "Apache-2.0",
+ "peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -5079,6 +5091,7 @@
"integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==",
"dev": true,
"license": "MIT",
+ "peer": true,
"dependencies": {
"esbuild": "^0.27.0",
"fdir": "^6.5.0",
@@ -5172,6 +5185,7 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
+ "peer": true,
"engines": {
"node": ">=12"
},
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 74fb3fa47..7b3e4529a 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -1,13 +1,21 @@
import { StreamPage } from "./pages/StreamPage";
import { Toaster } from "./components/ui/sonner";
+import { FalProvider } from "./lib/falContext";
import "./index.css";
+// Get fal WebSocket URL and API key from environment variables
+// Set VITE_FAL_WS_URL to enable fal mode, e.g.:
+// VITE_FAL_WS_URL=wss://fal.run/your-username/scope-app/ws
+// VITE_FAL_KEY=your-fal-api-key
+const FAL_WS_URL = import.meta.env.VITE_FAL_WS_URL as string | undefined;
+const FAL_KEY = import.meta.env.VITE_FAL_KEY as string | undefined;
+
function App() {
return (
- <>
+ <FalProvider wsUrl={FAL_WS_URL} falKey={FAL_KEY}>
- </>
+ </FalProvider>
);
}
diff --git a/frontend/src/components/CloudModeToggle.tsx b/frontend/src/components/CloudModeToggle.tsx
new file mode 100644
index 000000000..2fe5b049a
--- /dev/null
+++ b/frontend/src/components/CloudModeToggle.tsx
@@ -0,0 +1,179 @@
+import { useState } from "react";
+import { Toggle } from "./ui/toggle";
+import { Input } from "./ui/input";
+import { LabelWithTooltip } from "./ui/label-with-tooltip";
+import { Loader2, Cloud, Monitor, AlertCircle } from "lucide-react";
+import type { CloudModeState } from "../types";
+
+interface CloudModeToggleProps {
+ cloudMode: CloudModeState;
+ onCloudModeChange: (cloudMode: Partial<CloudModeState>) => void;
+ disabled?: boolean;
+}
+
+export function CloudModeToggle({
+ cloudMode,
+ onCloudModeChange,
+ disabled = false,
+}: CloudModeToggleProps) {
+ const [isConnecting, setIsConnecting] = useState(false);
+
+ const handleToggle = async (enabled: boolean) => {
+ if (enabled) {
+ // Validate credentials before connecting
+ if (!cloudMode.appId || !cloudMode.apiKey) {
+ onCloudModeChange({
+ status: "error",
+ errorMessage: "Please enter fal App ID and API Key",
+ });
+ return;
+ }
+
+ setIsConnecting(true);
+ onCloudModeChange({ status: "connecting" });
+
+ try {
+ const response = await fetch("/api/v1/fal/connect", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ app_id: cloudMode.appId,
+ api_key: cloudMode.apiKey,
+ }),
+ });
+
+ if (!response.ok) {
+ const error = await response.json();
+ throw new Error(error.detail || "Failed to connect to cloud");
+ }
+
+ onCloudModeChange({
+ enabled: true,
+ status: "connected",
+ errorMessage: undefined,
+ });
+ } catch (error) {
+ onCloudModeChange({
+ enabled: false,
+ status: "error",
+ errorMessage:
+ error instanceof Error ? error.message : "Connection failed",
+ });
+ } finally {
+ setIsConnecting(false);
+ }
+ } else {
+ // Disconnect from cloud
+ setIsConnecting(true);
+ try {
+ await fetch("/api/v1/fal/disconnect", { method: "POST" });
+ onCloudModeChange({
+ enabled: false,
+ status: "disconnected",
+ errorMessage: undefined,
+ });
+ } catch (error) {
+ console.error("Failed to disconnect from cloud:", error);
+ // Still mark as disconnected since we're disabling
+ onCloudModeChange({
+ enabled: false,
+ status: "disconnected",
+ errorMessage: undefined,
+ });
+ } finally {
+ setIsConnecting(false);
+ }
+ }
+ };
+
+ const isLoading = isConnecting || cloudMode.status === "connecting";
+
+ return (
+
+ {/* Cloud Mode Toggle */}
+
+
+
+ {isLoading && }
+
+ {cloudMode.enabled ? (
+ <>
+
+ Cloud
+ >
+ ) : (
+ <>
+
+ Local
+ >
+ )}
+
+
+
+
+ {/* Status indicator */}
+ {cloudMode.status === "connected" && (
+
+ )}
+
+ {/* Error message */}
+ {cloudMode.status === "error" && cloudMode.errorMessage && (
+
+
+
+ {cloudMode.errorMessage}
+
+
+ )}
+
+ {/* Cloud credentials - always show for configuration */}
+
+
+ );
+}
diff --git a/frontend/src/components/SettingsPanel.tsx b/frontend/src/components/SettingsPanel.tsx
index 9af3ed09a..f5b9205bd 100644
--- a/frontend/src/components/SettingsPanel.tsx
+++ b/frontend/src/components/SettingsPanel.tsx
@@ -35,8 +35,10 @@ import type {
InputMode,
PipelineInfo,
VaeType,
+ CloudModeState,
} from "../types";
import { LoRAManager } from "./LoRAManager";
+import { CloudModeToggle } from "./CloudModeToggle";
// Minimum dimension for most pipelines (will be overridden by pipeline-specific minDimension from schema)
const DEFAULT_MIN_DIMENSION = 1;
@@ -101,6 +103,9 @@ interface SettingsPanelProps {
// Postprocessors
postprocessorIds?: string[];
onPostprocessorIdsChange?: (ids: string[]) => void;
+ // Cloud mode settings (server-side fal integration)
+ cloudMode?: CloudModeState;
+ onCloudModeChange?: (cloudMode: Partial<CloudModeState>) => void;
}
export function SettingsPanel({
@@ -149,6 +154,8 @@ export function SettingsPanel({
onPreprocessorIdsChange,
postprocessorIds = [],
onPostprocessorIdsChange,
+ cloudMode,
+ onCloudModeChange,
}: SettingsPanelProps) {
// Local slider state management hooks
const noiseScaleSlider = useLocalSliderValue(noiseScale, onNoiseScaleChange);
@@ -948,6 +955,15 @@ export function SettingsPanel({
)}
)}
+
+ {/* Cloud GPU Mode (server-side fal.ai integration) */}
+ {cloudMode && onCloudModeChange && (
+ <CloudModeToggle
+ cloudMode={cloudMode}
+ onCloudModeChange={onCloudModeChange}
+ />
+ )}
);
diff --git a/frontend/src/hooks/useApi.ts b/frontend/src/hooks/useApi.ts
new file mode 100644
index 000000000..d5459982e
--- /dev/null
+++ b/frontend/src/hooks/useApi.ts
@@ -0,0 +1,207 @@
+/**
+ * Unified API hook that automatically routes requests through FalAdapter
+ * when in fal mode, or uses direct HTTP when in local mode.
+ */
+
+import { useCallback } from "react";
+import { useFalContext } from "../lib/falContext";
+import * as api from "../lib/api";
+import type {
+ PipelineStatusResponse,
+ PipelineLoadRequest,
+ PipelineSchemasResponse,
+ HardwareInfoResponse,
+ LoRAFilesResponse,
+ AssetsResponse,
+ AssetFileInfo,
+ WebRTCOfferRequest,
+ WebRTCOfferResponse,
+} from "../lib/api";
+import type { IceServersResponse, ModelStatusResponse } from "../types";
+
+/**
+ * Hook that provides API functions that work in both local and fal modes.
+ *
+ * In fal mode, all requests go through the FalAdapter WebSocket.
+ * In local mode, requests go directly via HTTP fetch.
+ *
+ * Every callback is memoized on [adapter, isFalMode] so consumers get stable
+ * identities across renders while the mode is unchanged.
+ *
+ * NOTE(review): several return-type annotations below read as a bare
+ * `Promise` — the generic arguments (e.g. `Promise<PipelineStatusResponse>`)
+ * appear to have been stripped from this listing in transit; confirm against
+ * the original file.
+ */
+export function useApi() {
+ const { adapter, isFalMode, isReady } = useFalContext();
+
+ // Pipeline APIs
+ const getPipelineStatus = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.getPipelineStatus();
+ }
+ return api.getPipelineStatus();
+ }, [adapter, isFalMode]);
+
+ const loadPipeline = useCallback(
+ async (data: PipelineLoadRequest): Promise<{ message: string }> => {
+ if (isFalMode && adapter) {
+ return adapter.api.loadPipeline(data);
+ }
+ return api.loadPipeline(data);
+ },
+ [adapter, isFalMode]
+ );
+
+ const getPipelineSchemas = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.getPipelineSchemas();
+ }
+ return api.getPipelineSchemas();
+ }, [adapter, isFalMode]);
+
+ // Model APIs
+ const checkModelStatus = useCallback(
+ async (pipelineId: string): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.checkModelStatus(pipelineId);
+ }
+ return api.checkModelStatus(pipelineId);
+ },
+ [adapter, isFalMode]
+ );
+
+ const downloadPipelineModels = useCallback(
+ async (pipelineId: string): Promise<{ message: string }> => {
+ if (isFalMode && adapter) {
+ return adapter.api.downloadPipelineModels(pipelineId);
+ }
+ return api.downloadPipelineModels(pipelineId);
+ },
+ [adapter, isFalMode]
+ );
+
+ // Hardware APIs
+ const getHardwareInfo = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.getHardwareInfo();
+ }
+ return api.getHardwareInfo();
+ }, [adapter, isFalMode]);
+
+ // LoRA APIs
+ const listLoRAFiles = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.listLoRAFiles();
+ }
+ return api.listLoRAFiles();
+ }, [adapter, isFalMode]);
+
+ // Asset APIs
+ const listAssets = useCallback(
+ async (type?: "image" | "video"): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.listAssets(type);
+ }
+ return api.listAssets(type);
+ },
+ [adapter, isFalMode]
+ );
+
+ const uploadAsset = useCallback(
+ async (file: File): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.uploadAsset(file);
+ }
+ return api.uploadAsset(file);
+ },
+ [adapter, isFalMode]
+ );
+
+ // Logs
+ const fetchCurrentLogs = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.fetchCurrentLogs();
+ }
+ return api.fetchCurrentLogs();
+ }, [adapter, isFalMode]);
+
+ // Recording - note: in fal mode, we still use direct HTTP for binary download
+ const downloadRecording = useCallback(
+ async (sessionId: string): Promise => {
+ // Always use direct HTTP for binary downloads
+ // In fal mode, this may need the full URL
+ // TODO(review): in fal mode this still targets the local HTTP base —
+ // confirm whether it must be rewritten to the fal endpoint URL.
+ return api.downloadRecording(sessionId);
+ },
+ []
+ );
+
+ // WebRTC signaling
+ const getIceServers = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.getIceServers();
+ }
+ return api.getIceServers();
+ }, [adapter, isFalMode]);
+
+ const sendWebRTCOffer = useCallback(
+ async (data: WebRTCOfferRequest): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.sendOffer(
+ data.sdp || "",
+ data.type || "offer",
+ data.initialParameters
+ );
+ }
+ return api.sendWebRTCOffer(data);
+ },
+ [adapter, isFalMode]
+ );
+
+ const sendIceCandidates = useCallback(
+ async (
+ sessionId: string,
+ candidates: RTCIceCandidate | RTCIceCandidate[]
+ ): Promise => {
+ if (isFalMode && adapter) {
+ // Sent one at a time (sequential awaits) to preserve candidate order
+ // over the WebSocket.
+ const candidateArray = Array.isArray(candidates) ? candidates : [candidates];
+ for (const candidate of candidateArray) {
+ await adapter.sendIceCandidate(sessionId, candidate);
+ }
+ return;
+ }
+ return api.sendIceCandidates(sessionId, candidates);
+ },
+ [adapter, isFalMode]
+ );
+
+ return {
+ // State
+ isFalMode,
+ isReady,
+
+ // Pipeline
+ getPipelineStatus,
+ loadPipeline,
+ getPipelineSchemas,
+
+ // Models
+ checkModelStatus,
+ downloadPipelineModels,
+
+ // Hardware
+ getHardwareInfo,
+
+ // LoRA
+ listLoRAFiles,
+
+ // Assets
+ listAssets,
+ uploadAsset,
+ getAssetUrl: api.getAssetUrl, // This is just a URL builder, no API call
+
+ // Logs
+ fetchCurrentLogs,
+
+ // Recording
+ downloadRecording,
+
+ // WebRTC signaling
+ getIceServers,
+ sendWebRTCOffer,
+ sendIceCandidates,
+ };
+}
diff --git a/frontend/src/hooks/usePipeline.ts b/frontend/src/hooks/usePipeline.ts
index 30fa31268..9bbf4cb85 100644
--- a/frontend/src/hooks/usePipeline.ts
+++ b/frontend/src/hooks/usePipeline.ts
@@ -1,5 +1,6 @@
import { useState, useEffect, useCallback, useRef } from "react";
-import { loadPipeline, getPipelineStatus } from "../lib/api";
+import { loadPipeline as loadPipelineApi, getPipelineStatus as getPipelineStatusApi } from "../lib/api";
+import { useFalContext } from "../lib/falContext";
import type { PipelineStatusResponse, PipelineLoadParams } from "../lib/api";
import { toast } from "sonner";
@@ -10,6 +11,22 @@ interface UsePipelineOptions {
export function usePipeline(options: UsePipelineOptions = {}) {
const { pollInterval = 2000, maxTimeout = 600000 } = options;
+ const { adapter, isFalMode } = useFalContext();
+
+ // Helper functions that use fal adapter when available
+ const getPipelineStatus = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.getPipelineStatus();
+ }
+ return getPipelineStatusApi();
+ }, [adapter, isFalMode]);
+
+ const loadPipelineRequest = useCallback(async (data: { pipeline_ids: string[]; load_params?: PipelineLoadParams | null }) => {
+ if (isFalMode && adapter) {
+ return adapter.api.loadPipeline(data);
+ }
+ return loadPipelineApi(data);
+ }, [adapter, isFalMode]);
const [status, setStatus] =
useState("not_loaded");
@@ -155,7 +172,7 @@ export function usePipeline(options: UsePipelineOptions = {}) {
shownErrorRef.current = null; // Reset error tracking when starting new load
// Start the load request
- await loadPipeline({
+ await loadPipelineRequest({
pipeline_ids: pipelineIds,
load_params: loadParams,
});
@@ -241,7 +258,7 @@ export function usePipeline(options: UsePipelineOptions = {}) {
setIsLoading(false);
}
},
- [isLoading, maxTimeout, pollInterval, startPolling, stopPolling]
+ [isLoading, maxTimeout, pollInterval, startPolling, stopPolling, getPipelineStatus, loadPipelineRequest]
);
// Load pipeline with proper state management
diff --git a/frontend/src/hooks/usePipelines.ts b/frontend/src/hooks/usePipelines.ts
index d8fb47ac4..21622d4e4 100644
--- a/frontend/src/hooks/usePipelines.ts
+++ b/frontend/src/hooks/usePipelines.ts
@@ -1,8 +1,11 @@
import { useState, useEffect } from "react";
import { getPipelineSchemas } from "../lib/api";
+import { useFalContext } from "../lib/falContext";
import type { InputMode, PipelineInfo } from "../types";
export function usePipelines() {
+ const { adapter, isFalMode, isReady } = useFalContext();
+
const [pipelines, setPipelines] = useState(null);
useEffect(() => {
+ // In fal mode, wait until adapter is ready
+ if (isFalMode && !isReady) {
+ return;
+ }
+
let mounted = true;
async function fetchPipelines() {
try {
setIsLoading(true);
- const schemas = await getPipelineSchemas();
+
+ // Use adapter if in fal mode, otherwise direct API
+ const schemas = isFalMode && adapter
+ ? await adapter.api.getPipelineSchemas()
+ : await getPipelineSchemas();
if (!mounted) return;
@@ -95,7 +107,7 @@ export function usePipelines() {
return () => {
mounted = false;
};
- }, []);
+ }, [adapter, isFalMode, isReady]);
return { pipelines, isLoading, error };
}
diff --git a/frontend/src/hooks/useStreamState.ts b/frontend/src/hooks/useStreamState.ts
index 176fc55b0..69ee5ee6e 100644
--- a/frontend/src/hooks/useStreamState.ts
+++ b/frontend/src/hooks/useStreamState.ts
@@ -6,13 +6,15 @@ import type {
PromptData,
PipelineId,
InputMode,
+ CloudModeState,
} from "../types";
import {
- getHardwareInfo,
- getPipelineSchemas,
+ getHardwareInfo as getHardwareInfoApi,
+ getPipelineSchemas as getPipelineSchemasApi,
type HardwareInfoResponse,
type PipelineSchemasResponse,
} from "../lib/api";
+import { useFalContext } from "../lib/falContext";
// Generic fallback defaults used before schemas are loaded.
// Resolution and denoising steps use conservative values.
@@ -23,6 +25,34 @@ const BASE_FALLBACK = {
seed: 42,
};
+// Local storage key for cloud mode credentials
+const CLOUD_MODE_STORAGE_KEY = "daydream-scope-cloud-mode";
+
+// Default cloud mode state (not enabled, no credentials)
+const DEFAULT_CLOUD_MODE: CloudModeState = {
+ enabled: false,
+ appId: "",
+ apiKey: "",
+ status: "disconnected",
+};
+
+// Load cloud mode credentials from localStorage
+// Returns { appId, apiKey } when previously saved, or {} when nothing is
+// stored or the stored value is unreadable (storage blocked, corrupt JSON).
+// NOTE(review): the `Partial` return annotation appears to have lost its
+// generic argument in transit (likely `Partial<CloudModeState>`) — confirm.
+function loadCloudModeCredentials(): Partial {
+ try {
+ const stored = localStorage.getItem(CLOUD_MODE_STORAGE_KEY);
+ if (stored) {
+ const parsed = JSON.parse(stored);
+ // Coerce missing fields to "" so callers never see undefined.
+ return {
+ appId: parsed.appId || "",
+ apiKey: parsed.apiKey || "",
+ };
+ }
+ } catch (e) {
+ // Best-effort: a storage/parse failure just means no saved credentials.
+ console.warn("Failed to load cloud mode credentials from localStorage:", e);
+ }
+ return {};
+}
+
// Get fallback defaults for a pipeline before schemas are loaded
function getFallbackDefaults(mode?: InputMode) {
// Default to text mode if no mode specified (will be corrected when schemas load)
@@ -44,6 +74,23 @@ function getFallbackDefaults(mode?: InputMode) {
}
export function useStreamState() {
+ const { adapter, isFalMode, isReady } = useFalContext();
+
+ // Helper functions that use fal adapter when available
+ const getPipelineSchemas = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.getPipelineSchemas();
+ }
+ return getPipelineSchemasApi();
+ }, [adapter, isFalMode]);
+
+ const getHardwareInfo = useCallback(async (): Promise => {
+ if (isFalMode && adapter) {
+ return adapter.api.getHardwareInfo();
+ }
+ return getHardwareInfoApi();
+ }, [adapter, isFalMode]);
+
const [systemMetrics, setSystemMetrics] = useState({
cpu: 0,
gpu: 0,
@@ -139,6 +186,9 @@ export function useStreamState() {
// Get initial defaults (use fallback since schemas haven't loaded yet)
const initialDefaults = getFallbackDefaults("text");
+ // Load cloud mode credentials from localStorage
+ const savedCloudCredentials = loadCloudModeCredentials();
+
const [settings, setSettings] = useState({
pipelineId: "longlive",
resolution: {
@@ -155,6 +205,10 @@ export function useStreamState() {
paused: false,
loraMergeStrategy: "permanent_merge",
inputMode: initialDefaults.inputMode,
+ cloudMode: {
+ ...DEFAULT_CLOUD_MODE,
+ ...savedCloudCredentials,
+ },
});
const [promptData, setPromptData] = useState({
@@ -169,6 +223,11 @@ export function useStreamState() {
// Fetch pipeline schemas and hardware info on mount
useEffect(() => {
+ // In fal mode, wait until adapter is ready
+ if (isFalMode && !isReady) {
+ return;
+ }
+
const fetchInitialData = async () => {
try {
const [schemasResult, hardwareResult] = await Promise.allSettled([
@@ -218,7 +277,7 @@ export function useStreamState() {
};
fetchInitialData();
- }, []);
+ }, [isFalMode, isReady, getPipelineSchemas, getHardwareInfo]);
// Update inputMode when schemas load or pipeline changes
// This sets the correct default mode for the pipeline
@@ -291,6 +350,27 @@ export function useStreamState() {
// If no threshold is set, VACE remains enabled by default (from schema)
}, [settings.pipelineId, hardwareInfo, pipelineSchemas]);
+ // Persist cloud mode credentials to localStorage when they change
+ // Note: We only persist appId and apiKey, not the enabled state or status
+ // NOTE(review, security): the apiKey is written to localStorage in
+ // plaintext, where any script running on the origin (e.g. via XSS) can
+ // read it — consider short-lived/scoped tokens or an opt-in "remember me".
+ useEffect(() => {
+ if (settings.cloudMode) {
+ try {
+ localStorage.setItem(
+ CLOUD_MODE_STORAGE_KEY,
+ JSON.stringify({
+ appId: settings.cloudMode.appId,
+ apiKey: settings.cloudMode.apiKey,
+ })
+ );
+ } catch (e) {
+ // Best-effort: persistence failure (quota, private mode) is non-fatal.
+ console.warn(
+ "Failed to save cloud mode credentials to localStorage:",
+ e
+ );
+ }
+ }
+ }, [settings.cloudMode?.appId, settings.cloudMode?.apiKey]);
+
const updateMetrics = useCallback((newMetrics: Partial) => {
setSystemMetrics(prev => ({ ...prev, ...newMetrics }));
}, []);
diff --git a/frontend/src/hooks/useUnifiedWebRTC.ts b/frontend/src/hooks/useUnifiedWebRTC.ts
new file mode 100644
index 000000000..a74fb9cf6
--- /dev/null
+++ b/frontend/src/hooks/useUnifiedWebRTC.ts
@@ -0,0 +1,440 @@
+/**
+ * Unified WebRTC hook that automatically uses the right implementation
+ * based on whether we're in fal mode or local mode.
+ */
+
+import { useState, useEffect, useRef, useCallback } from "react";
+import { useFalContext } from "../lib/falContext";
+import {
+ sendWebRTCOffer,
+ sendIceCandidates,
+ getIceServers,
+ type PromptItem,
+ type PromptTransition,
+} from "../lib/api";
+import { toast } from "sonner";
+
+// Pipeline parameters bundled with the initial WebRTC offer (passed through
+// to the backend by sendOffer/sendWebRTCOffer). All fields are optional.
+interface InitialParameters {
+ prompts?: string[] | PromptItem[];
+ prompt_interpolation_method?: "linear" | "slerp";
+ transition?: PromptTransition;
+ denoising_step_list?: number[];
+ noise_scale?: number;
+ noise_controller?: boolean;
+ manage_cache?: boolean;
+ kv_cache_attention_bias?: number;
+ vace_ref_images?: string[];
+ vace_context_scale?: number;
+ pipeline_ids?: string[];
+ images?: string[];
+ first_frame_image?: string;
+ last_frame_image?: string;
+}
+
+// Options accepted by useUnifiedWebRTC.
+interface UseUnifiedWebRTCOptions {
+ /** Callback function called when the stream stops on the backend */
+ onStreamStop?: () => void;
+}
+
+/**
+ * Unified WebRTC hook that works in both local and fal modes.
+ *
+ * In local mode, uses direct HTTP for signaling.
+ * In fal mode, uses the FalAdapter WebSocket for signaling.
+ *
+ * Returns the remote MediaStream, connection flags, and controls
+ * (startStream / stopStream / updateVideoTrack / sendParameterUpdate).
+ *
+ * NOTE(review): generic type arguments (useState/useRef/Promise) appear to
+ * have been stripped from this listing in transit — confirm against the
+ * original file.
+ *
+ * NOTE(review): `options` is in startStream's dependency array; a caller
+ * passing a fresh object literal each render re-creates startStream every
+ * render. Callers should memoize the options object.
+ */
+export function useUnifiedWebRTC(options?: UseUnifiedWebRTCOptions) {
+ const { adapter, isFalMode } = useFalContext();
+
+ const [remoteStream, setRemoteStream] = useState(null);
+ const [connectionState, setConnectionState] =
+ useState("new");
+ const [isConnecting, setIsConnecting] = useState(false);
+ const [isStreaming, setIsStreaming] = useState(false);
+
+ // Refs (not state) so WebRTC callbacks always see current values without
+ // re-binding, and updates do not trigger re-renders.
+ const peerConnectionRef = useRef(null);
+ const dataChannelRef = useRef(null);
+ const currentStreamRef = useRef(null);
+ const sessionIdRef = useRef(null);
+ // ICE candidates generated before the answer (and session ID) arrives are
+ // queued here and flushed once the session ID is known.
+ const queuedCandidatesRef = useRef([]);
+
+ // Helper to get ICE servers; falls back to Google's public STUN server on
+ // any fetch failure.
+ const fetchIceServers = useCallback(async (): Promise => {
+ try {
+ console.log("[UnifiedWebRTC] Fetching ICE servers...");
+ let iceServersResponse;
+
+ if (isFalMode && adapter) {
+ iceServersResponse = await adapter.getIceServers();
+ } else {
+ iceServersResponse = await getIceServers();
+ }
+
+ console.log(
+ `[UnifiedWebRTC] Using ${iceServersResponse.iceServers.length} ICE servers`
+ );
+ return { iceServers: iceServersResponse.iceServers };
+ } catch (error) {
+ console.warn(
+ "[UnifiedWebRTC] Failed to fetch ICE servers, using default STUN:",
+ error
+ );
+ return { iceServers: [{ urls: "stun:stun.l.google.com:19302" }] };
+ }
+ }, [adapter, isFalMode]);
+
+ // Helper to send SDP offer
+ const sendOffer = useCallback(
+ async (
+ sdp: string,
+ type: string,
+ initialParameters?: InitialParameters
+ ) => {
+ if (isFalMode && adapter) {
+ return adapter.sendOffer(sdp, type, initialParameters);
+ }
+ return sendWebRTCOffer({
+ sdp,
+ type,
+ initialParameters,
+ });
+ },
+ [adapter, isFalMode]
+ );
+
+ // Helper to send ICE candidate
+ const sendIceCandidate = useCallback(
+ async (sessionId: string, candidate: RTCIceCandidate) => {
+ if (isFalMode && adapter) {
+ await adapter.sendIceCandidate(sessionId, candidate);
+ } else {
+ await sendIceCandidates(sessionId, candidate);
+ }
+ },
+ [adapter, isFalMode]
+ );
+
+ // Opens a new peer connection, negotiates with the backend, and wires up
+ // the "parameters" data channel. No-op when a connection is already in
+ // progress or established (guard below).
+ const startStream = useCallback(
+ async (initialParameters?: InitialParameters, stream?: MediaStream) => {
+ if (isConnecting || peerConnectionRef.current) return;
+
+ setIsConnecting(true);
+
+ try {
+ currentStreamRef.current = stream || null;
+
+ // Fetch ICE servers
+ const config = await fetchIceServers();
+
+ const pc = new RTCPeerConnection(config);
+ peerConnectionRef.current = pc;
+
+ // Create data channel for parameter updates
+ const dataChannel = pc.createDataChannel("parameters", {
+ ordered: true,
+ });
+ dataChannelRef.current = dataChannel;
+
+ dataChannel.onopen = () => {
+ console.log("[UnifiedWebRTC] Data channel opened");
+ };
+
+ dataChannel.onmessage = (event) => {
+ console.log("[UnifiedWebRTC] Data channel message:", event.data);
+
+ try {
+ const data = JSON.parse(event.data);
+
+ // Handle stream stop notification from backend
+ if (data.type === "stream_stopped") {
+ console.log("[UnifiedWebRTC] Stream stopped by backend");
+ setIsStreaming(false);
+ setIsConnecting(false);
+ setRemoteStream(null);
+
+ if (data.error_message) {
+ toast.error("Stream Error", {
+ description: data.error_message,
+ duration: 5000,
+ });
+ }
+
+ if (peerConnectionRef.current) {
+ peerConnectionRef.current.close();
+ peerConnectionRef.current = null;
+ }
+
+ options?.onStreamStop?.();
+ }
+ } catch (error) {
+ console.error(
+ "[UnifiedWebRTC] Failed to parse data channel message:",
+ error
+ );
+ }
+ };
+
+ dataChannel.onerror = (error) => {
+ console.error("[UnifiedWebRTC] Data channel error:", error);
+ };
+
+ // Add video track for sending to server
+ let transceiver: RTCRtpTransceiver | undefined;
+ if (stream) {
+ stream.getTracks().forEach((track) => {
+ if (track.kind === "video") {
+ console.log("[UnifiedWebRTC] Adding video track for sending");
+ const sender = pc.addTrack(track, stream);
+ transceiver = pc.getTransceivers().find((t) => t.sender === sender);
+ }
+ });
+ } else {
+ console.log(
+ "[UnifiedWebRTC] No video stream - adding transceiver for no-input pipeline"
+ );
+ transceiver = pc.addTransceiver("video");
+ }
+
+ // Force VP8-only for aiortc compatibility
+ if (transceiver) {
+ const codecs = RTCRtpReceiver.getCapabilities("video")?.codecs || [];
+ const vp8Codecs = codecs.filter(
+ (c) => c.mimeType.toLowerCase() === "video/vp8"
+ );
+ if (vp8Codecs.length > 0) {
+ transceiver.setCodecPreferences(vp8Codecs);
+ console.log("[UnifiedWebRTC] Forced VP8-only codec");
+ }
+ }
+
+ // Event handlers
+ pc.ontrack = (evt: RTCTrackEvent) => {
+ if (evt.streams && evt.streams[0]) {
+ console.log("[UnifiedWebRTC] Setting remote stream");
+ setRemoteStream(evt.streams[0]);
+ }
+ };
+
+ pc.onconnectionstatechange = () => {
+ console.log("[UnifiedWebRTC] Connection state:", pc.connectionState);
+ setConnectionState(pc.connectionState);
+
+ if (pc.connectionState === "connected") {
+ setIsConnecting(false);
+ setIsStreaming(true);
+
+ // Log negotiated codec
+ const senders = pc.getSenders();
+ const videoSender = senders.find((s) => s.track?.kind === "video");
+ if (videoSender) {
+ const params = videoSender.getParameters();
+ const codec = params.codecs?.[0];
+ if (codec) {
+ console.log(`[UnifiedWebRTC] Negotiated codec: ${codec.mimeType}`);
+ }
+ }
+ } else if (
+ pc.connectionState === "disconnected" ||
+ pc.connectionState === "failed"
+ ) {
+ setIsConnecting(false);
+ setIsStreaming(false);
+ }
+ };
+
+ pc.oniceconnectionstatechange = () => {
+ console.log("[UnifiedWebRTC] ICE state:", pc.iceConnectionState);
+ };
+
+ pc.onicecandidate = async ({ candidate }: RTCPeerConnectionIceEvent) => {
+ if (candidate) {
+ console.log("[UnifiedWebRTC] ICE candidate generated");
+
+ if (sessionIdRef.current) {
+ try {
+ await sendIceCandidate(sessionIdRef.current, candidate);
+ console.log("[UnifiedWebRTC] Sent ICE candidate");
+ } catch (error) {
+ console.error("[UnifiedWebRTC] Failed to send ICE candidate:", error);
+ }
+ } else {
+ console.log("[UnifiedWebRTC] Queuing ICE candidate (no session ID yet)");
+ queuedCandidatesRef.current.push(candidate);
+ }
+ } else {
+ console.log("[UnifiedWebRTC] ICE gathering complete");
+ }
+ };
+
+ // Create and send offer
+ const offer = await pc.createOffer();
+ await pc.setLocalDescription(offer);
+
+ console.log("[UnifiedWebRTC] Sending offer");
+ try {
+ const answer = await sendOffer(
+ pc.localDescription!.sdp,
+ pc.localDescription!.type,
+ initialParameters
+ );
+
+ console.log("[UnifiedWebRTC] Received answer, sessionId:", answer.sessionId);
+ sessionIdRef.current = answer.sessionId;
+
+ // Flush queued ICE candidates
+ if (queuedCandidatesRef.current.length > 0) {
+ console.log(
+ `[UnifiedWebRTC] Flushing ${queuedCandidatesRef.current.length} queued candidates`
+ );
+ // Sequential awaits preserve candidate ordering.
+ for (const candidate of queuedCandidatesRef.current) {
+ try {
+ await sendIceCandidate(sessionIdRef.current, candidate);
+ } catch (error) {
+ console.error(
+ "[UnifiedWebRTC] Failed to send queued candidate:",
+ error
+ );
+ }
+ }
+ queuedCandidatesRef.current = [];
+ }
+
+ await pc.setRemoteDescription({
+ sdp: answer.sdp,
+ type: answer.type as RTCSdpType,
+ });
+ } catch (error) {
+ console.error("[UnifiedWebRTC] Offer/answer exchange failed:", error);
+ // NOTE(review): the failed RTCPeerConnection is left in
+ // peerConnectionRef here, so the guard at the top of startStream
+ // blocks retries until stopStream is called — confirm intended.
+ setIsConnecting(false);
+ }
+ } catch (error) {
+ console.error("[UnifiedWebRTC] Failed to start stream:", error);
+ // NOTE(review): same as above — pc is not closed/cleared on failure.
+ setIsConnecting(false);
+ }
+ },
+ [
+ isConnecting,
+ options,
+ fetchIceServers,
+ sendOffer,
+ sendIceCandidate,
+ ]
+ );
+
+ // Replaces the outgoing video track in-place (no renegotiation). Returns
+ // true on success, false when there is no active sender/stream or the
+ // replacement fails.
+ const updateVideoTrack = useCallback(
+ async (newStream: MediaStream) => {
+ if (peerConnectionRef.current && isStreaming) {
+ try {
+ const videoTrack = newStream.getVideoTracks()[0];
+ if (!videoTrack) {
+ console.error("[UnifiedWebRTC] No video track in new stream");
+ return false;
+ }
+
+ const sender = peerConnectionRef.current
+ .getSenders()
+ .find((s) => s.track?.kind === "video");
+
+ if (sender) {
+ console.log("[UnifiedWebRTC] Replacing video track");
+ await sender.replaceTrack(videoTrack);
+ currentStreamRef.current = newStream;
+ return true;
+ } else {
+ console.error("[UnifiedWebRTC] No video sender found");
+ return false;
+ }
+ } catch (error) {
+ console.error("[UnifiedWebRTC] Failed to replace track:", error);
+ return false;
+ }
+ }
+ return false;
+ },
+ [isStreaming]
+ );
+
+ // Sends a parameter-update JSON message over the data channel; undefined
+ // and null values are dropped before serialization.
+ const sendParameterUpdate = useCallback(
+ (params: {
+ prompts?: string[] | PromptItem[];
+ prompt_interpolation_method?: "linear" | "slerp";
+ transition?: PromptTransition;
+ denoising_step_list?: number[];
+ noise_scale?: number;
+ noise_controller?: boolean;
+ manage_cache?: boolean;
+ reset_cache?: boolean;
+ kv_cache_attention_bias?: number;
+ paused?: boolean;
+ spout_sender?: { enabled: boolean; name: string };
+ spout_receiver?: { enabled: boolean; name: string };
+ vace_ref_images?: string[];
+ vace_use_input_video?: boolean;
+ vace_context_scale?: number;
+ ctrl_input?: { button: string[]; mouse: [number, number] };
+ images?: string[];
+ first_frame_image?: string;
+ last_frame_image?: string;
+ }) => {
+ if (
+ dataChannelRef.current &&
+ dataChannelRef.current.readyState === "open"
+ ) {
+ try {
+ const filteredParams: Record = {};
+ for (const [key, value] of Object.entries(params)) {
+ if (value !== undefined && value !== null) {
+ filteredParams[key] = value;
+ }
+ }
+
+ const message = JSON.stringify(filteredParams);
+ dataChannelRef.current.send(message);
+ console.log("[UnifiedWebRTC] Sent parameter update:", filteredParams);
+ } catch (error) {
+ console.error("[UnifiedWebRTC] Failed to send parameter update:", error);
+ }
+ } else {
+ console.warn("[UnifiedWebRTC] Data channel not available");
+ }
+ },
+ []
+ );
+
+ // Tears down the connection and resets all per-session state.
+ // NOTE(review): isConnecting is not reset here; stopping mid-connect
+ // leaves it true until a connection-state callback fires.
+ const stopStream = useCallback(() => {
+ if (peerConnectionRef.current) {
+ peerConnectionRef.current.close();
+ peerConnectionRef.current = null;
+ }
+
+ dataChannelRef.current = null;
+ currentStreamRef.current = null;
+ sessionIdRef.current = null;
+ queuedCandidatesRef.current = [];
+
+ setRemoteStream(null);
+ setConnectionState("new");
+ setIsStreaming(false);
+ }, []);
+
+ // Cleanup on unmount
+ useEffect(() => {
+ return () => {
+ if (peerConnectionRef.current) {
+ peerConnectionRef.current.close();
+ }
+ };
+ }, []);
+
+ return {
+ remoteStream,
+ connectionState,
+ isConnecting,
+ isStreaming,
+ peerConnectionRef,
+ // NOTE(review): sessionIdRef.current is read at render time; ref updates
+ // don't re-render, so this value can be stale until the next render.
+ sessionId: sessionIdRef.current,
+ startStream,
+ stopStream,
+ updateVideoTrack,
+ sendParameterUpdate,
+ };
+}
diff --git a/frontend/src/hooks/useWebRTCFal.ts b/frontend/src/hooks/useWebRTCFal.ts
new file mode 100644
index 000000000..bee90afd4
--- /dev/null
+++ b/frontend/src/hooks/useWebRTCFal.ts
@@ -0,0 +1,385 @@
+/**
+ * WebRTC hook for fal.ai deployment
+ *
+ * This is a drop-in replacement for useWebRTC that routes all signaling
+ * through the FalAdapter WebSocket connection instead of direct HTTP calls.
+ *
+ * Usage:
+ * // In your app initialization
+ * import { initFalAdapter } from "../lib/falAdapter";
+ * const adapter = initFalAdapter("wss://your-fal-endpoint/ws");
+ * await adapter.connect();
+ *
+ * // In your component (same interface as useWebRTC)
+ * const { startStream, stopStream, ... } = useWebRTCFal({ adapter });
+ */
+
+import { useState, useEffect, useRef, useCallback } from "react";
+import { toast } from "sonner";
+import type { FalAdapter } from "../lib/falAdapter";
+import type { PromptItem, PromptTransition } from "../lib/api";
+
+// Pipeline parameters bundled with the initial WebRTC offer (passed through
+// to the backend via adapter.sendOffer). All fields are optional.
+interface InitialParameters {
+ prompts?: string[] | PromptItem[];
+ prompt_interpolation_method?: "linear" | "slerp";
+ transition?: PromptTransition;
+ denoising_step_list?: number[];
+ noise_scale?: number;
+ noise_controller?: boolean;
+ manage_cache?: boolean;
+ kv_cache_attention_bias?: number;
+ vace_ref_images?: string[];
+ vace_context_scale?: number;
+ pipeline_ids?: string[];
+ images?: string[];
+ first_frame_image?: string;
+ last_frame_image?: string;
+}
+
+// Options accepted by useWebRTCFal. Unlike useUnifiedWebRTC, the adapter is
+// required here — this hook has no local-HTTP fallback.
+interface UseWebRTCFalOptions {
+ /** The FalAdapter instance to use for signaling */
+ adapter: FalAdapter;
+ /** Callback function called when the stream stops on the backend */
+ onStreamStop?: () => void;
+}
+
+/**
+ * Hook for managing WebRTC connections via fal.ai WebSocket signaling.
+ *
+ * This provides the same interface as useWebRTC but routes signaling
+ * through the FalAdapter WebSocket connection.
+ *
+ * NOTE(review): this hook largely duplicates useUnifiedWebRTC (same state
+ * machine, handlers, and data-channel protocol, minus the local-mode
+ * branches) — consider consolidating to avoid divergent fixes.
+ *
+ * NOTE(review): generic type arguments (useState/useRef) appear to have
+ * been stripped from this listing in transit — confirm against the
+ * original file.
+ */
+export function useWebRTCFal(options: UseWebRTCFalOptions) {
+ const { adapter, onStreamStop } = options;
+
+ const [remoteStream, setRemoteStream] = useState(null);
+ const [connectionState, setConnectionState] =
+ useState("new");
+ const [isConnecting, setIsConnecting] = useState(false);
+ const [isStreaming, setIsStreaming] = useState(false);
+
+ // Refs so WebRTC callbacks always see current values without re-binding.
+ const peerConnectionRef = useRef(null);
+ const dataChannelRef = useRef(null);
+ const currentStreamRef = useRef(null);
+ const sessionIdRef = useRef(null);
+ // Candidates generated before the answer (and session ID) arrives are
+ // queued here and flushed in one batch once the session ID is known.
+ const queuedCandidatesRef = useRef([]);
+
+ // Opens a new peer connection and negotiates via the FalAdapter WebSocket.
+ // No-op when a connection is already in progress or established.
+ const startStream = useCallback(
+ async (initialParameters?: InitialParameters, stream?: MediaStream) => {
+ if (isConnecting || peerConnectionRef.current) return;
+
+ setIsConnecting(true);
+
+ try {
+ currentStreamRef.current = stream || null;
+
+ // Fetch ICE servers via FalAdapter
+ console.log("[WebRTCFal] Fetching ICE servers via FalAdapter...");
+ let config: RTCConfiguration;
+ try {
+ const iceServersResponse = await adapter.getIceServers();
+ config = {
+ iceServers: iceServersResponse.iceServers,
+ };
+ console.log(
+ `[WebRTCFal] Using ${iceServersResponse.iceServers.length} ICE servers`
+ );
+ } catch (error) {
+ // Fall back to Google's public STUN server.
+ console.warn(
+ "[WebRTCFal] Failed to fetch ICE servers, using default STUN:",
+ error
+ );
+ config = {
+ iceServers: [{ urls: "stun:stun.l.google.com:19302" }],
+ };
+ }
+
+ const pc = new RTCPeerConnection(config);
+ peerConnectionRef.current = pc;
+
+ // Create data channel for parameter updates
+ const dataChannel = pc.createDataChannel("parameters", {
+ ordered: true,
+ });
+ dataChannelRef.current = dataChannel;
+
+ dataChannel.onopen = () => {
+ console.log("[WebRTCFal] Data channel opened");
+ };
+
+ dataChannel.onmessage = (event) => {
+ console.log("[WebRTCFal] Data channel message:", event.data);
+
+ try {
+ const data = JSON.parse(event.data);
+
+ // Handle stream stop notification from backend
+ if (data.type === "stream_stopped") {
+ console.log("[WebRTCFal] Stream stopped by backend");
+ setIsStreaming(false);
+ setIsConnecting(false);
+ setRemoteStream(null);
+
+ if (data.error_message) {
+ toast.error("Stream Error", {
+ description: data.error_message,
+ duration: 5000,
+ });
+ }
+
+ if (peerConnectionRef.current) {
+ peerConnectionRef.current.close();
+ peerConnectionRef.current = null;
+ }
+
+ onStreamStop?.();
+ }
+ } catch (error) {
+ console.error("[WebRTCFal] Failed to parse data channel message:", error);
+ }
+ };
+
+ dataChannel.onerror = (error) => {
+ console.error("[WebRTCFal] Data channel error:", error);
+ };
+
+ // Add video track for sending to server
+ let transceiver: RTCRtpTransceiver | undefined;
+ if (stream) {
+ stream.getTracks().forEach((track) => {
+ if (track.kind === "video") {
+ console.log("[WebRTCFal] Adding video track for sending");
+ const sender = pc.addTrack(track, stream);
+ transceiver = pc.getTransceivers().find((t) => t.sender === sender);
+ }
+ });
+ } else {
+ console.log("[WebRTCFal] No video stream - adding transceiver for no-input pipeline");
+ transceiver = pc.addTransceiver("video");
+ }
+
+ // Force VP8-only for aiortc compatibility
+ if (transceiver) {
+ const codecs = RTCRtpReceiver.getCapabilities("video")?.codecs || [];
+ const vp8Codecs = codecs.filter(
+ (c) => c.mimeType.toLowerCase() === "video/vp8"
+ );
+ if (vp8Codecs.length > 0) {
+ transceiver.setCodecPreferences(vp8Codecs);
+ console.log("[WebRTCFal] Forced VP8-only codec");
+ }
+ }
+
+ // Event handlers
+ pc.ontrack = (evt: RTCTrackEvent) => {
+ if (evt.streams && evt.streams[0]) {
+ console.log("[WebRTCFal] Setting remote stream");
+ setRemoteStream(evt.streams[0]);
+ }
+ };
+
+ pc.onconnectionstatechange = () => {
+ console.log("[WebRTCFal] Connection state:", pc.connectionState);
+ setConnectionState(pc.connectionState);
+
+ if (pc.connectionState === "connected") {
+ setIsConnecting(false);
+ setIsStreaming(true);
+ } else if (
+ pc.connectionState === "disconnected" ||
+ pc.connectionState === "failed"
+ ) {
+ setIsConnecting(false);
+ setIsStreaming(false);
+ }
+ };
+
+ pc.oniceconnectionstatechange = () => {
+ console.log("[WebRTCFal] ICE state:", pc.iceConnectionState);
+ };
+
+ pc.onicecandidate = async ({ candidate }: RTCPeerConnectionIceEvent) => {
+ if (candidate) {
+ console.log("[WebRTCFal] ICE candidate generated");
+
+ if (sessionIdRef.current) {
+ try {
+ await adapter.sendIceCandidate(sessionIdRef.current, candidate);
+ console.log("[WebRTCFal] Sent ICE candidate via FalAdapter");
+ } catch (error) {
+ console.error("[WebRTCFal] Failed to send ICE candidate:", error);
+ }
+ } else {
+ console.log("[WebRTCFal] Queuing ICE candidate (no session ID yet)");
+ queuedCandidatesRef.current.push(candidate);
+ }
+ } else {
+ console.log("[WebRTCFal] ICE gathering complete");
+ }
+ };
+
+ // Create and send offer via FalAdapter
+ const offer = await pc.createOffer();
+ await pc.setLocalDescription(offer);
+
+ console.log("[WebRTCFal] Sending offer via FalAdapter");
+ try {
+ const answer = await adapter.sendOffer(
+ pc.localDescription!.sdp,
+ pc.localDescription!.type,
+ initialParameters
+ );
+
+ console.log("[WebRTCFal] Received answer, sessionId:", answer.sessionId);
+ sessionIdRef.current = answer.sessionId;
+
+ // Flush queued ICE candidates
+ if (queuedCandidatesRef.current.length > 0) {
+ console.log(
+ `[WebRTCFal] Flushing ${queuedCandidatesRef.current.length} queued candidates`
+ );
+ try {
+ // Batch send (unlike useUnifiedWebRTC's one-at-a-time flush).
+ await adapter.sendIceCandidates(
+ sessionIdRef.current,
+ queuedCandidatesRef.current
+ );
+ } catch (error) {
+ console.error("[WebRTCFal] Failed to send queued candidates:", error);
+ }
+ queuedCandidatesRef.current = [];
+ }
+
+ await pc.setRemoteDescription({
+ sdp: answer.sdp,
+ type: answer.type as RTCSdpType,
+ });
+ } catch (error) {
+ console.error("[WebRTCFal] Offer/answer exchange failed:", error);
+ // NOTE(review): the failed RTCPeerConnection stays in
+ // peerConnectionRef, so the guard at the top of startStream blocks
+ // retries until stopStream is called — confirm intended.
+ setIsConnecting(false);
+ }
+ } catch (error) {
+ console.error("[WebRTCFal] Failed to start stream:", error);
+ // NOTE(review): same as above — pc is not closed/cleared on failure.
+ setIsConnecting(false);
+ }
+ },
+ [adapter, isConnecting, onStreamStop]
+ );
+
+ // Replaces the outgoing video track in-place (no renegotiation). Returns
+ // true on success, false when there is no active sender/stream or the
+ // replacement fails.
+ const updateVideoTrack = useCallback(
+ async (newStream: MediaStream) => {
+ if (peerConnectionRef.current && isStreaming) {
+ try {
+ const videoTrack = newStream.getVideoTracks()[0];
+ if (!videoTrack) {
+ console.error("[WebRTCFal] No video track in new stream");
+ return false;
+ }
+
+ const sender = peerConnectionRef.current
+ .getSenders()
+ .find((s) => s.track?.kind === "video");
+
+ if (sender) {
+ console.log("[WebRTCFal] Replacing video track");
+ await sender.replaceTrack(videoTrack);
+ currentStreamRef.current = newStream;
+ return true;
+ } else {
+ console.error("[WebRTCFal] No video sender found");
+ return false;
+ }
+ } catch (error) {
+ console.error("[WebRTCFal] Failed to replace track:", error);
+ return false;
+ }
+ }
+ return false;
+ },
+ [isStreaming]
+ );
+
+ // Sends a parameter-update JSON message over the data channel; undefined
+ // and null values are dropped before serialization.
+ const sendParameterUpdate = useCallback(
+ (params: {
+ prompts?: string[] | PromptItem[];
+ prompt_interpolation_method?: "linear" | "slerp";
+ transition?: PromptTransition;
+ denoising_step_list?: number[];
+ noise_scale?: number;
+ noise_controller?: boolean;
+ manage_cache?: boolean;
+ reset_cache?: boolean;
+ kv_cache_attention_bias?: number;
+ paused?: boolean;
+ spout_sender?: { enabled: boolean; name: string };
+ spout_receiver?: { enabled: boolean; name: string };
+ vace_ref_images?: string[];
+ vace_use_input_video?: boolean;
+ vace_context_scale?: number;
+ ctrl_input?: { button: string[]; mouse: [number, number] };
+ images?: string[];
+ first_frame_image?: string;
+ last_frame_image?: string;
+ }) => {
+ if (
+ dataChannelRef.current &&
+ dataChannelRef.current.readyState === "open"
+ ) {
+ try {
+ const filteredParams: Record = {};
+ for (const [key, value] of Object.entries(params)) {
+ if (value !== undefined && value !== null) {
+ filteredParams[key] = value;
+ }
+ }
+
+ const message = JSON.stringify(filteredParams);
+ dataChannelRef.current.send(message);
+ console.log("[WebRTCFal] Sent parameter update:", filteredParams);
+ } catch (error) {
+ console.error("[WebRTCFal] Failed to send parameter update:", error);
+ }
+ } else {
+ console.warn("[WebRTCFal] Data channel not available");
+ }
+ },
+ []
+ );
+
+ // Tears down the connection and resets all per-session state.
+ // NOTE(review): isConnecting is not reset here; stopping mid-connect
+ // leaves it true until a connection-state callback fires.
+ const stopStream = useCallback(() => {
+ if (peerConnectionRef.current) {
+ peerConnectionRef.current.close();
+ peerConnectionRef.current = null;
+ }
+
+ dataChannelRef.current = null;
+ currentStreamRef.current = null;
+ sessionIdRef.current = null;
+ queuedCandidatesRef.current = [];
+
+ setRemoteStream(null);
+ setConnectionState("new");
+ setIsStreaming(false);
+ }, []);
+
+ // Cleanup on unmount
+ useEffect(() => {
+ return () => {
+ if (peerConnectionRef.current) {
+ peerConnectionRef.current.close();
+ }
+ };
+ }, []);
+
+ return {
+ remoteStream,
+ connectionState,
+ isConnecting,
+ isStreaming,
+ peerConnectionRef,
+ // NOTE(review): sessionIdRef.current is read at render time; ref updates
+ // don't re-render, so this value can be stale until the next render.
+ sessionId: sessionIdRef.current,
+ startStream,
+ stopStream,
+ updateVideoTrack,
+ sendParameterUpdate,
+ };
+}
diff --git a/frontend/src/lib/falAdapter.ts b/frontend/src/lib/falAdapter.ts
new file mode 100644
index 000000000..41d309351
--- /dev/null
+++ b/frontend/src/lib/falAdapter.ts
@@ -0,0 +1,485 @@
+/**
+ * fal.ai WebSocket Adapter for Scope
+ *
+ * This adapter routes all API calls and WebRTC signaling through a single
+ * WebSocket connection to the fal.ai endpoint, preventing fal from spawning
+ * new runner instances for each request.
+ *
+ * Usage:
+ * const adapter = new FalAdapter("wss://your-fal-endpoint/ws", "your-api-key");
+ * await adapter.connect();
+ *
+ * // Use like regular API
+ * const status = await adapter.api.getPipelineStatus();
+ *
+ * // WebRTC signaling
+ * const iceServers = await adapter.getIceServers();
+ * const answer = await adapter.sendOffer(sdp, type, initialParams);
+ * await adapter.sendIceCandidate(sessionId, candidate);
+ *
+ * Authentication:
+ * The API key is passed as a query parameter (fal_jwt_token) since
+ * browser WebSocket API doesn't support custom headers.
+ */
+
+import type {
+ IceServersResponse,
+ ModelStatusResponse,
+} from "../types";
+import type {
+ WebRTCOfferRequest,
+ WebRTCOfferResponse,
+ PipelineStatusResponse,
+ PipelineLoadRequest,
+ PipelineSchemasResponse,
+ HardwareInfoResponse,
+ LoRAFilesResponse,
+ AssetsResponse,
+ AssetFileInfo,
+} from "./api";
+
+type MessageHandler = (response: ApiResponse) => void;
+
+/**
+ * Envelope for every message received over the fal WebSocket.
+ * A message is either a reply to a pending request (matched by
+ * `request_id`) or a server-pushed event.
+ */
+interface ApiResponse {
+  type: string;
+  request_id?: string;
+  /** HTTP-like status code for proxied REST calls. */
+  status?: number;
+  data?: unknown;
+  error?: string;
+  // WebRTC specific
+  sdp?: string;
+  sdp_type?: string;
+  sessionId?: string;
+  candidate?: RTCIceCandidateInit | null;
+}
+
+/** Bookkeeping for an in-flight request awaiting its response. */
+interface PendingRequest {
+  resolve: (value: unknown) => void;
+  reject: (error: Error) => void;
+  // Timer that rejects the request when no response arrives in time.
+  // (Restored stripped type argument: ReturnType<typeof setTimeout>.)
+  timeout: ReturnType<typeof setTimeout>;
+}
+
+/**
+ * Routes Scope API calls and WebRTC signaling over a single multiplexed
+ * WebSocket connection to a fal.ai endpoint.
+ *
+ * Request/response matching: every outgoing message carries a unique
+ * `request_id`; replies with the same id resolve the corresponding
+ * pending promise. Messages without a known id are treated as
+ * server-pushed events and fanned out to registered handlers.
+ *
+ * All stripped generic type parameters (Map/Set/Promise/Record and the
+ * apiRequest<T> helper) are restored from their usage below.
+ */
+export class FalAdapter {
+  private ws: WebSocket | null = null;
+  private wsUrl: string;
+  private apiKey: string | null = null;
+  // In-flight requests keyed by request_id.
+  private pendingRequests: Map<string, PendingRequest> = new Map();
+  private requestCounter = 0;
+  private isReady = false;
+  private readyPromise: Promise<void> | null = null;
+  private readyResolve: (() => void) | null = null;
+  private reconnectAttempts = 0;
+  private maxReconnectAttempts = 5;
+  private messageHandlers: Set<MessageHandler> = new Set();
+
+  // Current WebRTC session ID (set after offer/answer exchange)
+  private currentSessionId: string | null = null;
+
+  /**
+   * Create a FalAdapter instance.
+   * @param wsUrl - WebSocket URL for the fal.ai endpoint
+   * @param apiKey - Optional fal.ai API key for authentication
+   */
+  constructor(wsUrl: string, apiKey?: string) {
+    this.wsUrl = wsUrl;
+    this.apiKey = apiKey || null;
+  }
+
+  /**
+   * Connect to the fal WebSocket endpoint.
+   * Resolves once the server sends its "ready" message.
+   */
+  async connect(): Promise<void> {
+    if (this.ws?.readyState === WebSocket.OPEN) {
+      return;
+    }
+
+    this.readyPromise = new Promise<void>((resolve) => {
+      this.readyResolve = resolve;
+    });
+
+    return new Promise<void>((resolve, reject) => {
+      try {
+        // Build URL with auth token as query parameter if provided
+        // (WebSocket API doesn't support custom headers in browsers)
+        let url = this.wsUrl;
+        if (this.apiKey) {
+          const separator = url.includes("?") ? "&" : "?";
+          url = `${url}${separator}fal_jwt_token=${encodeURIComponent(this.apiKey)}`;
+        }
+
+        this.ws = new WebSocket(url);
+
+        this.ws.onopen = () => {
+          console.log("[FalAdapter] WebSocket connected");
+          this.reconnectAttempts = 0;
+        };
+
+        this.ws.onmessage = (event) => {
+          try {
+            const message = JSON.parse(event.data) as ApiResponse;
+            this.handleMessage(message);
+
+            // Check for ready message
+            if (message.type === "ready") {
+              this.isReady = true;
+              this.readyResolve?.();
+              resolve();
+            }
+          } catch (error) {
+            console.error("[FalAdapter] Failed to parse message:", error);
+          }
+        };
+
+        this.ws.onerror = (error) => {
+          console.error("[FalAdapter] WebSocket error:", error);
+          reject(error);
+        };
+
+        this.ws.onclose = (event) => {
+          console.log("[FalAdapter] WebSocket closed:", event.code, event.reason);
+          this.isReady = false;
+          this.ws = null;
+
+          // Reject all pending requests
+          for (const [requestId, pending] of this.pendingRequests) {
+            clearTimeout(pending.timeout);
+            pending.reject(new Error("WebSocket connection closed"));
+            this.pendingRequests.delete(requestId);
+          }
+
+          // Attempt reconnect if not intentional close (1000 = normal closure)
+          if (event.code !== 1000 && this.reconnectAttempts < this.maxReconnectAttempts) {
+            this.reconnectAttempts++;
+            // Exponential backoff capped at 30s.
+            const delay = Math.min(1000 * Math.pow(2, this.reconnectAttempts), 30000);
+            console.log(`[FalAdapter] Reconnecting in ${delay}ms...`);
+            // Don't leave the reconnect promise floating: surface failures.
+            setTimeout(() => {
+              this.connect().catch((err) =>
+                console.error("[FalAdapter] Reconnect failed:", err)
+              );
+            }, delay);
+          }
+        };
+      } catch (error) {
+        reject(error);
+      }
+    });
+  }
+
+  /**
+   * Wait for the adapter to be ready (server has sent "ready").
+   */
+  async waitForReady(): Promise<void> {
+    if (this.isReady) return;
+    if (this.readyPromise) {
+      await this.readyPromise;
+    }
+  }
+
+  /**
+   * Disconnect from the WebSocket with a normal-closure code.
+   */
+  disconnect(): void {
+    if (this.ws) {
+      this.ws.close(1000, "Client disconnect");
+      this.ws = null;
+    }
+    this.isReady = false;
+  }
+
+  /**
+   * Add a message handler for handling server-pushed messages.
+   * @returns Unsubscribe function that removes the handler.
+   */
+  onMessage(handler: MessageHandler): () => void {
+    this.messageHandlers.add(handler);
+    return () => this.messageHandlers.delete(handler);
+  }
+
+  /** Dispatch an incoming message to its pending request or push handlers. */
+  private handleMessage(message: ApiResponse): void {
+    // Handle response to pending request
+    if (message.request_id && this.pendingRequests.has(message.request_id)) {
+      const pending = this.pendingRequests.get(message.request_id)!;
+      clearTimeout(pending.timeout);
+      this.pendingRequests.delete(message.request_id);
+
+      if (message.type === "error" || (message.status && message.status >= 400)) {
+        pending.reject(new Error(message.error || `Request failed with status ${message.status}`));
+      } else {
+        pending.resolve(message);
+      }
+      return;
+    }
+
+    // Handle WebRTC signaling responses (no request_id)
+    if (message.type === "answer" || message.type === "ice_servers" || message.type === "icecandidate_ack") {
+      // These are handled by specific pending requests
+      return;
+    }
+
+    // Notify all message handlers for server-pushed messages; one
+    // faulty handler must not prevent the others from running.
+    for (const handler of this.messageHandlers) {
+      try {
+        handler(message);
+      } catch (error) {
+        console.error("[FalAdapter] Message handler error:", error);
+      }
+    }
+  }
+
+  /** Monotonic counter + timestamp keeps ids unique within a session. */
+  private generateRequestId(): string {
+    return `req_${++this.requestCounter}_${Date.now()}`;
+  }
+
+  /**
+   * Send a message and wait for the matching response (by request_id).
+   * Rejects on timeout or if the socket is not open.
+   */
+  private async sendAndWait(
+    message: Record<string, unknown>,
+    timeoutMs = 30000
+  ): Promise<ApiResponse> {
+    await this.waitForReady();
+
+    if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {
+      throw new Error("WebSocket not connected");
+    }
+
+    const requestId = this.generateRequestId();
+    const messageWithId = { ...message, request_id: requestId };
+
+    return new Promise<ApiResponse>((resolve, reject) => {
+      const timeout = setTimeout(() => {
+        this.pendingRequests.delete(requestId);
+        reject(new Error(`Request timeout after ${timeoutMs}ms`));
+      }, timeoutMs);
+
+      this.pendingRequests.set(requestId, {
+        resolve: resolve as (value: unknown) => void,
+        reject,
+        timeout,
+      });
+
+      this.ws!.send(JSON.stringify(messageWithId));
+    });
+  }
+
+  // ==================== WebRTC Signaling ====================
+
+  /**
+   * Get ICE servers from the backend
+   */
+  async getIceServers(): Promise<IceServersResponse> {
+    const response = await this.sendAndWait({
+      type: "get_ice_servers",
+    });
+    return response.data as IceServersResponse;
+  }
+
+  /**
+   * Send WebRTC offer and get answer. Remembers the returned session id
+   * so later ICE candidates can omit it.
+   */
+  async sendOffer(
+    sdp: string,
+    sdpType: string,
+    initialParameters?: WebRTCOfferRequest["initialParameters"]
+  ): Promise<WebRTCOfferResponse> {
+    const response = await this.sendAndWait({
+      type: "offer",
+      sdp,
+      sdp_type: sdpType,
+      initialParameters,
+    });
+
+    if (response.sessionId) {
+      this.currentSessionId = response.sessionId;
+    }
+
+    return {
+      sdp: response.sdp!,
+      type: response.sdp_type!,
+      sessionId: response.sessionId!,
+    };
+  }
+
+  /**
+   * Send a single ICE candidate (null signals end-of-candidates).
+   */
+  async sendIceCandidate(
+    sessionId: string | null,
+    candidate: RTCIceCandidate | null
+  ): Promise<void> {
+    await this.sendAndWait({
+      type: "icecandidate",
+      sessionId: sessionId || this.currentSessionId,
+      candidate: candidate
+        ? {
+            candidate: candidate.candidate,
+            sdpMid: candidate.sdpMid,
+            sdpMLineIndex: candidate.sdpMLineIndex,
+          }
+        : null,
+    });
+  }
+
+  /**
+   * Send multiple ICE candidates sequentially (ordering matters for
+   * signaling, so these are deliberately not parallelized).
+   */
+  async sendIceCandidates(
+    sessionId: string,
+    candidates: RTCIceCandidate[]
+  ): Promise<void> {
+    for (const candidate of candidates) {
+      await this.sendIceCandidate(sessionId, candidate);
+    }
+  }
+
+  // ==================== API Proxy ====================
+
+  /**
+   * Make a REST-style API request tunneled through the WebSocket.
+   * @typeParam T - shape of the `data` payload in the response
+   */
+  private async apiRequest<T>(
+    method: "GET" | "POST" | "PATCH" | "DELETE",
+    path: string,
+    body?: unknown
+  ): Promise<T> {
+    const response = await this.sendAndWait({
+      type: "api",
+      method,
+      path,
+      body,
+    });
+
+    if (response.status && response.status >= 400) {
+      throw new Error(response.error || `API request failed with status ${response.status}`);
+    }
+
+    return response.data as T;
+  }
+
+  // API methods matching the original api.ts interface
+  api = {
+    getPipelineStatus: (): Promise<PipelineStatusResponse> =>
+      this.apiRequest<PipelineStatusResponse>("GET", "/api/v1/pipeline/status"),
+
+    loadPipeline: (data: PipelineLoadRequest): Promise<{ message: string }> =>
+      this.apiRequest<{ message: string }>("POST", "/api/v1/pipeline/load", data),
+
+    getPipelineSchemas: (): Promise<PipelineSchemasResponse> =>
+      this.apiRequest<PipelineSchemasResponse>("GET", "/api/v1/pipelines/schemas"),
+
+    checkModelStatus: (pipelineId: string): Promise<ModelStatusResponse> =>
+      this.apiRequest<ModelStatusResponse>("GET", `/api/v1/models/status?pipeline_id=${pipelineId}`),
+
+    downloadPipelineModels: (pipelineId: string): Promise<{ message: string }> =>
+      this.apiRequest<{ message: string }>("POST", "/api/v1/models/download", { pipeline_id: pipelineId }),
+
+    getHardwareInfo: (): Promise<HardwareInfoResponse> =>
+      this.apiRequest<HardwareInfoResponse>("GET", "/api/v1/hardware/info"),
+
+    listLoRAFiles: (): Promise<LoRAFilesResponse> =>
+      this.apiRequest<LoRAFilesResponse>("GET", "/api/v1/lora/list"),
+
+    listAssets: (type?: "image" | "video"): Promise<AssetsResponse> =>
+      this.apiRequest<AssetsResponse>("GET", type ? `/api/v1/assets?type=${type}` : "/api/v1/assets"),
+
+    uploadAsset: async (file: File): Promise<AssetFileInfo> => {
+      // For file uploads, we need to convert to base64 and send through WebSocket
+      // This is a limitation of the WebSocket approach
+      const arrayBuffer = await file.arrayBuffer();
+      const base64 = btoa(
+        new Uint8Array(arrayBuffer).reduce(
+          (data, byte) => data + String.fromCharCode(byte),
+          ""
+        )
+      );
+
+      return this.apiRequest<AssetFileInfo>("POST", `/api/v1/assets?filename=${encodeURIComponent(file.name)}`, {
+        _base64_content: base64,
+        _content_type: file.type,
+      });
+    },
+
+    // NOTE(review): assumed the current-logs payload is an array of log
+    // lines — confirm against the REST handler / api.ts before relying on it.
+    fetchCurrentLogs: (): Promise<string[]> =>
+      this.apiRequest<string[]>("GET", "/api/v1/logs/current"),
+
+    // Note: downloadRecording needs special handling for binary data
+    // For now, it will return the URL to download from
+    getRecordingUrl: (sessionId: string): string =>
+      `/api/v1/recordings/${sessionId}`,
+  };
+}
+
+// ==================== React Hook ====================
+
+import { useState, useEffect, useRef, useCallback } from "react";
+
+/**
+ * React hook for using the FalAdapter
+ */
+/**
+ * React hook for using the FalAdapter.
+ *
+ * Creates an adapter for `wsUrl`, connects it, and tears it down when
+ * the URL/key changes or the component unmounts.
+ *
+ * @param wsUrl - fal endpoint WebSocket URL, or null to disable fal mode
+ * @param apiKey - optional fal.ai API key
+ */
+export function useFalAdapter(wsUrl: string | null, apiKey?: string) {
+  const [isConnected, setIsConnected] = useState(false);
+  const [isReady, setIsReady] = useState(false);
+  // Restored stripped type arguments on useState/useRef below.
+  const [error, setError] = useState<Error | null>(null);
+  const adapterRef = useRef<FalAdapter | null>(null);
+
+  useEffect(() => {
+    if (!wsUrl) {
+      adapterRef.current = null;
+      setIsConnected(false);
+      setIsReady(false);
+      return;
+    }
+
+    const adapter = new FalAdapter(wsUrl, apiKey);
+    adapterRef.current = adapter;
+
+    adapter
+      .connect()
+      .then(() => {
+        setIsConnected(true);
+        setIsReady(true);
+        setError(null);
+      })
+      .catch((err) => {
+        setError(err);
+        setIsConnected(false);
+        setIsReady(false);
+      });
+
+    return () => {
+      adapter.disconnect();
+    };
+  }, [wsUrl, apiKey]);
+
+  // Stable accessor for use inside callbacks without adding re-render deps.
+  const getAdapter = useCallback(() => adapterRef.current, []);
+
+  return {
+    adapter: adapterRef.current,
+    getAdapter,
+    isConnected,
+    isReady,
+    error,
+  };
+}
+
+// ==================== Global Instance ====================
+
+// Module-level singleton; null until initFalAdapter() is called.
+let globalAdapter: FalAdapter | null = null;
+
+/**
+ * Initialize the global FalAdapter instance.
+ * Call this once at app startup if using fal deployment; any previous
+ * instance is disconnected before being replaced.
+ */
+export function initFalAdapter(wsUrl: string, apiKey?: string): FalAdapter {
+  if (globalAdapter) {
+    globalAdapter.disconnect();
+  }
+  globalAdapter = new FalAdapter(wsUrl, apiKey);
+  return globalAdapter;
+}
+
+/**
+ * Get the global FalAdapter instance (null when not initialized).
+ */
+export function getFalAdapter(): FalAdapter | null {
+  return globalAdapter;
+}
+
+/**
+ * Check if we're running on fal (adapter is initialized).
+ */
+export function isFalMode(): boolean {
+  // globalAdapter is declared `FalAdapter | null`, so the former
+  // `!== undefined` branch was unreachable dead code.
+  return globalAdapter !== null;
+}
diff --git a/frontend/src/lib/falContext.tsx b/frontend/src/lib/falContext.tsx
new file mode 100644
index 000000000..3ee1a426c
--- /dev/null
+++ b/frontend/src/lib/falContext.tsx
@@ -0,0 +1,97 @@
+/**
+ * Fal.ai Context Provider
+ *
+ * Provides a context for managing fal.ai deployment mode.
+ * When FAL_WS_URL is set, all API calls and WebRTC signaling
+ * go through the FalAdapter WebSocket connection.
+ */
+
+import React, { createContext, useContext, useEffect, useState } from "react";
+import { FalAdapter } from "./falAdapter";
+
+/** Value exposed through FalContext to all consumers. */
+interface FalContextValue {
+  /** Whether we're in fal mode */
+  isFalMode: boolean;
+  /** The FalAdapter instance (null if not in fal mode) */
+  adapter: FalAdapter | null;
+  /** Whether the adapter is connected and ready */
+  isReady: boolean;
+  /** Connection error if any */
+  error: Error | null;
+}
+
+// Default value used when no FalProvider is mounted (local mode).
+// Restored the stripped <FalContextValue> type argument: without it
+// `adapter` would be inferred as type `null` and never assignable.
+const FalContext = createContext<FalContextValue>({
+  isFalMode: false,
+  adapter: null,
+  isReady: false,
+  error: null,
+});
+
+interface FalProviderProps {
+ /** WebSocket URL for fal.ai endpoint. If not set, local mode is used. */
+ wsUrl?: string;
+ /** fal.ai API key for authentication */
+ apiKey?: string;
+ children: React.ReactNode;
+}
+
+export function FalProvider({ wsUrl, apiKey, children }: FalProviderProps) {
+ const [adapter, setAdapter] = useState(null);
+ const [isReady, setIsReady] = useState(false);
+ const [error, setError] = useState(null);
+
+ useEffect(() => {
+ if (!wsUrl) {
+ setAdapter(null);
+ setIsReady(false);
+ setError(null);
+ return;
+ }
+
+ console.log("[FalProvider] Connecting to fal.ai:", wsUrl);
+ const falAdapter = new FalAdapter(wsUrl, apiKey);
+ setAdapter(falAdapter);
+
+ falAdapter
+ .connect()
+ .then(() => {
+ console.log("[FalProvider] Connected to fal.ai");
+ setIsReady(true);
+ setError(null);
+ })
+ .catch((err) => {
+ console.error("[FalProvider] Connection failed:", err);
+ setError(err);
+ setIsReady(false);
+ });
+
+ return () => {
+ console.log("[FalProvider] Disconnecting from fal.ai");
+ falAdapter.disconnect();
+ };
+ }, [wsUrl]);
+
+ const value: FalContextValue = {
+ isFalMode: !!wsUrl,
+ adapter,
+ isReady: !!wsUrl && isReady,
+ error,
+ };
+
+ return {children};
+}
+
+/**
+ * Read the nearest FalContext value. Falls back to the module-level
+ * defaults (local mode) when no FalProvider is mounted above.
+ */
+export function useFalContext(): FalContextValue {
+  const ctx = useContext(FalContext);
+  return ctx;
+}
+
+/**
+ * Hook that returns the adapter if in fal mode
+ */
+export function useFalAdapter() {
+ const { adapter, isFalMode, isReady, error } = useFalContext();
+ return { adapter, isFalMode, isReady, error };
+}
diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx
index 10ed13e0c..78860cc5d 100644
--- a/frontend/src/main.tsx
+++ b/frontend/src/main.tsx
@@ -1,10 +1,9 @@
-import { StrictMode } from "react";
import { createRoot } from "react-dom/client";
import "./index.css";
import App from "./App.tsx";
createRoot(document.getElementById("root")!).render(
-  <StrictMode>
-    <App />
-  </StrictMode>
+  // <StrictMode>
+  <App />
+  // </StrictMode>
);
diff --git a/frontend/src/pages/StreamPage.tsx b/frontend/src/pages/StreamPage.tsx
index e3bb66177..1806dcf76 100644
--- a/frontend/src/pages/StreamPage.tsx
+++ b/frontend/src/pages/StreamPage.tsx
@@ -7,13 +7,15 @@ import { PromptInputWithTimeline } from "../components/PromptInputWithTimeline";
import { DownloadDialog } from "../components/DownloadDialog";
import type { TimelinePrompt } from "../components/PromptTimeline";
import { StatusBar } from "../components/StatusBar";
-import { useWebRTC } from "../hooks/useWebRTC";
+import { useUnifiedWebRTC } from "../hooks/useUnifiedWebRTC";
import { useVideoSource } from "../hooks/useVideoSource";
import { useWebRTCStats } from "../hooks/useWebRTCStats";
import { useControllerInput } from "../hooks/useControllerInput";
import { usePipeline } from "../hooks/usePipeline";
import { useStreamState } from "../hooks/useStreamState";
import { usePipelines } from "../hooks/usePipelines";
+import { useApi } from "../hooks/useApi";
+import { useFalContext } from "../lib/falContext";
import { getDefaultPromptForMode } from "../data/pipelines";
import { adjustResolutionForPipeline } from "../lib/utils";
import type {
@@ -26,11 +28,6 @@ import type {
VaeType,
} from "../types";
import type { PromptItem, PromptTransition } from "../lib/api";
-import {
- checkModelStatus,
- downloadPipelineModels,
- downloadRecording,
-} from "../lib/api";
import { sendLoRAScaleUpdates } from "../utils/loraHelpers";
import { toast } from "sonner";
@@ -71,6 +68,17 @@ function getVaceParams(
}
export function StreamPage() {
+ // Get API functions that work in both local and fal modes
+ const api = useApi();
+ const { isFalMode, isReady: isFalReady } = useFalContext();
+
+ // Show loading state while connecting to fal
+ useEffect(() => {
+ if (isFalMode) {
+ console.log("[StreamPage] Fal mode enabled, ready:", isFalReady);
+ }
+ }, [isFalMode, isFalReady]);
+
// Fetch available pipelines dynamically
const { pipelines } = usePipelines();
@@ -161,7 +169,7 @@ export function StreamPage() {
pipelineInfo,
} = usePipeline();
- // WebRTC for streaming
+ // WebRTC for streaming (unified hook works in both local and fal modes)
const {
remoteStream,
isStreaming,
@@ -172,7 +180,7 @@ export function StreamPage() {
updateVideoTrack,
sendParameterUpdate,
sessionId,
- } = useWebRTC();
+ } = useUnifiedWebRTC();
// Computed loading state - true when downloading models, loading pipeline, or connecting WebRTC
const isLoading = isDownloading || isPipelineLoading || isConnecting;
@@ -350,12 +358,12 @@ export function StreamPage() {
setDownloadProgress(null);
try {
- await downloadPipelineModels(pipelineId);
+ await api.downloadPipelineModels(pipelineId);
// Enhanced polling with progress updates
const checkDownloadProgress = async () => {
try {
- const status = await checkModelStatus(pipelineId);
+ const status = await api.checkModelStatus(pipelineId);
// Update progress state
if (status.progress) {
@@ -833,6 +841,14 @@ export function StreamPage() {
// Use override pipeline ID if provided, otherwise use current settings
const pipelineIdToUse = overridePipelineId || settings.pipelineId;
+ // Check cloud mode FIRST before any local operations
+ const isCloudMode =
+ settings.cloudMode?.enabled && settings.cloudMode?.status === "connected";
+
+ if (isCloudMode) {
+ console.log("Cloud mode enabled, skipping local pipeline/model loading");
+ }
+
try {
// Build pipeline chain: preprocessors + main pipeline + postprocessors
const pipelineIds: string[] = [];
@@ -844,114 +860,122 @@ export function StreamPage() {
pipelineIds.push(...settings.postprocessorIds);
}
- // Check if models are needed but not downloaded for all pipelines in the chain
- // Collect all missing pipelines/preprocessors
- const missingPipelines: string[] = [];
- for (const pipelineId of pipelineIds) {
- const pipelineInfo = pipelines?.[pipelineId];
- if (pipelineInfo?.requiresModels) {
- try {
- const status = await checkModelStatus(pipelineId);
- if (!status.downloaded) {
- missingPipelines.push(pipelineId);
+ // Skip model checks and download in cloud mode - models run on fal.ai servers
+ if (!isCloudMode) {
+ // Check if models are needed but not downloaded for all pipelines in the chain
+ // Collect all missing pipelines/preprocessors
+ const missingPipelines: string[] = [];
+ for (const pipelineId of pipelineIds) {
+ const pipelineInfo = pipelines?.[pipelineId];
+ if (pipelineInfo?.requiresModels) {
+ try {
+ const status = await api.checkModelStatus(pipelineId);
+ if (!status.downloaded) {
+ missingPipelines.push(pipelineId);
+ }
+ } catch (error) {
+ console.error(
+ `Error checking model status for ${pipelineId}:`,
+ error
+ );
+ // Continue anyway if check fails
}
- } catch (error) {
- console.error(
- `Error checking model status for ${pipelineId}:`,
- error
- );
- // Continue anyway if check fails
}
}
- }
- // If any pipelines are missing models, show download dialog
- if (missingPipelines.length > 0) {
- setPipelinesNeedingModels(missingPipelines);
- setShowDownloadDialog(true);
- return false; // Stream did not start
+ // If any pipelines are missing models, show download dialog
+ if (missingPipelines.length > 0) {
+ setPipelinesNeedingModels(missingPipelines);
+ setShowDownloadDialog(true);
+ return false; // Stream did not start
+ }
}
- // Always load pipeline with current parameters - backend will handle the rest
- console.log(`Loading ${pipelineIdToUse} pipeline...`);
-
- // Determine current input mode
+ // Determine current input mode (needed for both cloud and local paths)
const currentMode =
settings.inputMode || getPipelineDefaultMode(pipelineIdToUse) || "text";
- // Use settings.resolution if available, otherwise fall back to videoResolution
- let resolution = settings.resolution || videoResolution;
-
- // Adjust resolution to be divisible by required scale factor for the pipeline
- if (resolution) {
- const { resolution: adjustedResolution, wasAdjusted } =
- adjustResolutionForPipeline(pipelineIdToUse, resolution);
-
- if (wasAdjusted) {
- // Update settings with adjusted resolution
- updateSettings({ resolution: adjustedResolution });
- resolution = adjustedResolution;
- }
- }
-
- // Build load parameters dynamically based on pipeline capabilities and settings
- // The backend will use only the parameters it needs based on the pipeline schema
+ // Get current pipeline info (needed for both paths)
const currentPipeline = pipelines?.[pipelineIdToUse];
+
// Compute VACE enabled state - needed for both loadParams and initialParameters
const vaceEnabled = currentPipeline?.supportsVACE
? (settings.vaceEnabled ?? currentMode !== "video")
: false;
- let loadParams: Record | null = null;
+ // Skip loading local pipeline in cloud mode - processing happens on fal.ai
+ if (!isCloudMode) {
+ // Always load pipeline with current parameters - backend will handle the rest
+ console.log(`Loading ${pipelineIdToUse} pipeline...`);
- if (resolution) {
- // Start with common parameters
- loadParams = {
- height: resolution.height,
- width: resolution.width,
- };
+ // Use settings.resolution if available, otherwise fall back to videoResolution
+ let resolution = settings.resolution || videoResolution;
- // Add seed if pipeline supports quantization (implies it needs seed)
- if (currentPipeline?.supportsQuantization) {
- loadParams.seed = settings.seed ?? 42;
- loadParams.quantization = settings.quantization ?? null;
- loadParams.vae_type = settings.vaeType ?? "wan";
- }
+ // Adjust resolution to be divisible by required scale factor for the pipeline
+ if (resolution) {
+ const { resolution: adjustedResolution, wasAdjusted } =
+ adjustResolutionForPipeline(pipelineIdToUse, resolution);
- // Add LoRA parameters if pipeline supports LoRA
- if (currentPipeline?.supportsLoRA && settings.loras) {
- const loraParams = buildLoRAParams(
- settings.loras,
- settings.loraMergeStrategy
- );
- loadParams = { ...loadParams, ...loraParams };
+ if (wasAdjusted) {
+ // Update settings with adjusted resolution
+ updateSettings({ resolution: adjustedResolution });
+ resolution = adjustedResolution;
+ }
}
- // Add VACE parameters if pipeline supports VACE
- if (currentPipeline?.supportsVACE) {
- loadParams.vace_enabled = vaceEnabled;
+ // Build load parameters dynamically based on pipeline capabilities and settings
+ // The backend will use only the parameters it needs based on the pipeline schema
+ let loadParams: Record | null = null;
+
+ if (resolution) {
+ // Start with common parameters
+ loadParams = {
+ height: resolution.height,
+ width: resolution.width,
+ };
+
+ // Add seed if pipeline supports quantization (implies it needs seed)
+ if (currentPipeline?.supportsQuantization) {
+ loadParams.seed = settings.seed ?? 42;
+ loadParams.quantization = settings.quantization ?? null;
+ loadParams.vae_type = settings.vaeType ?? "wan";
+ }
+
+ // Add LoRA parameters if pipeline supports LoRA
+ if (currentPipeline?.supportsLoRA && settings.loras) {
+ const loraParams = buildLoRAParams(
+ settings.loras,
+ settings.loraMergeStrategy
+ );
+ loadParams = { ...loadParams, ...loraParams };
+ }
+
+ // Add VACE parameters if pipeline supports VACE
+ if (currentPipeline?.supportsVACE) {
+ loadParams.vace_enabled = vaceEnabled;
+
+ // Add VACE reference images if provided
+ const vaceParams = getVaceParams(
+ settings.refImages,
+ settings.vaceContextScale
+ );
+ loadParams = { ...loadParams, ...vaceParams };
+ }
- // Add VACE reference images if provided
- const vaceParams = getVaceParams(
- settings.refImages,
- settings.vaceContextScale
+ console.log(
+ `Loading ${pipelineIds.length} pipeline(s) (${pipelineIds.join(", ")}) with resolution ${resolution.width}x${resolution.height}`,
+ loadParams
);
- loadParams = { ...loadParams, ...vaceParams };
}
- console.log(
- `Loading ${pipelineIds.length} pipeline(s) (${pipelineIds.join(", ")}) with resolution ${resolution.width}x${resolution.height}`,
- loadParams
+ const loadSuccess = await loadPipeline(
+ pipelineIds,
+ loadParams || undefined
);
- }
-
- const loadSuccess = await loadPipeline(
- pipelineIds,
- loadParams || undefined
- );
- if (!loadSuccess) {
- console.error("Failed to load pipeline, cannot start stream");
- return false;
+ if (!loadSuccess) {
+ console.error("Failed to load pipeline, cannot start stream");
+ return false;
+ }
}
// Check video requirements based on input mode
@@ -987,6 +1011,10 @@ export function StreamPage() {
first_frame_image?: string;
last_frame_image?: string;
images?: string[];
+ // Cloud mode params - tells backend to auto-connect to fal
+ fal_cloud_enabled?: boolean;
+ fal_app_id?: string;
+ fal_api_key?: string;
} = {
// Signal the intended input mode to the backend so it doesn't
// briefly fall back to text mode before video frames arrive
@@ -1062,6 +1090,13 @@ export function StreamPage() {
initialParameters.spout_receiver = settings.spoutReceiver;
}
+ // Cloud mode params - tells backend to auto-connect to fal
+ if (isCloudMode) {
+ initialParameters.fal_cloud_enabled = true;
+ initialParameters.fal_app_id = settings.cloudMode?.appId;
+ initialParameters.fal_api_key = settings.cloudMode?.apiKey;
+ }
+
// Reset paused state when starting a fresh stream
updateSettings({ paused: false });
@@ -1084,7 +1119,7 @@ export function StreamPage() {
});
return;
}
- await downloadRecording(sessionId);
+ await api.downloadRecording(sessionId);
} catch (error) {
console.error("Error downloading recording:", error);
toast.error("Error downloading recording", {
@@ -1390,6 +1425,12 @@ export function StreamPage() {
onPreprocessorIdsChange={handlePreprocessorIdsChange}
postprocessorIds={settings.postprocessorIds ?? []}
onPostprocessorIdsChange={handlePostprocessorIdsChange}
+ cloudMode={settings.cloudMode}
+ onCloudModeChange={cloudModeUpdate => {
+ updateSettings({
+ cloudMode: { ...settings.cloudMode!, ...cloudModeUpdate },
+ });
+ }}
/>
diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts
index 74495bc23..35b15aee4 100644
--- a/frontend/src/types/index.ts
+++ b/frontend/src/types/index.ts
@@ -48,6 +48,21 @@ export interface LoRAConfig {
mergeMode?: LoraMergeStrategy;
}
+// Cloud mode status for server-side fal integration
+export type CloudModeStatus =
+ | "disconnected"
+ | "connecting"
+ | "connected"
+ | "error";
+
+export interface CloudModeState {
+ enabled: boolean;
+ appId: string;
+ apiKey: string;
+ status: CloudModeStatus;
+ errorMessage?: string;
+}
+
export interface SettingsState {
pipelineId: PipelineId;
resolution?: {
@@ -90,6 +105,8 @@ export interface SettingsState {
preprocessorIds?: string[];
// Postprocessors
postprocessorIds?: string[];
+ // Cloud mode settings (server-side fal integration)
+ cloudMode?: CloudModeState;
}
export interface PipelineInfo {
diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts
index b6c4e9294..57a14e9c1 100644
--- a/frontend/vite.config.ts
+++ b/frontend/vite.config.ts
@@ -10,6 +10,7 @@ export default defineConfig({
},
},
server: {
+ host: true,
proxy: {
"/api": {
target: "http://localhost:8000",
diff --git a/pyproject.toml b/pyproject.toml
index b377d001d..bdb2e5d79 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -56,6 +56,8 @@ dependencies = [
"triton-windows==3.5.1.post24; sys_platform == 'win32'",
"SpoutGL>=0.1.1; sys_platform == 'win32'",
"PyOpenGL>=3.1.10; sys_platform == 'win32'",
+ "aiohttp>=3.9.0",
+ "websockets>=12.0",
]
[project.scripts]
@@ -115,6 +117,7 @@ dev = [
"pre-commit>=4.0.0",
"twine>=5.0.0",
"pytest>=8.4.2",
+ "pytest-asyncio>=0.24.0",
"freezegun>=1.5.5",
]
diff --git a/src/scope/server/app.py b/src/scope/server/app.py
index 0e3ea58f2..7ae44c23d 100644
--- a/src/scope/server/app.py
+++ b/src/scope/server/app.py
@@ -52,6 +52,8 @@
from .schema import (
AssetFileInfo,
AssetsResponse,
+ FalConnectRequest,
+ FalStatusResponse,
HardwareInfoResponse,
HealthResponse,
IceCandidateRequest,
@@ -438,13 +440,32 @@ async def handle_webrtc_offer(
):
"""Handle WebRTC offer and return answer."""
try:
- # Ensure pipeline is loaded before proceeding
- status_info = await pipeline_manager.get_status_info_async()
- if status_info["status"] != "loaded":
- raise HTTPException(
- status_code=400,
- detail="Pipeline not loaded. Please load pipeline first.",
- )
+ # Check if cloud mode is enabled (fal config stored)
+ fal_config = webrtc_manager.get_fal_config()
+ is_cloud_mode = fal_config is not None
+
+ # Only require local pipeline when NOT in cloud mode
+ # In cloud mode, the pipeline runs on fal.ai servers
+ if not is_cloud_mode:
+ status_info = await pipeline_manager.get_status_info_async()
+ if status_info["status"] != "loaded":
+ # Try to auto-load pipeline from initialParameters
+ if (
+ request.initialParameters
+ and request.initialParameters.pipeline_ids
+ ):
+ pipeline_ids = request.initialParameters.pipeline_ids
+ logger.info(
+ f"Auto-loading pipeline from initialParameters: {pipeline_ids}"
+ )
+ await pipeline_manager.load_pipelines(pipeline_ids)
+ else:
+ raise HTTPException(
+ status_code=400,
+ detail="Pipeline not loaded. Please load pipeline first.",
+ )
+ else:
+ logger.info("Cloud mode enabled, skipping local pipeline check")
return await webrtc_manager.handle_offer(request, pipeline_manager)
@@ -865,6 +886,117 @@ async def get_hardware_info():
raise HTTPException(status_code=500, detail=str(e)) from e
+# =============================================================================
+# fal.ai cloud integration endpoints
+# =============================================================================
+
+
+@app.post("/api/v1/fal/connect", response_model=FalStatusResponse)
+async def connect_to_fal(
+ request: FalConnectRequest,
+ webrtc_manager: "WebRTCManager" = Depends(get_webrtc_manager),
+):
+ """Connect to fal.ai cloud for remote GPU inference.
+
+ This stores fal credentials for future sessions and connects any
+ existing WebRTC sessions to the specified fal.ai app.
+ """
+
+ try:
+ # Store config for future sessions (key fix: new sessions will inherit this)
+ webrtc_manager.set_fal_config(request.app_id, request.api_key)
+ logger.info(f"Stored fal config for future sessions: {request.app_id}")
+
+ # Connect existing sessions if any
+ connected_count = 0
+ for session_id, session in list(webrtc_manager.sessions.items()):
+ if session.video_track and session.video_track.frame_processor:
+ # Get initial parameters from the video track if available
+ initial_params = getattr(
+ session.video_track, "initial_parameters", None
+ )
+ await session.video_track.frame_processor.connect_to_fal(
+ app_id=request.app_id,
+ api_key=request.api_key,
+ initial_parameters=initial_params,
+ )
+ connected_count += 1
+ logger.info(
+ f"Connected session {session_id} to fal app {request.app_id}"
+ )
+
+ if connected_count == 0:
+ logger.info("No existing sessions, fal config saved for new sessions")
+
+ return FalStatusResponse(connected=True, app_id=request.app_id)
+ except Exception as e:
+ logger.error(f"Error connecting to fal: {e}")
+ raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@app.post("/api/v1/fal/disconnect", response_model=FalStatusResponse)
+async def disconnect_from_fal(
+ webrtc_manager: "WebRTCManager" = Depends(get_webrtc_manager),
+):
+ """Disconnect from fal.ai cloud.
+
+ This clears stored fal credentials and disconnects all active
+ WebRTC sessions from fal.ai, returning to local GPU processing.
+ """
+
+ try:
+ # Clear stored config so new sessions won't use fal
+ webrtc_manager.clear_fal_config()
+ logger.info("Cleared fal config for future sessions")
+
+ # Disconnect existing sessions
+ disconnected_count = 0
+ for session_id, session in list(webrtc_manager.sessions.items()):
+ if session.video_track and session.video_track.frame_processor:
+ await session.video_track.frame_processor.disconnect_from_fal()
+ disconnected_count += 1
+ logger.info(f"Disconnected session {session_id} from fal")
+
+ if disconnected_count == 0:
+ logger.info("No existing sessions to disconnect")
+
+ return FalStatusResponse(connected=False, app_id=None)
+ except Exception as e:
+ logger.error(f"Error disconnecting from fal: {e}")
+ raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@app.get("/api/v1/fal/status", response_model=FalStatusResponse)
+async def get_fal_status(
+ webrtc_manager: "WebRTCManager" = Depends(get_webrtc_manager),
+):
+ """Get current fal.ai cloud connection status.
+
+ Returns whether cloud mode is enabled (config stored or session connected)
+ and the app ID if available.
+ """
+
+ try:
+ # Check if any session is actively connected to fal
+ for session in webrtc_manager.sessions.values():
+ if session.video_track and session.video_track.frame_processor:
+ fp = session.video_track.frame_processor
+ if fp.fal_enabled and fp.fal_client:
+ # Return the app_id from the first connected session
+ app_id = getattr(fp.fal_client, "app_id", None)
+ return FalStatusResponse(connected=True, app_id=app_id)
+
+ # Check if fal config is stored for future sessions
+ fal_config = webrtc_manager.get_fal_config()
+ if fal_config:
+ return FalStatusResponse(connected=True, app_id=fal_config["app_id"])
+
+ return FalStatusResponse(connected=False, app_id=None)
+ except Exception as e:
+ logger.error(f"Error getting fal status: {e}")
+ raise HTTPException(status_code=500, detail=str(e)) from e
+
+
@app.get("/api/v1/logs/current")
async def get_current_logs():
"""Get the most recent application log file for bug reporting."""
diff --git a/src/scope/server/build.py b/src/scope/server/build.py
index 9e5a65141..2fc852a11 100644
--- a/src/scope/server/build.py
+++ b/src/scope/server/build.py
@@ -17,7 +17,7 @@ def run_command(cmd: list[str], cwd: Path | None = None) -> None:
if executable_path:
cmd[0] = executable_path
else:
- print(f"❌ Error: Could not find executable '{cmd[0]}' in PATH")
+ print(f"[ERROR] Could not find executable '{cmd[0]}' in PATH")
sys.exit(1)
try:
@@ -27,7 +27,7 @@ def run_command(cmd: list[str], cwd: Path | None = None) -> None:
if result.stdout:
print(result.stdout.strip())
except subprocess.CalledProcessError as e:
- print(f"❌ Error running command: {' '.join(cmd)}")
+ print(f"[ERROR] Running command: {' '.join(cmd)}")
if e.stderr:
print(f"Error output: {e.stderr.strip()}")
sys.exit(1)
@@ -35,40 +35,40 @@ def run_command(cmd: list[str], cwd: Path | None = None) -> None:
def main() -> None:
"""Main build function."""
- print("🚀 Building daydream-scope...")
+ print("[BUILD] Building daydream-scope...")
project_root = Path(__file__).parent.parent.parent.parent
# Check if we're in the right directory
if not (project_root / "pyproject.toml").exists():
print(
- "❌ Error: pyproject.toml not found. Please run this script from the project root."
+ "[ERROR] pyproject.toml not found. Please run this script from the project root."
)
sys.exit(1)
# Build frontend
- print("📦 Building frontend...")
+ print("[BUILD] Building frontend...")
frontend_dir = project_root / "frontend"
if not frontend_dir.exists():
- print("❌ Error: frontend directory not found")
+ print("[ERROR] frontend directory not found")
sys.exit(1)
# Always run npm install to ensure dependencies are up to date
- print("📥 Installing frontend dependencies...")
+ print("[BUILD] Installing frontend dependencies...")
run_command(["npm", "install"], cwd=frontend_dir)
# Build the frontend
- print("🔨 Building frontend assets...")
+ print("[BUILD] Building frontend assets...")
run_command(["npm", "run", "build"], cwd=frontend_dir)
# Check if build was successful
dist_dir = frontend_dir / "dist"
if not dist_dir.exists():
- print("❌ Error: Frontend build failed - dist directory not found")
+ print("[ERROR] Frontend build failed - dist directory not found")
sys.exit(1)
- print("✅ Frontend build completed successfully")
+ print("[OK] Frontend build completed successfully")
if __name__ == "__main__":
diff --git a/src/scope/server/fal_client.py b/src/scope/server/fal_client.py
new file mode 100644
index 000000000..a4729967a
--- /dev/null
+++ b/src/scope/server/fal_client.py
@@ -0,0 +1,398 @@
+"""WebSocket + WebRTC client for connecting to fal.ai cloud.
+
+Based on fal-demos/yolo_webcam_webrtc reference implementation.
+Scope acts as WebRTC client (creates offers), fal.ai acts as server.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any
+
+import aiohttp
+import websockets
+from aiortc import (
+ RTCDataChannel,
+ RTCPeerConnection,
+ RTCSessionDescription,
+)
+from aiortc.sdp import candidate_from_sdp
+
+if TYPE_CHECKING:
+ from av import VideoFrame
+
+ from scope.server.fal_tracks import FalOutputTrack
+
+logger = logging.getLogger(__name__)
+
+TOKEN_EXPIRATION_SECONDS = 120
+
+
+class FalClient:
+ """WebSocket + WebRTC client for connecting to fal.ai cloud.
+
+ Based on fal-demos/yolo_webcam_webrtc reference implementation.
+ Scope acts as WebRTC client (creates offers), fal.ai acts as server.
+ """
+
+ def __init__(
+ self,
+ app_id: str,
+ api_key: str | None = None,
+ on_frame_received: Callable[[VideoFrame], None] | None = None,
+ use_auth: bool = False,
+ ):
+ self.app_id = app_id # e.g., "owner/app-name/ws"
+ self.api_key = api_key
+ self.on_frame_received = on_frame_received
+ self.use_auth = use_auth # Set to True for private apps
+
+ self.ws: websockets.WebSocketClientProtocol | None = None
+ self.pc: RTCPeerConnection | None = None
+ self.output_track: FalOutputTrack | None = None
+ self.data_channel: RTCDataChannel | None = None
+ self._pending_parameters: dict[str, Any] = {}
+ self.stop_event = asyncio.Event()
+ self._receive_task: asyncio.Task | None = None
+
+ async def _get_temporary_token(self) -> str:
+ """Get temporary JWT token from fal API (mirrors frontend pattern)."""
+ # Extract alias from app_id (e.g., "owner/app-name/webrtc" -> "app-name")
+ parts = self.app_id.split("/")
+ alias = parts[1] if len(parts) >= 2 else self.app_id
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ "https://rest.alpha.fal.ai/tokens/",
+ headers={
+ "Authorization": f"Key {self.api_key}",
+ "Content-Type": "application/json",
+ },
+ json={
+ "allowed_apps": [alias],
+ "token_expiration": TOKEN_EXPIRATION_SECONDS,
+ },
+ ) as resp:
+ if not resp.ok:
+ error_body = await resp.text()
+ raise RuntimeError(
+ f"Token request failed: {resp.status} {error_body}"
+ )
+ token = await resp.json()
+ # Handle both string and object responses
+ if isinstance(token, dict) and "detail" in token:
+ return token["detail"]
+ return token
+
+ def _build_ws_url(self, token: str | None = None) -> str:
+ """Build WebSocket URL, optionally with JWT token.
+
+ The app_id should be the full path including the WebSocket endpoint,
+ e.g., 'username/app-name/ws' or 'username/app-name/webrtc'.
+
+ For public apps, token can be None.
+ """
+ app_id = self.app_id.strip("/")
+ base_url = f"wss://fal.run/{app_id}"
+ if token:
+ return f"{base_url}?fal_jwt_token={token}"
+ return base_url
+
+ async def connect(self, initial_parameters: dict | None = None) -> None:
+ """Connect to fal WebSocket and establish WebRTC connection.
+
+ Args:
+ initial_parameters: Initial parameters to send with the offer
+ (e.g., pipeline_ids, prompts, etc.)
+ """
+ self._initial_parameters = initial_parameters
+
+ # Get temporary token only if auth is enabled (for private apps)
+ token = None
+ if self.use_auth and self.api_key:
+ logger.info("Using JWT authentication for private app")
+ token = await self._get_temporary_token()
+
+ ws_url = self._build_ws_url(token)
+
+ # Log URL without token for debugging
+ ws_url_without_token = ws_url.split("?")[0]
+ logger.info(f"Connecting to fal WebSocket: {ws_url_without_token}")
+ try:
+ logger.info("Attempting WebSocket connection (may take up to 2 min if app is cold)...")
+ self.ws = await asyncio.wait_for(
+ websockets.connect(ws_url, compression=None),
+ timeout=120.0, # 2 minutes for cold start
+ )
+ logger.info("WebSocket connection established, waiting for ready message...")
+ except TimeoutError:
+ logger.error("Timeout connecting to fal WebSocket after 120s")
+ raise RuntimeError(
+ f"Timeout connecting to fal WebSocket. Check that app_id '{self.app_id}' "
+ "is correct (format: 'username/app-name' or full path like 'username/app-name/ws')"
+ ) from None
+ except Exception as e:
+ logger.error(f"Failed to connect to fal WebSocket: {e}")
+ raise RuntimeError(f"Failed to connect to fal WebSocket: {e}") from e
+
+ # Wait for "ready" message from server (with timeout)
+ try:
+ logger.info("Waiting for ready message from fal server...")
+ ready_msg = await asyncio.wait_for(self.ws.recv(), timeout=120.0)
+ logger.info(f"Received message from fal: {ready_msg[:100] if ready_msg else 'empty'}")
+ except TimeoutError:
+ logger.error("Timeout waiting for ready message after 120s")
+ await self.ws.close()
+ raise RuntimeError(
+ f"Timeout waiting for 'ready' message from fal server. "
+ f"The fal app at '{self.app_id}' may not be running or may not be a WebRTC app."
+ ) from None
+ except Exception as e:
+ logger.error(f"Error receiving ready message: {e}")
+ await self.ws.close()
+ raise
+
+ ready_data = json.loads(ready_msg)
+ if ready_data.get("type") != "ready":
+ await self.ws.close()
+ raise RuntimeError(f"Expected 'ready' message, got: {ready_data}")
+ logger.info("fal server ready")
+
+ # Create peer connection with STUN server
+ from aiortc import RTCConfiguration, RTCIceServer
+
+ config = RTCConfiguration(
+ iceServers=[RTCIceServer(urls=["stun:stun.l.google.com:19302"])]
+ )
+ self.pc = RTCPeerConnection(configuration=config)
+
+ # Set up event handlers
+ self._setup_pc_handlers()
+
+ # Add output track (for sending frames to fal)
+ from scope.server.fal_tracks import FalOutputTrack
+
+ self.output_track = FalOutputTrack()
+ self.pc.addTrack(self.output_track)
+
+ # Create data channel for parameter forwarding (must be before createOffer)
+ self.data_channel = self.pc.createDataChannel("parameters", ordered=True)
+ self._setup_data_channel_handlers()
+
+ # Create and send offer (we are the client)
+ offer = await self.pc.createOffer()
+ await self.pc.setLocalDescription(offer)
+
+ # Build offer message with optional initial parameters
+ offer_msg: dict[str, Any] = {
+ "type": "offer",
+ "sdp": self.pc.localDescription.sdp,
+ }
+ if self._initial_parameters:
+ offer_msg["initialParameters"] = self._initial_parameters
+ logger.info(
+ f"Including initial parameters: {list(self._initial_parameters.keys())}"
+ )
+
+ await self.ws.send(json.dumps(offer_msg))
+ logger.info("Sent WebRTC offer")
+
+ # Start message receive loop
+ self._receive_task = asyncio.create_task(self._receive_loop())
+
+ def _setup_pc_handlers(self) -> None:
+ """Set up RTCPeerConnection event handlers."""
+ if self.pc is None:
+ return
+
+ @self.pc.on("icecandidate")
+ async def on_icecandidate(candidate):
+ if self.ws is None:
+ return
+ if candidate is None:
+ await self.ws.send(
+ json.dumps(
+ {
+ "type": "icecandidate",
+ "candidate": None,
+ }
+ )
+ )
+ else:
+ await self.ws.send(
+ json.dumps(
+ {
+ "type": "icecandidate",
+ "candidate": {
+ "candidate": candidate.candidate,
+ "sdpMid": candidate.sdpMid,
+ "sdpMLineIndex": candidate.sdpMLineIndex,
+ },
+ }
+ )
+ )
+
+ @self.pc.on("connectionstatechange")
+ async def on_connectionstatechange():
+ if self.pc is None:
+ return
+ logger.info(f"Connection state: {self.pc.connectionState}")
+ if self.pc.connectionState in ("failed", "closed", "disconnected"):
+ self.stop_event.set()
+
+ @self.pc.on("track")
+ def on_track(track):
+ """Handle incoming track (processed frames from fal)."""
+ if track.kind == "video":
+ logger.info("Received video track from fal")
+ asyncio.create_task(self._consume_track(track))
+
+ async def _consume_track(self, track) -> None:
+ """Consume frames from the incoming track."""
+ while not self.stop_event.is_set():
+ try:
+ frame = await track.recv()
+ if self.on_frame_received:
+ self.on_frame_received(frame)
+ except Exception as e:
+ logger.error(f"Error receiving frame: {e}")
+ break
+
+ def _setup_data_channel_handlers(self) -> None:
+ """Set up data channel event handlers."""
+ if self.data_channel is None:
+ return
+
+ @self.data_channel.on("open")
+ def on_data_channel_open():
+ logger.info("Data channel to fal opened")
+ # Send any pending parameters
+ if self._pending_parameters:
+ self._send_parameters(self._pending_parameters)
+ self._pending_parameters = {}
+
+ @self.data_channel.on("close")
+ def on_data_channel_close():
+ logger.info("Data channel to fal closed")
+
+ @self.data_channel.on("error")
+ def on_data_channel_error(error):
+ logger.error(f"Data channel error: {error}")
+
+ def send_parameters(self, parameters: dict[str, Any]) -> bool:
+ """Forward parameter update to fal.ai via data channel.
+
+ Args:
+ parameters: Dictionary of parameters to send.
+
+ Returns:
+ True if sent immediately, False if queued for later.
+ """
+ if self.data_channel and self.data_channel.readyState == "open":
+ return self._send_parameters(parameters)
+ else:
+ # Queue for when channel opens
+ self._pending_parameters.update(parameters)
+ logger.debug(f"Queued parameters for later: {list(parameters.keys())}")
+ return False
+
+ def _send_parameters(self, parameters: dict[str, Any]) -> bool:
+ """Internal: send parameters over data channel."""
+ if self.data_channel is None:
+ return False
+ try:
+ # Filter out None values (same as frontend)
+ filtered = {k: v for k, v in parameters.items() if v is not None}
+ message = json.dumps(filtered)
+ self.data_channel.send(message)
+ logger.debug(f"Sent parameters to fal: {list(filtered.keys())}")
+ return True
+ except Exception as e:
+ logger.error(f"Failed to send parameters: {e}")
+ return False
+
+ async def _receive_loop(self) -> None:
+ """Receive and handle WebSocket messages."""
+ if self.ws is None or self.pc is None:
+ return
+
+ try:
+ while not self.stop_event.is_set():
+ try:
+ message = await asyncio.wait_for(
+ self.ws.recv(),
+ timeout=1.0,
+ )
+ except TimeoutError:
+ continue
+
+ try:
+ data = json.loads(message)
+ except json.JSONDecodeError:
+ logger.warning(f"Non-JSON message: {message}")
+ continue
+
+ msg_type = data.get("type")
+
+ if msg_type == "answer":
+ # Set remote description from server's answer
+ answer = RTCSessionDescription(
+ sdp=data["sdp"],
+ type="answer",
+ )
+ await self.pc.setRemoteDescription(answer)
+ logger.info("Set remote description from answer")
+
+ elif msg_type == "icecandidate":
+ candidate_data = data.get("candidate")
+ if candidate_data is None:
+ await self.pc.addIceCandidate(None)
+ else:
+ candidate = candidate_from_sdp(
+ candidate_data.get("candidate", "")
+ )
+ candidate.sdpMid = candidate_data.get("sdpMid")
+ candidate.sdpMLineIndex = candidate_data.get("sdpMLineIndex")
+ await self.pc.addIceCandidate(candidate)
+
+ elif msg_type == "error":
+ logger.error(f"Server error: {data.get('error')}")
+
+ else:
+ logger.debug(f"Unknown message type: {msg_type}")
+
+ except websockets.exceptions.ConnectionClosed:
+ logger.info("WebSocket connection closed")
+ except Exception as e:
+ logger.error(f"Receive loop error: {e}")
+ finally:
+ self.stop_event.set()
+
+ async def send_frame(self, frame: VideoFrame) -> None:
+ """Send a frame to fal for processing."""
+ if self.output_track:
+ await self.output_track.put_frame(frame)
+
+ async def disconnect(self) -> None:
+ """Close WebRTC and WebSocket connections."""
+ self.stop_event.set()
+
+ if self._receive_task:
+ self._receive_task.cancel()
+ try:
+ await self._receive_task
+ except asyncio.CancelledError:
+ pass
+
+ if self.pc:
+ await self.pc.close()
+ self.pc = None
+
+ if self.ws:
+ await self.ws.close()
+ self.ws = None
+
+ logger.info("Disconnected from fal")
diff --git a/src/scope/server/fal_tracks.py b/src/scope/server/fal_tracks.py
new file mode 100644
index 000000000..1950deb40
--- /dev/null
+++ b/src/scope/server/fal_tracks.py
@@ -0,0 +1,136 @@
+"""Custom MediaStreamTrack classes for fal.ai WebRTC communication.
+
+FalOutputTrack: Sends frames from a queue to fal.ai via WebRTC
+FalInputTrack: Receives processed frames from fal.ai and queues them
+"""
+
+from __future__ import annotations
+
+import asyncio
+import fractions
+import time
+from typing import TYPE_CHECKING
+
+from aiortc.mediastreams import MediaStreamTrack
+
+if TYPE_CHECKING:
+ from av import VideoFrame
+
+
+class FalOutputTrack(MediaStreamTrack):
+ """Sends frames from queue to fal via WebRTC.
+
+ This is the outbound track - frames are put into the queue
+ and sent to fal.ai for processing.
+ """
+
+ kind = "video"
+
+ def __init__(self, target_fps: int = 30):
+ super().__init__()
+ self.frame_queue: asyncio.Queue[VideoFrame] = asyncio.Queue(maxsize=30)
+ self.target_fps = target_fps
+ self._start_time = time.time()
+ self._frame_count = 0
+
+ async def recv(self) -> VideoFrame:
+ """Called by aiortc to get next frame to send.
+
+ This method is called by the WebRTC stack when it needs
+ the next frame to encode and send.
+ """
+ frame = await self.frame_queue.get()
+
+ # Set pts (presentation timestamp) and time_base
+ self._frame_count += 1
+ frame.pts = self._frame_count
+ frame.time_base = fractions.Fraction(1, self.target_fps)
+
+ return frame
+
+ async def put_frame(self, frame: VideoFrame) -> bool:
+ """Add frame to be sent to fal.
+
+ Returns True if frame was queued, False if queue was full (frame dropped).
+ """
+ try:
+ self.frame_queue.put_nowait(frame)
+ return True
+ except asyncio.QueueFull:
+ # Drop oldest frame and add new one
+ try:
+ self.frame_queue.get_nowait()
+ self.frame_queue.put_nowait(frame)
+ return True
+ except asyncio.QueueEmpty:
+ return False
+
+ def put_frame_sync(self, frame: VideoFrame) -> bool:
+ """Synchronous version for use from non-async contexts."""
+ return self.put_frame_nowait(frame)
+
+ def put_frame_nowait(self, frame: VideoFrame) -> bool:
+ """Non-blocking frame put."""
+ try:
+ self.frame_queue.put_nowait(frame)
+ return True
+ except asyncio.QueueFull:
+ return False
+
+
+class FalInputTrack(MediaStreamTrack):
+ """Receives processed frames from fal via WebRTC.
+
+ This wraps an incoming track and makes frames available via a queue.
+ Similar pattern to YOLOTrack in reference, but stores frames instead
+ of processing them.
+ """
+
+ kind = "video"
+
+ def __init__(self, source_track: MediaStreamTrack):
+ super().__init__()
+ self.source_track = source_track
+ self.frame_queue: asyncio.Queue[VideoFrame] = asyncio.Queue(maxsize=30)
+ self._consume_task: asyncio.Task | None = None
+
+ def start_consuming(self) -> None:
+ """Start consuming frames from source track."""
+ self._consume_task = asyncio.create_task(self._consume_loop())
+
+ async def _consume_loop(self) -> None:
+ """Continuously receive frames from source and queue them."""
+ while True:
+ try:
+ frame = await self.source_track.recv()
+ try:
+ self.frame_queue.put_nowait(frame)
+ except asyncio.QueueFull:
+ # Drop oldest frame
+ try:
+ self.frame_queue.get_nowait()
+ self.frame_queue.put_nowait(frame)
+ except asyncio.QueueEmpty:
+ pass
+ except Exception:
+ break
+
+ async def recv(self) -> VideoFrame:
+ """Get next received frame."""
+ return await self.frame_queue.get()
+
+ def get_frame_nowait(self) -> VideoFrame | None:
+ """Non-blocking frame get."""
+ try:
+ return self.frame_queue.get_nowait()
+ except asyncio.QueueEmpty:
+ return None
+
+ async def stop(self) -> None:
+ """Stop consuming frames."""
+ if self._consume_task:
+ self._consume_task.cancel()
+ try:
+ await self._consume_task
+ except asyncio.CancelledError:
+ pass
diff --git a/src/scope/server/frame_processor.py b/src/scope/server/frame_processor.py
index 09abb7f03..65472839e 100644
--- a/src/scope/server/frame_processor.py
+++ b/src/scope/server/frame_processor.py
@@ -1,15 +1,20 @@
+from __future__ import annotations
+
import logging
import queue
import threading
import time
-from typing import Any
+from typing import TYPE_CHECKING, Any
import torch
-from aiortc.mediastreams import VideoFrame
+from av import VideoFrame
from .pipeline_manager import PipelineManager
from .pipeline_processor import PipelineProcessor
+if TYPE_CHECKING:
+ from .fal_client import FalClient
+
logger = logging.getLogger(__name__)
@@ -81,6 +86,13 @@ def __init__(
if pipeline_ids is not None:
self.pipeline_ids = pipeline_ids
+ # fal.ai cloud integration
+ self.fal_client: FalClient | None = None
+ self.fal_enabled = False
+ self._fal_received_frames: queue.Queue[VideoFrame] = queue.Queue(maxsize=30)
+ # Pending fal connection info (set in start() if cloud mode params are present)
+ self._pending_fal_connection: dict | None = None
+
def start(self):
if self.running:
return
@@ -96,6 +108,17 @@ def start(self):
spout_config = self.parameters.pop("spout_receiver")
self._update_spout_receiver(spout_config)
+ # Check if we should use fal cloud (from frontend initial params)
+ # Store pending connection info for async connect later
+ fal_cloud_enabled = self.parameters.get("fal_cloud_enabled", False)
+ if fal_cloud_enabled:
+ app_id = self.parameters.get("fal_app_id")
+ api_key = self.parameters.get("fal_api_key")
+ if app_id and api_key:
+ self._pending_fal_connection = {"app_id": app_id, "api_key": api_key}
+ logger.info("Cloud mode enabled, skipping local pipeline setup")
+ return # Don't set up local pipelines - fal connection will happen later
+
if not self.pipeline_ids:
logger.error("No pipeline IDs provided, cannot start")
self.running = False
@@ -150,6 +173,19 @@ def stop(self, error_message: str = None):
logger.error(f"Error releasing Spout receiver: {e}")
self.spout_receiver = None
+ # Clean up fal client (synchronous cleanup - async disconnect handled separately)
+ if self.fal_client is not None:
+ self.fal_enabled = False
+ # Note: For full cleanup, call disconnect_from_fal() before stop()
+ # This is a fallback that clears local state
+ self.fal_client = None
+ # Clear received frames queue
+ while not self._fal_received_frames.empty():
+ try:
+ self._fal_received_frames.get_nowait()
+ except queue.Empty:
+ break
+
logger.info("FrameProcessor stopped")
# Notify callback that frame processor has stopped
@@ -162,11 +198,29 @@ def stop(self, error_message: str = None):
except Exception as e:
logger.error(f"Error in frame processor stop callback: {e}")
+ async def stop_async(self, error_message: str | None = None) -> None:
+ """Async version of stop that properly disconnects from fal.
+
+ Use this when calling from an async context to ensure proper cleanup
+ of the fal WebRTC connection.
+ """
+ # Disconnect from fal first (async operation)
+ if self.fal_client is not None:
+ await self.disconnect_from_fal()
+
+ # Then do the regular synchronous cleanup
+ self.stop(error_message)
+
def put(self, frame: VideoFrame) -> bool:
if not self.running:
return False
- # Convert VideoFrame to tensor and put into first processor's input queue
+ # Route to fal cloud if enabled
+ if self.fal_enabled and self.fal_client and self.fal_client.output_track:
+ # Send frame directly to fal via WebRTC
+ return self.fal_client.output_track.put_frame_nowait(frame)
+
+ # Local processing: Convert VideoFrame to tensor and put into first processor's input queue
if self.pipeline_processors:
first_processor = self.pipeline_processors[0]
@@ -186,10 +240,35 @@ def put(self, frame: VideoFrame) -> bool:
return True
def get(self) -> torch.Tensor | None:
- if not self.running or not self.pipeline_processors:
+ if not self.running:
+ return None
+
+ # Get frame from fal cloud if enabled
+ if self.fal_enabled:
+ try:
+ fal_frame = self._fal_received_frames.get_nowait()
+ # Convert av.VideoFrame to tensor for consistency with local processing
+ frame_array = fal_frame.to_ndarray(format="rgb24")
+ frame = torch.from_numpy(frame_array)
+
+ # Enqueue frame for async Spout sending (non-blocking)
+ if self.spout_sender_enabled and self.spout_sender is not None:
+ try:
+ frame_np = frame.numpy()
+ self.spout_sender_queue.put_nowait(frame_np)
+ except queue.Full:
+ logger.debug("Spout output queue full, dropping frame")
+ except Exception as e:
+ logger.error(f"Error enqueueing Spout frame: {e}")
+
+ return frame
+ except queue.Empty:
+ return None
+
+ # Local processing: Get frame from last pipeline processor's output queue
+ if not self.pipeline_processors:
return None
- # Get frame from last pipeline processor's output queue
last_processor = self.pipeline_processors[-1]
if not last_processor.output_queue:
return None
@@ -242,26 +321,97 @@ def _get_pipeline_dimensions(self) -> tuple[int, int]:
return 512, 512
def update_parameters(self, parameters: dict[str, Any]):
- """Update parameters that will be used in the next pipeline call."""
- # Handle Spout output settings
+ """Update parameters that will be used in the next pipeline call.
+
+ When fal cloud mode is enabled, pipeline parameters are forwarded to fal.
+ Spout configuration is always handled locally.
+ """
+ # Handle Spout output settings (always local)
if "spout_sender" in parameters:
spout_config = parameters.pop("spout_sender")
self._update_spout_sender(spout_config)
- # Handle Spout input settings
+ # Handle Spout input settings (always local)
if "spout_receiver" in parameters:
spout_config = parameters.pop("spout_receiver")
self._update_spout_receiver(spout_config)
- # Update parameters for all pipeline processors
- for processor in self.pipeline_processors:
- processor.update_parameters(parameters)
+ # Route remaining parameters based on mode
+ if self.fal_enabled and self.fal_client:
+ # Forward to fal cloud via data channel
+ self.fal_client.send_parameters(parameters)
+ else:
+ # Local processing: update pipeline processors
+ for processor in self.pipeline_processors:
+ processor.update_parameters(parameters)
- # Update local parameters
+ # Always store locally for state tracking
self.parameters = {**self.parameters, **parameters}
return True
+ # =========================================================================
+ # fal.ai cloud integration methods
+ # =========================================================================
+
+ def _on_fal_frame_received(self, frame: VideoFrame) -> None:
+ """Callback when frame is received from fal.
+
+ This is called by FalClient when a processed frame arrives via WebRTC.
+ """
+ try:
+ self._fal_received_frames.put_nowait(frame)
+ except queue.Full:
+ # Drop oldest frame to make room
+ try:
+ self._fal_received_frames.get_nowait()
+ self._fal_received_frames.put_nowait(frame)
+ except queue.Empty:
+ pass
+
+ async def connect_to_fal(
+ self,
+ app_id: str,
+ api_key: str | None = None,
+ initial_parameters: dict | None = None,
+ ) -> None:
+ """Connect to fal.ai cloud for remote GPU inference.
+
+ Args:
+ app_id: The fal app ID (e.g., "owner/scope-fal/ws")
+ api_key: The fal API key for authentication (optional for public apps)
+ initial_parameters: Initial parameters to send with the offer (pipeline_ids, etc.)
+ """
+ # Disconnect existing connection if any
+ if self.fal_client is not None:
+ await self.disconnect_from_fal()
+
+ # Import FalClient here to avoid circular imports
+ from .fal_client import FalClient
+
+ self.fal_client = FalClient(
+ app_id=app_id,
+ api_key=api_key,
+ on_frame_received=self._on_fal_frame_received,
+ )
+ await self.fal_client.connect(initial_parameters=initial_parameters)
+ self.fal_enabled = True
+ logger.info(f"Connected to fal cloud: {app_id}")
+
+ async def disconnect_from_fal(self) -> None:
+ """Disconnect from fal.ai cloud."""
+ if self.fal_client is not None:
+ await self.fal_client.disconnect()
+ self.fal_client = None
+ self.fal_enabled = False
+ # Clear any pending received frames
+ while not self._fal_received_frames.empty():
+ try:
+ self._fal_received_frames.get_nowait()
+ except queue.Empty:
+ break
+ logger.info("Disconnected from fal cloud")
+
def _update_spout_sender(self, config: dict):
"""Update Spout output configuration."""
logger.info(f"Spout output config received: {config}")
@@ -491,8 +641,13 @@ def _spout_receiver_loop(self):
if rgb_frame is not None:
last_frame_time = time.time()
- # Convert to tensor and put into first processor's input queue
- if self.pipeline_processors:
+ # Route based on fal mode
+ if self.fal_enabled and self.fal_client:
+ # Convert numpy to av.VideoFrame for WebRTC and route through put()
+ video_frame = VideoFrame.from_ndarray(rgb_frame, format="rgb24")
+ self.put(video_frame)
+ elif self.pipeline_processors:
+ # Local processing: put directly into pipeline
first_processor = self.pipeline_processors[0]
frame_tensor = torch.from_numpy(rgb_frame)
frame_tensor = frame_tensor.unsqueeze(0)
diff --git a/src/scope/server/schema.py b/src/scope/server/schema.py
index fbe7ef3bf..52aff0449 100644
--- a/src/scope/server/schema.py
+++ b/src/scope/server/schema.py
@@ -489,3 +489,26 @@ class AssetsResponse(BaseModel):
"""Response containing all discoverable asset files."""
assets: list[AssetFileInfo]
+
+
+# =============================================================================
+# fal.ai cloud integration schemas
+# =============================================================================
+
+
+class FalConnectRequest(BaseModel):
+ """Request to connect to fal.ai cloud for remote GPU inference."""
+
+ app_id: str = Field(
+ ..., description="The fal app ID (e.g., 'owner/scope-fal/webrtc')"
+ )
+ api_key: str = Field(..., description="The fal API key for authentication")
+
+
+class FalStatusResponse(BaseModel):
+ """Response containing fal.ai cloud connection status."""
+
+ connected: bool = Field(..., description="Whether connected to fal cloud")
+ app_id: str | None = Field(
+ default=None, description="The connected fal app ID (if connected)"
+ )
diff --git a/src/scope/server/tracks.py b/src/scope/server/tracks.py
index 7a49a26a9..6988a4309 100644
--- a/src/scope/server/tracks.py
+++ b/src/scope/server/tracks.py
@@ -53,6 +53,10 @@ async def input_loop(self):
try:
input_frame = await self.track.recv()
+ # Skip frame if frame_processor isn't ready yet (can happen during initialization)
+ if self.frame_processor is None:
+ continue
+
# Store raw VideoFrame for later processing (tracks input FPS internally)
self.frame_processor.put(input_frame)
@@ -92,7 +96,7 @@ async def next_timestamp(self) -> tuple[int, fractions.Fraction]:
return self.timestamp, VIDEO_TIME_BASE
- def initialize_output_processing(self):
+ async def initialize_output_processing(self):
if not self.frame_processor:
self.frame_processor = FrameProcessor(
pipeline_manager=self.pipeline_manager,
@@ -101,6 +105,19 @@ def initialize_output_processing(self):
)
self.frame_processor.start()
+ # Connect to fal cloud if pending connection info exists
+ if (
+ hasattr(self.frame_processor, "_pending_fal_connection")
+ and self.frame_processor._pending_fal_connection
+ ):
+ conn_info = self.frame_processor._pending_fal_connection
+ await self.frame_processor.connect_to_fal(
+ app_id=conn_info["app_id"],
+ api_key=conn_info["api_key"],
+ initial_parameters=self.initial_parameters,
+ )
+ self.frame_processor._pending_fal_connection = None
+
def initialize_input_processing(self, track: MediaStreamTrack):
self.track = track
self.input_task_running = True
@@ -108,8 +125,8 @@ def initialize_input_processing(self, track: MediaStreamTrack):
async def recv(self) -> VideoFrame:
"""Return the next available processed frame"""
- # Lazy initialization on first call
- self.initialize_output_processing()
+ # Lazy initialization on first call (now async)
+ await self.initialize_output_processing()
# Keep running while either WebRTC input is active OR Spout input is enabled
while self.input_task_running or self._spout_receiver_enabled:
diff --git a/src/scope/server/webrtc.py b/src/scope/server/webrtc.py
index d81bce0e5..0ef0a1dc9 100644
--- a/src/scope/server/webrtc.py
+++ b/src/scope/server/webrtc.py
@@ -141,6 +141,24 @@ def __init__(self):
self.sessions: dict[str, Session] = {}
self.rtc_config = create_rtc_config()
self.is_first_track = True
+ # Store fal cloud config for new sessions
+ self._pending_fal_config: dict | None = None
+
+ def set_fal_config(self, app_id: str, api_key: str) -> None:
+ """Store fal credentials for new sessions.
+
+ When cloud mode is enabled, these credentials will be injected
+ into the initial_parameters of new WebRTC sessions.
+ """
+ self._pending_fal_config = {"app_id": app_id, "api_key": api_key}
+
+ def clear_fal_config(self) -> None:
+ """Clear pending fal config."""
+ self._pending_fal_config = None
+
+ def get_fal_config(self) -> dict | None:
+ """Get current fal config."""
+ return self._pending_fal_config
async def handle_offer(
self, request: WebRTCOfferRequest, pipeline_manager: PipelineManager
@@ -165,6 +183,15 @@ async def handle_offer(
)
logger.info(f"Received initial parameters: {initial_parameters}")
+ # Inject fal cloud config if enabled
+ if self._pending_fal_config:
+ initial_parameters["fal_cloud_enabled"] = True
+ initial_parameters["fal_app_id"] = self._pending_fal_config["app_id"]
+ initial_parameters["fal_api_key"] = self._pending_fal_config["api_key"]
+ logger.info(
+ f"Injected fal cloud config for new session: {self._pending_fal_config['app_id']}"
+ )
+
# Create new RTCPeerConnection with configuration
pc = RTCPeerConnection(self.rtc_config)
session = Session(pc)
diff --git a/tests/server/__init__.py b/tests/server/__init__.py
new file mode 100644
index 000000000..8bda5f707
--- /dev/null
+++ b/tests/server/__init__.py
@@ -0,0 +1 @@
+# Server tests
diff --git a/tests/server/test_fal_api.py b/tests/server/test_fal_api.py
new file mode 100644
index 000000000..98a72ec1b
--- /dev/null
+++ b/tests/server/test_fal_api.py
@@ -0,0 +1,221 @@
+"""Tests for fal.ai cloud API endpoints."""
+
+from unittest.mock import AsyncMock, MagicMock
+
+import pytest
+from fastapi.testclient import TestClient
+
+
+class TestFalApiEndpoints:
+ """Tests for fal API endpoints."""
+
+ @pytest.fixture
+ def mock_webrtc_manager(self):
+ """Create a mock WebRTC manager."""
+ manager = MagicMock()
+ manager.sessions = {}
+ # Explicitly set get_fal_config to return None (no pending fal config)
+ manager.get_fal_config.return_value = None
+ return manager
+
+ @pytest.fixture
+ def mock_session_with_fal(self):
+ """Create a mock session with fal-enabled frame processor."""
+ session = MagicMock()
+ frame_processor = MagicMock()
+ frame_processor.fal_enabled = True
+ frame_processor.fal_client = MagicMock()
+ frame_processor.fal_client.app_id = "test/app/webrtc"
+ frame_processor.connect_to_fal = AsyncMock()
+ frame_processor.disconnect_from_fal = AsyncMock()
+ session.video_track = MagicMock()
+ session.video_track.frame_processor = frame_processor
+ return session
+
+ @pytest.fixture
+ def mock_session_without_fal(self):
+ """Create a mock session without fal enabled."""
+ session = MagicMock()
+ frame_processor = MagicMock()
+ frame_processor.fal_enabled = False
+ frame_processor.fal_client = None
+ frame_processor.connect_to_fal = AsyncMock()
+ frame_processor.disconnect_from_fal = AsyncMock()
+ session.video_track = MagicMock()
+ session.video_track.frame_processor = frame_processor
+ return session
+
+ @pytest.fixture
+ def client(self, mock_webrtc_manager):
+ """Create a test client with mocked dependencies."""
+ from scope.server.app import app, get_webrtc_manager
+
+ app.dependency_overrides[get_webrtc_manager] = lambda: mock_webrtc_manager
+ client = TestClient(app)
+ yield client
+ app.dependency_overrides.clear()
+
+ def test_fal_status_not_connected(self, client, mock_webrtc_manager):
+ """Test fal status endpoint when not connected."""
+ response = client.get("/api/v1/fal/status")
+ assert response.status_code == 200
+ data = response.json()
+ assert data["connected"] is False
+ assert data["app_id"] is None
+
+ def test_fal_status_connected(
+ self, client, mock_webrtc_manager, mock_session_with_fal
+ ):
+ """Test fal status endpoint when connected."""
+ mock_webrtc_manager.sessions = {"session1": mock_session_with_fal}
+
+ response = client.get("/api/v1/fal/status")
+ assert response.status_code == 200
+ data = response.json()
+ assert data["connected"] is True
+ assert data["app_id"] == "test/app/webrtc"
+
+ def test_fal_connect_no_sessions(self, client, mock_webrtc_manager):
+ """Test fal connect endpoint with no active sessions."""
+ response = client.post(
+ "/api/v1/fal/connect",
+ json={"app_id": "owner/app/webrtc", "api_key": "test-key"},
+ )
+ assert response.status_code == 200
+ data = response.json()
+ assert data["connected"] is True
+ assert data["app_id"] == "owner/app/webrtc"
+
+ def test_fal_connect_with_sessions(
+ self, client, mock_webrtc_manager, mock_session_without_fal
+ ):
+ """Test fal connect endpoint with active sessions."""
+ mock_webrtc_manager.sessions = {"session1": mock_session_without_fal}
+
+ response = client.post(
+ "/api/v1/fal/connect",
+ json={"app_id": "owner/app/webrtc", "api_key": "test-key"},
+ )
+ assert response.status_code == 200
+ data = response.json()
+ assert data["connected"] is True
+ assert data["app_id"] == "owner/app/webrtc"
+
+ # Verify connect_to_fal was called
+ fp = mock_session_without_fal.video_track.frame_processor
+ fp.connect_to_fal.assert_called_once_with(
+ app_id="owner/app/webrtc",
+ api_key="test-key",
+ )
+
+ def test_fal_connect_validation_error(self, client):
+ """Test fal connect endpoint with invalid request."""
+ # Missing required fields
+ response = client.post(
+ "/api/v1/fal/connect",
+ json={},
+ )
+ assert response.status_code == 422 # Validation error
+
+ def test_fal_disconnect_no_sessions(self, client, mock_webrtc_manager):
+ """Test fal disconnect endpoint with no active sessions."""
+ response = client.post("/api/v1/fal/disconnect")
+ assert response.status_code == 200
+ data = response.json()
+ assert data["connected"] is False
+ assert data["app_id"] is None
+
+ def test_fal_disconnect_with_sessions(
+ self, client, mock_webrtc_manager, mock_session_with_fal
+ ):
+ """Test fal disconnect endpoint with active sessions."""
+ mock_webrtc_manager.sessions = {"session1": mock_session_with_fal}
+
+ response = client.post("/api/v1/fal/disconnect")
+ assert response.status_code == 200
+ data = response.json()
+ assert data["connected"] is False
+ assert data["app_id"] is None
+
+ # Verify disconnect_from_fal was called
+ fp = mock_session_with_fal.video_track.frame_processor
+ fp.disconnect_from_fal.assert_called_once()
+
+ def test_fal_connect_multiple_sessions(self, client, mock_webrtc_manager):
+ """Test fal connect endpoint with multiple active sessions."""
+ # Create multiple sessions
+ sessions = {}
+ for i in range(3):
+ session = MagicMock()
+ frame_processor = MagicMock()
+ frame_processor.connect_to_fal = AsyncMock()
+ session.video_track = MagicMock()
+ session.video_track.frame_processor = frame_processor
+ sessions[f"session{i}"] = session
+
+ mock_webrtc_manager.sessions = sessions
+
+ response = client.post(
+ "/api/v1/fal/connect",
+ json={"app_id": "owner/app/webrtc", "api_key": "test-key"},
+ )
+ assert response.status_code == 200
+
+ # Verify all sessions were connected
+ for session in sessions.values():
+ fp = session.video_track.frame_processor
+ fp.connect_to_fal.assert_called_once_with(
+ app_id="owner/app/webrtc",
+ api_key="test-key",
+ )
+
+ def test_fal_disconnect_multiple_sessions(self, client, mock_webrtc_manager):
+ """Test fal disconnect endpoint with multiple active sessions."""
+ # Create multiple sessions
+ sessions = {}
+ for i in range(3):
+ session = MagicMock()
+ frame_processor = MagicMock()
+ frame_processor.disconnect_from_fal = AsyncMock()
+ session.video_track = MagicMock()
+ session.video_track.frame_processor = frame_processor
+ sessions[f"session{i}"] = session
+
+ mock_webrtc_manager.sessions = sessions
+
+ response = client.post("/api/v1/fal/disconnect")
+ assert response.status_code == 200
+
+ # Verify all sessions were disconnected
+ for session in sessions.values():
+ fp = session.video_track.frame_processor
+ fp.disconnect_from_fal.assert_called_once()
+
+ def test_fal_status_with_session_without_video_track(
+ self, client, mock_webrtc_manager
+ ):
+ """Test fal status handles sessions without video track gracefully."""
+ session = MagicMock()
+ session.video_track = None
+ mock_webrtc_manager.sessions = {"session1": session}
+
+ response = client.get("/api/v1/fal/status")
+ assert response.status_code == 200
+ data = response.json()
+ assert data["connected"] is False
+ assert data["app_id"] is None
+
+ def test_fal_status_with_session_without_frame_processor(
+ self, client, mock_webrtc_manager
+ ):
+ """Test fal status handles sessions without frame processor gracefully."""
+ session = MagicMock()
+ session.video_track = MagicMock()
+ session.video_track.frame_processor = None
+ mock_webrtc_manager.sessions = {"session1": session}
+
+ response = client.get("/api/v1/fal/status")
+ assert response.status_code == 200
+ data = response.json()
+ assert data["connected"] is False
+ assert data["app_id"] is None
diff --git a/tests/server/test_fal_client.py b/tests/server/test_fal_client.py
new file mode 100644
index 000000000..b4c25a0f5
--- /dev/null
+++ b/tests/server/test_fal_client.py
@@ -0,0 +1,290 @@
+"""Tests for FalClient module."""
+
+from unittest.mock import patch
+
+import pytest
+
+
+class MockResponse:
+ """Mock aiohttp response with async context manager support."""
+
+ def __init__(self, ok=True, status=200, json_data=None, text_data=""):
+ self.ok = ok
+ self.status = status
+ self._json_data = json_data
+ self._text_data = text_data
+
+ async def json(self):
+ return self._json_data
+
+ async def text(self):
+ return self._text_data
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, *args):
+ pass
+
+
+class MockSession:
+ """Mock aiohttp ClientSession with async context manager support."""
+
+ def __init__(self, response: MockResponse):
+ self._response = response
+ self.post_calls = []
+
+ def post(self, url, **kwargs):
+ self.post_calls.append((url, kwargs))
+ return self._response
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, *args):
+ pass
+
+
+@pytest.mark.asyncio
+async def test_get_temporary_token_success():
+ """Test successful token acquisition from fal API."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app-name/webrtc", api_key="test-key")
+
+ mock_response = MockResponse(ok=True, json_data={"detail": "test-token"})
+ mock_session = MockSession(mock_response)
+
+ with patch(
+ "scope.server.fal_client.aiohttp.ClientSession", return_value=mock_session
+ ):
+ token = await client._get_temporary_token()
+ assert token == "test-token"
+
+
+@pytest.mark.asyncio
+async def test_get_temporary_token_string_response():
+ """Test token acquisition when API returns plain string."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app-name/webrtc", api_key="test-key")
+
+ mock_response = MockResponse(ok=True, json_data="plain-token-string")
+ mock_session = MockSession(mock_response)
+
+ with patch(
+ "scope.server.fal_client.aiohttp.ClientSession", return_value=mock_session
+ ):
+ token = await client._get_temporary_token()
+ assert token == "plain-token-string"
+
+
+@pytest.mark.asyncio
+async def test_get_temporary_token_failure():
+ """Test token acquisition failure handling."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app-name/webrtc", api_key="bad-key")
+
+ mock_response = MockResponse(ok=False, status=401, text_data="Unauthorized")
+ mock_session = MockSession(mock_response)
+
+ with patch(
+ "scope.server.fal_client.aiohttp.ClientSession", return_value=mock_session
+ ):
+ with pytest.raises(RuntimeError, match="Token request failed"):
+ await client._get_temporary_token()
+
+
+@pytest.mark.asyncio
+async def test_get_temporary_token_extracts_alias():
+ """Test that alias is correctly extracted from app_id."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/my-cool-app/webrtc", api_key="test-key")
+
+ mock_response = MockResponse(ok=True, json_data={"detail": "token"})
+ mock_session = MockSession(mock_response)
+
+ with patch(
+ "scope.server.fal_client.aiohttp.ClientSession", return_value=mock_session
+ ):
+ await client._get_temporary_token()
+
+ # Verify the alias was extracted correctly
+ assert len(mock_session.post_calls) == 1
+ _, kwargs = mock_session.post_calls[0]
+ assert kwargs["json"]["allowed_apps"] == ["my-cool-app"]
+
+
+def test_build_ws_url():
+ """Test WebSocket URL construction."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app-name/webrtc", api_key="test-key")
+ url = client._build_ws_url("my-token")
+ assert url == "wss://fal.run/owner/app-name/webrtc?fal_jwt_token=my-token"
+
+
+def test_build_ws_url_strips_slashes():
+ """Test URL construction handles leading/trailing slashes."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="/owner/app-name/webrtc/", api_key="test-key")
+ url = client._build_ws_url("my-token")
+ assert url == "wss://fal.run/owner/app-name/webrtc?fal_jwt_token=my-token"
+
+
+def test_fal_client_initialization():
+ """Test FalClient initializes with correct default state."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app/webrtc", api_key="test-key")
+
+ assert client.app_id == "owner/app/webrtc"
+ assert client.api_key == "test-key"
+ assert client.on_frame_received is None
+ assert client.ws is None
+ assert client.pc is None
+ assert client.output_track is None
+ assert not client.stop_event.is_set()
+
+
+def test_fal_client_with_callback():
+ """Test FalClient initializes with frame callback."""
+ from scope.server.fal_client import FalClient
+
+ callback = lambda frame: None # noqa: E731
+ client = FalClient(
+ app_id="owner/app/webrtc", api_key="test-key", on_frame_received=callback
+ )
+
+ assert client.on_frame_received is callback
+
+
+@pytest.mark.asyncio
+async def test_disconnect_when_not_connected():
+ """Test disconnect works cleanly when not connected."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app/webrtc", api_key="test-key")
+
+ # Should not raise any exceptions
+ await client.disconnect()
+
+ assert client.stop_event.is_set()
+ assert client.pc is None
+ assert client.ws is None
+
+
+def test_fal_client_initialization_includes_data_channel_attrs():
+ """Test FalClient initializes with data channel attributes."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app/webrtc", api_key="test-key")
+
+ assert client.data_channel is None
+ assert client._pending_parameters == {}
+
+
+def test_send_parameters_queues_when_channel_closed():
+ """Test parameters are queued when data channel is not open."""
+
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app/webrtc", api_key="test-key")
+
+ # No data channel (not connected)
+ client.data_channel = None
+
+ result = client.send_parameters({"prompt": "test prompt", "noise_scale": 0.5})
+
+ assert result is False
+ assert client._pending_parameters == {"prompt": "test prompt", "noise_scale": 0.5}
+
+
+def test_send_parameters_queues_when_channel_not_open():
+ """Test parameters are queued when data channel exists but not open."""
+ from unittest.mock import MagicMock
+
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app/webrtc", api_key="test-key")
+
+ # Data channel exists but not open
+ mock_channel = MagicMock()
+ mock_channel.readyState = "connecting"
+ client.data_channel = mock_channel
+
+ result = client.send_parameters({"prompt": "test prompt"})
+
+ assert result is False
+ assert client._pending_parameters == {"prompt": "test prompt"}
+
+
+def test_send_parameters_sends_when_channel_open():
+ """Test parameters are sent when data channel is open."""
+ from unittest.mock import MagicMock
+
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app/webrtc", api_key="test-key")
+
+ # Data channel is open
+ mock_channel = MagicMock()
+ mock_channel.readyState = "open"
+ mock_channel.send = MagicMock()
+ client.data_channel = mock_channel
+
+ result = client.send_parameters({"prompt": "test prompt", "noise_scale": 0.5})
+
+ assert result is True
+ mock_channel.send.assert_called_once()
+ # Verify JSON contains the parameters
+ sent_json = mock_channel.send.call_args[0][0]
+ import json
+
+ sent_data = json.loads(sent_json)
+ assert sent_data == {"prompt": "test prompt", "noise_scale": 0.5}
+
+
+def test_send_parameters_filters_none_values():
+ """Test parameters with None values are filtered out."""
+ from unittest.mock import MagicMock
+
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app/webrtc", api_key="test-key")
+
+ # Data channel is open
+ mock_channel = MagicMock()
+ mock_channel.readyState = "open"
+ mock_channel.send = MagicMock()
+ client.data_channel = mock_channel
+
+ result = client.send_parameters(
+ {"prompt": "test", "noise_scale": None, "denoising_steps": 5}
+ )
+
+ assert result is True
+ # Verify None values are filtered
+ sent_json = mock_channel.send.call_args[0][0]
+ import json
+
+ sent_data = json.loads(sent_json)
+ assert sent_data == {"prompt": "test", "denoising_steps": 5}
+ assert "noise_scale" not in sent_data
+
+
+def test_send_parameters_accumulates_pending():
+ """Test multiple send_parameters calls accumulate pending parameters."""
+ from scope.server.fal_client import FalClient
+
+ client = FalClient(app_id="owner/app/webrtc", api_key="test-key")
+ client.data_channel = None # Not connected
+
+ client.send_parameters({"prompt": "first"})
+ client.send_parameters({"noise_scale": 0.5})
+ client.send_parameters({"prompt": "second"}) # Should override first
+
+ assert client._pending_parameters == {"prompt": "second", "noise_scale": 0.5}
diff --git a/tests/server/test_fal_tracks.py b/tests/server/test_fal_tracks.py
new file mode 100644
index 000000000..d1ea89abd
--- /dev/null
+++ b/tests/server/test_fal_tracks.py
@@ -0,0 +1,277 @@
+"""Tests for fal tracks module."""
+
+import asyncio
+import fractions
+from unittest.mock import MagicMock
+
+import pytest
+
+from scope.server.fal_tracks import FalInputTrack, FalOutputTrack
+
+
+class TestFalOutputTrack:
+ """Tests for FalOutputTrack class."""
+
+ def test_initialization(self):
+ """Test FalOutputTrack initializes with correct defaults."""
+ track = FalOutputTrack()
+
+ assert track.kind == "video"
+ assert track.target_fps == 30
+ assert track._frame_count == 0
+ assert track.frame_queue.maxsize == 30
+
+ def test_initialization_custom_fps(self):
+ """Test FalOutputTrack with custom FPS."""
+ track = FalOutputTrack(target_fps=60)
+
+ assert track.target_fps == 60
+ assert track.frame_queue.maxsize == 30
+
+ @pytest.mark.asyncio
+ async def test_recv_returns_frame_with_pts(self):
+ """Test recv() returns frame with correct pts and time_base."""
+ track = FalOutputTrack(target_fps=30)
+
+ # Create mock frame
+ mock_frame = MagicMock()
+ mock_frame.pts = None
+ mock_frame.time_base = None
+
+ # Put frame in queue
+ await track.frame_queue.put(mock_frame)
+
+ # Receive frame
+ result = await track.recv()
+
+ assert result is mock_frame
+ assert result.pts == 1
+ assert result.time_base == fractions.Fraction(1, 30)
+
+ @pytest.mark.asyncio
+ async def test_recv_increments_frame_count(self):
+ """Test recv() increments frame count with each call."""
+ track = FalOutputTrack()
+
+ for i in range(3):
+ mock_frame = MagicMock()
+ await track.frame_queue.put(mock_frame)
+ result = await track.recv()
+ assert result.pts == i + 1
+
+ assert track._frame_count == 3
+
+ @pytest.mark.asyncio
+ async def test_put_frame_success(self):
+ """Test put_frame() successfully queues frame."""
+ track = FalOutputTrack()
+ mock_frame = MagicMock()
+
+ result = await track.put_frame(mock_frame)
+
+ assert result is True
+ assert track.frame_queue.qsize() == 1
+
+ @pytest.mark.asyncio
+ async def test_put_frame_drops_oldest_when_full(self):
+ """Test put_frame() drops oldest frame when queue is full."""
+ track = FalOutputTrack()
+ track.frame_queue = asyncio.Queue(maxsize=2)
+
+ frame1 = MagicMock(name="frame1")
+ frame2 = MagicMock(name="frame2")
+ frame3 = MagicMock(name="frame3")
+
+ await track.put_frame(frame1)
+ await track.put_frame(frame2)
+ # Queue is now full, frame3 should replace frame1
+ result = await track.put_frame(frame3)
+
+ assert result is True
+ assert track.frame_queue.qsize() == 2
+
+ # First frame out should be frame2 (frame1 was dropped)
+ out1 = await track.frame_queue.get()
+ out2 = await track.frame_queue.get()
+ assert out1 is frame2
+ assert out2 is frame3
+
+ def test_put_frame_nowait_success(self):
+ """Test put_frame_nowait() successfully queues frame."""
+ track = FalOutputTrack()
+ mock_frame = MagicMock()
+
+ result = track.put_frame_nowait(mock_frame)
+
+ assert result is True
+ assert track.frame_queue.qsize() == 1
+
+ def test_put_frame_nowait_returns_false_when_full(self):
+ """Test put_frame_nowait() returns False when queue is full."""
+ track = FalOutputTrack()
+ track.frame_queue = asyncio.Queue(maxsize=1)
+
+ frame1 = MagicMock()
+ frame2 = MagicMock()
+
+ track.put_frame_nowait(frame1)
+ result = track.put_frame_nowait(frame2)
+
+ assert result is False
+ assert track.frame_queue.qsize() == 1
+
+ def test_put_frame_sync_calls_nowait(self):
+ """Test put_frame_sync() uses put_frame_nowait()."""
+ track = FalOutputTrack()
+ mock_frame = MagicMock()
+
+ result = track.put_frame_sync(mock_frame)
+
+ assert result is True
+ assert track.frame_queue.qsize() == 1
+
+
+class TestFalInputTrack:
+ """Tests for FalInputTrack class."""
+
+ def test_initialization(self):
+ """Test FalInputTrack initializes correctly."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+
+ assert track.kind == "video"
+ assert track.source_track is mock_source
+ assert track.frame_queue.maxsize == 30
+ assert track._consume_task is None
+
+ def test_start_consuming_creates_task(self):
+ """Test start_consuming() creates asyncio task."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+
+ # Mock asyncio.create_task
+ with pytest.MonkeyPatch.context() as mp:
+ mock_task = MagicMock()
+ mp.setattr(asyncio, "create_task", lambda coro: mock_task)
+ track.start_consuming()
+
+ assert track._consume_task is mock_task
+
+ @pytest.mark.asyncio
+ async def test_recv_returns_frame_from_queue(self):
+ """Test recv() returns frame from queue."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+
+ mock_frame = MagicMock()
+ await track.frame_queue.put(mock_frame)
+
+ result = await track.recv()
+
+ assert result is mock_frame
+
+ def test_get_frame_nowait_returns_frame(self):
+ """Test get_frame_nowait() returns frame when available."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+
+ mock_frame = MagicMock()
+ track.frame_queue.put_nowait(mock_frame)
+
+ result = track.get_frame_nowait()
+
+ assert result is mock_frame
+
+ def test_get_frame_nowait_returns_none_when_empty(self):
+ """Test get_frame_nowait() returns None when queue is empty."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+
+ result = track.get_frame_nowait()
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_stop_cancels_consume_task(self):
+ """Test stop() cancels the consume task."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+
+ # Create a real task that can be cancelled
+ async def dummy_loop():
+ while True:
+ await asyncio.sleep(1)
+
+ task = asyncio.create_task(dummy_loop())
+ track._consume_task = task
+
+ await track.stop()
+
+ assert task.cancelled()
+
+ @pytest.mark.asyncio
+ async def test_stop_handles_no_task(self):
+ """Test stop() handles case when no task exists."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+
+ # Should not raise
+ await track.stop()
+
+ assert track._consume_task is None
+
+ @pytest.mark.asyncio
+ async def test_consume_loop_queues_frames(self):
+ """Test _consume_loop() receives and queues frames."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+
+ frames = [MagicMock(name=f"frame{i}") for i in range(3)]
+ frame_iter = iter(frames)
+
+ async def mock_recv():
+ try:
+ return next(frame_iter)
+ except StopIteration:
+ raise Exception("End of frames")
+
+ mock_source.recv = mock_recv
+
+ # Start consuming
+ track.start_consuming()
+
+ # Wait for frames to be consumed
+ await asyncio.sleep(0.1)
+
+ # Stop consuming
+ await track.stop()
+
+ # Check frames were queued (may not get all due to timing)
+ assert track.frame_queue.qsize() > 0
+
+ @pytest.mark.asyncio
+ async def test_consume_loop_drops_oldest_when_full(self):
+ """Test _consume_loop() drops oldest frame when queue is full."""
+ mock_source = MagicMock()
+ track = FalInputTrack(mock_source)
+ track.frame_queue = asyncio.Queue(maxsize=2)
+
+ frames = [MagicMock(name=f"frame{i}") for i in range(5)]
+ frame_index = 0
+
+ async def mock_recv():
+ nonlocal frame_index
+ if frame_index < len(frames):
+ frame = frames[frame_index]
+ frame_index += 1
+ return frame
+ raise Exception("End of frames")
+
+ mock_source.recv = mock_recv
+
+ track.start_consuming()
+ await asyncio.sleep(0.1)
+ await track.stop()
+
+ # Queue should have at most 2 frames (maxsize)
+ assert track.frame_queue.qsize() <= 2
diff --git a/tests/server/test_frame_processor_fal.py b/tests/server/test_frame_processor_fal.py
new file mode 100644
index 000000000..cce00b321
--- /dev/null
+++ b/tests/server/test_frame_processor_fal.py
@@ -0,0 +1,548 @@
+"""Tests for FrameProcessor fal.ai cloud integration."""
+
+import queue
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+
+class TestFrameProcessorFalAttributes:
+ """Tests for fal integration attributes."""
+
+ def test_fal_attributes_initialized(self):
+ """Test that fal attributes are properly initialized."""
+ from scope.server.frame_processor import FrameProcessor
+
+ # Create a mock pipeline manager
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+
+ # Check fal attributes exist and have correct initial values
+ assert processor.fal_client is None
+ assert processor.fal_enabled is False
+ assert isinstance(processor._fal_received_frames, queue.Queue)
+ assert processor._fal_received_frames.maxsize == 30
+
+
+class TestFrameProcessorFalCallback:
+ """Tests for _on_fal_frame_received callback."""
+
+ def test_on_fal_frame_received_queues_frame(self):
+ """Test that received frames are queued."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+
+ mock_frame = MagicMock()
+ processor._on_fal_frame_received(mock_frame)
+
+ assert processor._fal_received_frames.qsize() == 1
+ assert processor._fal_received_frames.get_nowait() is mock_frame
+
+ def test_on_fal_frame_received_drops_oldest_when_full(self):
+ """Test that oldest frame is dropped when queue is full."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+
+ # Fill the queue
+ frames = []
+ for i in range(30):
+ frame = MagicMock()
+ frame.id = i
+ frames.append(frame)
+ processor._on_fal_frame_received(frame)
+
+ assert processor._fal_received_frames.qsize() == 30
+
+ # Add one more frame
+ new_frame = MagicMock()
+ new_frame.id = 99
+ processor._on_fal_frame_received(new_frame)
+
+ # Queue should still be at max size
+ assert processor._fal_received_frames.qsize() == 30
+
+ # First frame should have been dropped
+ first = processor._fal_received_frames.get_nowait()
+ assert first.id == 1 # Frame 0 was dropped
+
+
+class TestFrameProcessorFalConnection:
+ """Tests for connect_to_fal and disconnect_from_fal."""
+
+ @pytest.mark.asyncio
+ async def test_connect_to_fal(self):
+ """Test fal connection initialization."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+
+ mock_client = AsyncMock()
+
+ # Patch where FalClient is imported (inside connect_to_fal)
+ with patch(
+ "scope.server.fal_client.FalClient", return_value=mock_client
+ ) as MockFalClient:
+ await processor.connect_to_fal(
+ app_id="owner/app/webrtc", api_key="test-key"
+ )
+
+ # Check FalClient was created with correct arguments
+ MockFalClient.assert_called_once_with(
+ app_id="owner/app/webrtc",
+ api_key="test-key",
+ on_frame_received=processor._on_fal_frame_received,
+ )
+
+ # Check connect was called
+ mock_client.connect.assert_called_once()
+
+ # Check state
+ assert processor.fal_enabled is True
+ assert processor.fal_client is mock_client
+
+ @pytest.mark.asyncio
+ async def test_connect_to_fal_disconnects_existing(self):
+ """Test that connecting disconnects any existing connection."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+
+ # Set up existing connection
+ old_client = AsyncMock()
+ processor.fal_client = old_client
+ processor.fal_enabled = True
+
+ new_client = AsyncMock()
+
+ # Patch where FalClient is imported (inside connect_to_fal)
+ with patch("scope.server.fal_client.FalClient", return_value=new_client):
+ await processor.connect_to_fal(
+ app_id="owner/app/webrtc", api_key="test-key"
+ )
+
+ # Old client should have been disconnected
+ old_client.disconnect.assert_called_once()
+
+ # New client should be connected
+ assert processor.fal_client is new_client
+ assert processor.fal_enabled is True
+
+ @pytest.mark.asyncio
+ async def test_disconnect_from_fal(self):
+ """Test fal disconnection."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+
+ mock_client = AsyncMock()
+ processor.fal_client = mock_client
+ processor.fal_enabled = True
+
+ # Add some frames to the queue
+ for _ in range(5):
+ processor._fal_received_frames.put_nowait(MagicMock())
+
+ await processor.disconnect_from_fal()
+
+ # Check client was disconnected
+ mock_client.disconnect.assert_called_once()
+
+ # Check state
+ assert processor.fal_client is None
+ assert processor.fal_enabled is False
+ assert processor._fal_received_frames.empty()
+
+ @pytest.mark.asyncio
+ async def test_disconnect_from_fal_when_not_connected(self):
+ """Test disconnect works when not connected."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+
+ # Should not raise any exceptions
+ await processor.disconnect_from_fal()
+
+ assert processor.fal_client is None
+ assert processor.fal_enabled is False
+
+
+class TestFrameProcessorFalRouting:
+ """Tests for frame routing to/from fal."""
+
+ def test_put_routes_to_fal_when_enabled(self):
+ """Test that put() routes frames to fal when enabled."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+
+ # Set up fal client with output track
+ mock_output_track = MagicMock()
+ mock_output_track.put_frame_nowait = MagicMock(return_value=True)
+ mock_client = MagicMock()
+ mock_client.output_track = mock_output_track
+
+ processor.fal_client = mock_client
+ processor.fal_enabled = True
+
+ mock_frame = MagicMock()
+ result = processor.put(mock_frame)
+
+ # Should route to fal
+ mock_output_track.put_frame_nowait.assert_called_once_with(mock_frame)
+ assert result is True
+
+ def test_put_routes_to_local_when_fal_disabled(self):
+ """Test that put() routes to local processing when fal disabled."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+ processor.fal_enabled = False
+
+ # Set up local pipeline processor
+ mock_pipeline_processor = MagicMock()
+ mock_pipeline_processor.input_queue = MagicMock()
+ mock_pipeline_processor.input_queue.put_nowait = MagicMock()
+ processor.pipeline_processors = [mock_pipeline_processor]
+
+ # Create a mock frame with to_ndarray
+ mock_frame = MagicMock()
+ mock_frame.to_ndarray = MagicMock(return_value=MagicMock())
+
+ with patch("scope.server.frame_processor.torch") as mock_torch:
+ mock_tensor = MagicMock()
+ mock_tensor.unsqueeze = MagicMock(return_value=mock_tensor)
+ mock_torch.from_numpy = MagicMock(return_value=mock_tensor)
+
+ result = processor.put(mock_frame)
+
+ # Should route to local pipeline
+ mock_pipeline_processor.input_queue.put_nowait.assert_called_once()
+ assert result is True
+
+ def test_get_returns_from_fal_when_enabled(self):
+ """Test that get() returns frames from fal queue when enabled."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+ processor.fal_enabled = True
+
+ # Add a mock frame to the fal received queue
+ mock_fal_frame = MagicMock()
+ mock_fal_frame.to_ndarray = MagicMock(return_value=MagicMock())
+ processor._fal_received_frames.put_nowait(mock_fal_frame)
+
+ with patch("scope.server.frame_processor.torch") as mock_torch:
+ mock_tensor = MagicMock()
+ mock_torch.from_numpy = MagicMock(return_value=mock_tensor)
+
+ result = processor.get()
+
+ # Should return frame from fal queue
+ mock_fal_frame.to_ndarray.assert_called_once_with(format="rgb24")
+ assert result is mock_tensor
+
+ def test_get_returns_none_when_fal_queue_empty(self):
+ """Test that get() returns None when fal queue is empty."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+ processor.fal_enabled = True
+
+ # Queue is empty
+ result = processor.get()
+
+ assert result is None
+
+ def test_get_returns_from_local_when_fal_disabled(self):
+ """Test that get() returns from local pipeline when fal disabled."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+ processor.fal_enabled = False
+
+ # Set up local pipeline processor with output queue
+ mock_frame = MagicMock()
+ mock_frame.squeeze = MagicMock(return_value=mock_frame)
+ mock_frame.cpu = MagicMock(return_value=mock_frame)
+
+ mock_output_queue = queue.Queue()
+ mock_output_queue.put(mock_frame)
+
+ mock_pipeline_processor = MagicMock()
+ mock_pipeline_processor.output_queue = mock_output_queue
+ processor.pipeline_processors = [mock_pipeline_processor]
+
+ result = processor.get()
+
+ # Should return frame from local pipeline
+ assert result is mock_frame
+ mock_frame.squeeze.assert_called_once_with(0)
+ mock_frame.cpu.assert_called_once()
+
+
+class TestFrameProcessorFalStop:
+ """Tests for stop() with fal cleanup."""
+
+ def test_stop_clears_fal_state(self):
+ """Test that stop() clears fal state."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+
+ # Set up fal state
+ mock_client = MagicMock()
+ processor.fal_client = mock_client
+ processor.fal_enabled = True
+
+ # Add some frames to queue
+ for _ in range(3):
+ processor._fal_received_frames.put_nowait(MagicMock())
+
+ processor.stop()
+
+ # Check fal state is cleared
+ assert processor.fal_client is None
+ assert processor.fal_enabled is False
+ assert processor._fal_received_frames.empty()
+
+ @pytest.mark.asyncio
+ async def test_stop_async_disconnects_fal(self):
+ """Test that stop_async() properly disconnects from fal."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+
+ # Set up fal client
+ mock_client = AsyncMock()
+ processor.fal_client = mock_client
+ processor.fal_enabled = True
+
+ await processor.stop_async()
+
+ # Check disconnect was called
+ mock_client.disconnect.assert_called_once()
+
+ # Check state is cleared
+ assert processor.fal_client is None
+ assert processor.fal_enabled is False
+ assert not processor.running
+
+
+class TestFrameProcessorSpoutFalRouting:
+ """Tests for Spout receiver routing to fal."""
+
+ def test_spout_receiver_routes_to_fal_when_enabled(self):
+ """Test that Spout frames route through put() when fal is enabled."""
+ from unittest.mock import patch
+
+ import numpy as np
+
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+ processor.spout_receiver_enabled = True
+
+ # Set up fal client
+ mock_output_track = MagicMock()
+ mock_output_track.put_frame_nowait = MagicMock(return_value=True)
+ mock_client = MagicMock()
+ mock_client.output_track = mock_output_track
+ processor.fal_client = mock_client
+ processor.fal_enabled = True
+
+ # Create a mock Spout receiver
+ mock_spout_receiver = MagicMock()
+ # Return a frame once, then None to exit loop
+ test_frame = np.zeros((480, 640, 3), dtype=np.uint8)
+ mock_spout_receiver.receive = MagicMock(side_effect=[test_frame, None])
+ processor.spout_receiver = mock_spout_receiver
+
+ # Mock VideoFrame.from_ndarray
+ with patch("scope.server.frame_processor.VideoFrame") as MockVideoFrame:
+ mock_video_frame = MagicMock()
+ MockVideoFrame.from_ndarray = MagicMock(return_value=mock_video_frame)
+
+ # Run one iteration of the loop manually
+ # We can't easily test the thread loop, so test the routing logic directly
+ rgb_frame = mock_spout_receiver.receive()
+ if processor.fal_enabled and processor.fal_client:
+ # Use the patched MockVideoFrame (not the real av.VideoFrame) so the
+ # routing check exercises the mock and does not depend on av
+ video_frame = MockVideoFrame.from_ndarray(rgb_frame, format="rgb24")
+ assert processor.put(video_frame) is True
+
+ # Verify the mocked frame was routed to fal
+ mock_output_track.put_frame_nowait.assert_called_once_with(mock_video_frame)
+
+ def test_spout_receiver_routes_to_local_when_fal_disabled(self):
+ """Test that Spout frames go to local pipeline when fal is disabled."""
+ import numpy as np
+
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+ processor.spout_receiver_enabled = True
+ processor.fal_enabled = False # fal disabled
+
+ # Set up local pipeline processor
+ mock_input_queue = MagicMock()
+ mock_pipeline_processor = MagicMock()
+ mock_pipeline_processor.input_queue = mock_input_queue
+ processor.pipeline_processors = [mock_pipeline_processor]
+
+ # Simulate what _spout_receiver_loop does when fal is disabled
+ rgb_frame = np.zeros((480, 640, 3), dtype=np.uint8)
+
+ # This is the logic from _spout_receiver_loop when fal is disabled
+ if not (processor.fal_enabled and processor.fal_client):
+ if processor.pipeline_processors:
+ import torch
+
+ first_processor = processor.pipeline_processors[0]
+ frame_tensor = torch.from_numpy(rgb_frame)
+ frame_tensor = frame_tensor.unsqueeze(0)
+ first_processor.input_queue.put_nowait(frame_tensor)
+
+ # Verify frame was put into local pipeline
+ mock_input_queue.put_nowait.assert_called_once()
+
+
+class TestFrameProcessorParameterRouting:
+ """Tests for parameter routing to fal vs local pipelines."""
+
+ def test_update_parameters_routes_to_fal_when_enabled(self):
+ """Test that parameters are forwarded to fal when cloud mode enabled."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+
+ # Set up fal client with mocked send_parameters
+ mock_client = MagicMock()
+ mock_client.send_parameters = MagicMock(return_value=True)
+ processor.fal_client = mock_client
+ processor.fal_enabled = True
+
+ # Set up local pipeline processor (should NOT be called)
+ mock_pipeline_processor = MagicMock()
+ processor.pipeline_processors = [mock_pipeline_processor]
+
+ # Send parameters
+ processor.update_parameters({"prompts": ["test prompt"], "noise_scale": 0.5})
+
+ # Should route to fal
+ mock_client.send_parameters.assert_called_once_with(
+ {"prompts": ["test prompt"], "noise_scale": 0.5}
+ )
+ # Local pipeline should NOT receive parameters
+ mock_pipeline_processor.update_parameters.assert_not_called()
+
+ def test_update_parameters_routes_to_local_when_fal_disabled(self):
+ """Test that parameters go to local pipelines when cloud mode disabled."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+ processor.fal_enabled = False
+ processor.fal_client = None
+
+ # Set up local pipeline processor
+ mock_pipeline_processor = MagicMock()
+ processor.pipeline_processors = [mock_pipeline_processor]
+
+ # Send parameters
+ processor.update_parameters({"prompts": ["test prompt"], "noise_scale": 0.5})
+
+ # Should route to local pipeline
+ mock_pipeline_processor.update_parameters.assert_called_once_with(
+ {"prompts": ["test prompt"], "noise_scale": 0.5}
+ )
+
+ def test_spout_params_stay_local_when_fal_enabled(self):
+ """Test that Spout parameters are always handled locally."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+
+ # Set up fal client
+ mock_client = MagicMock()
+ mock_client.send_parameters = MagicMock(return_value=True)
+ processor.fal_client = mock_client
+ processor.fal_enabled = True
+
+ # Mock _update_spout_sender and _update_spout_receiver
+ processor._update_spout_sender = MagicMock()
+ processor._update_spout_receiver = MagicMock()
+
+ # Send mixed parameters (Spout + pipeline params)
+ processor.update_parameters(
+ {
+ "spout_sender": {"enabled": True, "name": "TestSender"},
+ "spout_receiver": {"enabled": True, "name": "TestReceiver"},
+ "prompts": ["test prompt"],
+ }
+ )
+
+ # Spout params should be handled locally
+ processor._update_spout_sender.assert_called_once_with(
+ {"enabled": True, "name": "TestSender"}
+ )
+ processor._update_spout_receiver.assert_called_once_with(
+ {"enabled": True, "name": "TestReceiver"}
+ )
+
+ # Only non-Spout params should be forwarded to fal
+ mock_client.send_parameters.assert_called_once_with(
+ {"prompts": ["test prompt"]}
+ )
+
+ def test_parameters_stored_locally_regardless_of_mode(self):
+ """Test that parameters are always stored locally for state tracking."""
+ from scope.server.frame_processor import FrameProcessor
+
+ mock_pm = MagicMock()
+ processor = FrameProcessor(pipeline_manager=mock_pm)
+ processor.running = True
+
+ # Set up fal client
+ mock_client = MagicMock()
+ mock_client.send_parameters = MagicMock(return_value=True)
+ processor.fal_client = mock_client
+ processor.fal_enabled = True
+
+ # Send parameters
+ processor.update_parameters({"prompts": ["test"], "noise_scale": 0.5})
+
+ # Parameters should be stored locally
+ assert processor.parameters["prompts"] == ["test"]
+ assert processor.parameters["noise_scale"] == 0.5
diff --git a/uv.lock b/uv.lock
index 032306e65..7822cc084 100644
--- a/uv.lock
+++ b/uv.lock
@@ -484,6 +484,7 @@ version = "0.1.0b3"
source = { editable = "." }
dependencies = [
{ name = "accelerate" },
+ { name = "aiohttp" },
{ name = "aiortc" },
{ name = "click" },
{ name = "diffusers" },
@@ -516,6 +517,7 @@ dependencies = [
{ name = "triton-windows", marker = "sys_platform == 'win32'" },
{ name = "twilio" },
{ name = "uvicorn" },
+ { name = "websockets" },
]
[package.dev-dependencies]
@@ -525,6 +527,7 @@ dev = [
{ name = "imageio-ffmpeg" },
{ name = "pre-commit" },
{ name = "pytest" },
+ { name = "pytest-asyncio" },
{ name = "ruff" },
{ name = "twine" },
]
@@ -532,6 +535,7 @@ dev = [
[package.metadata]
requires-dist = [
{ name = "accelerate", specifier = ">=1.1.1" },
+ { name = "aiohttp", specifier = ">=3.9.0" },
{ name = "aiortc", specifier = ">=1.13.0" },
{ name = "click", specifier = ">=8.3.1" },
{ name = "diffusers", specifier = ">=0.31.0" },
@@ -563,6 +567,7 @@ requires-dist = [
{ name = "triton-windows", marker = "sys_platform == 'win32'", specifier = "==3.5.1.post24" },
{ name = "twilio", specifier = ">=9.8.0" },
{ name = "uvicorn", specifier = ">=0.35.0" },
+ { name = "websockets", specifier = ">=12.0" },
]
[package.metadata.requires-dev]
@@ -572,6 +577,7 @@ dev = [
{ name = "imageio-ffmpeg", specifier = ">=0.6.0" },
{ name = "pre-commit", specifier = ">=4.0.0" },
{ name = "pytest", specifier = ">=8.4.2" },
+ { name = "pytest-asyncio", specifier = ">=0.24.0" },
{ name = "ruff", specifier = ">=0.8.0" },
{ name = "twine", specifier = ">=5.0.0" },
]
@@ -2051,6 +2057,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
]
+[[package]]
+name = "pytest-asyncio"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pytest" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" },
+]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -2483,21 +2502,21 @@ dependencies = [
{ name = "typing-extensions", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]
wheels = [
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp312-cp312-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp312-cp312-manylinux_2_28_x86_64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp312-cp312-win_amd64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313-manylinux_2_28_x86_64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313-win_amd64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313t-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313t-manylinux_2_28_x86_64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313t-win_amd64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314-manylinux_2_28_x86_64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314-win_amd64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314t-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314t-manylinux_2_28_x86_64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314t-win_amd64.whl" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:1176f250311fa95cc3bca8077af323e0d73ea385ba266e096af82e7e2b91f256" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7cb4018f4ce68b61fd3ef87dc1c4ca520731c7b5b200e360ad47b612d7844063" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp312-cp312-win_amd64.whl", hash = "sha256:3a01f0b64c10a82d444d9fd06b3e8c567b1158b76b2764b8f51bfd8f535064b0" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:0b80b7555dcd0a75b7b06016991f01281a0bb078cf28fa2d1dfb949fad2fbd07" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:63381a109a569b280ed3319da89d3afe5cf9ab5c879936382a212affb5c90552" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313-win_amd64.whl", hash = "sha256:ad9183864acdd99fc5143d7ca9d3d2e7ddfc9a9600ff43217825d4e5e9855ccc" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:2314521c74d76e513c53bb72c0ce3511ef0295ff657a432790df6c207e5d7962" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4454a4faca31af81566e3a4208f10f20b8a6d9cfe42791b0ca7ff134326468fc" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp313-cp313t-win_amd64.whl", hash = "sha256:24420e430e77136f7079354134b34e7ba9d87e539f5ac84c33b08e5c13412ebe" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:32c036296c557f19a1537ce981c40533650097114e1720a321a39a3b08d9df56" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:7788d3d03d939cf00f93ac0da5ab520846f66411e339cfbf519a806e8facf519" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314-win_amd64.whl", hash = "sha256:7bcd40cbffac475b478d6ce812f03da84e9a4894956efb89c3b7bcca5dbd4f91" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:e88c78e5b08ae9303aa15da43b68b44287ecbec16d898d9fad6998832fe626a5" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7d8769bdf3200ca16a92f14df404c3370171ac3732996528a8973d753eac562f" },
+ { url = "https://download.pytorch.org/whl/cu128/torch-2.9.1%2Bcu128-cp314-cp314t-win_amd64.whl", hash = "sha256:0c784b600959ec70ee01cb23e8bc870a0e0475af30378ff5e39f4abed8b7c1cc" },
]
[[package]]
@@ -2522,11 +2541,11 @@ dependencies = [
{ name = "torch", version = "2.9.1+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "python_full_version < '3.15' and platform_machine == 'aarch64' and platform_python_implementation == 'CPython' and sys_platform == 'linux'" },
]
wheels = [
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp312-cp312-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp313-cp313-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp313-cp313t-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp314-cp314-manylinux_2_28_aarch64.whl" },
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp314-cp314t-manylinux_2_28_aarch64.whl" },
+ { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:bd33a7cc32122bc92919f95ea0e7bf73588e71be0ca2c5cad8fb7eebd333e8dd" },
+ { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:7695d95e4e4c25fe1af3b880ffcd2dbcaa43cce7fd7edbe0157305b837c1dcf8" },
+ { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:9db0306f8eec7dc11745044c78dc49a80b84cc0935e36575677cdc2bce9be23c" },
+ { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:68c8c884e7730146b7915d863526e8f32194532629ecc64da865242d35f417c0" },
+ { url = "https://download.pytorch.org/whl/cu128/torchvision-0.24.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:932dcfe6718f1306b6844477939d18c9102e678cdaffc13da9c3a1841d57ddde" },
]
[[package]]
@@ -2736,6 +2755,51 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" },
]
+[[package]]
+name = "websockets"
+version = "16.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365, upload-time = "2026-01-10T09:22:46.787Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038, upload-time = "2026-01-10T09:22:47.999Z" },
+ { url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328, upload-time = "2026-01-10T09:22:49.809Z" },
+ { url = "https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915, upload-time = "2026-01-10T09:22:51.071Z" },
+ { url = "https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152, upload-time = "2026-01-10T09:22:52.224Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583, upload-time = "2026-01-10T09:22:53.443Z" },
+ { url = "https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880, upload-time = "2026-01-10T09:22:55.033Z" },
+ { url = "https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261, upload-time = "2026-01-10T09:22:56.251Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693, upload-time = "2026-01-10T09:22:57.478Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364, upload-time = "2026-01-10T09:22:59.333Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039, upload-time = "2026-01-10T09:23:01.171Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323, upload-time = "2026-01-10T09:23:02.341Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975, upload-time = "2026-01-10T09:23:03.756Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203, upload-time = "2026-01-10T09:23:05.01Z" },
+ { url = "https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653, upload-time = "2026-01-10T09:23:06.301Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920, upload-time = "2026-01-10T09:23:07.492Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255, upload-time = "2026-01-10T09:23:09.245Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689, upload-time = "2026-01-10T09:23:10.483Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" },
+ { url = "https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = "2026-01-10T09:23:19.652Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" },
+ { url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" },
+ { url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" },
+ { url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, upload-time = "2026-01-10T09:23:29.449Z" },
+ { url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" },
+ { url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = "2026-01-10T09:23:32.627Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085, upload-time = "2026-01-10T09:23:33.816Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" },
+]
+
[[package]]
name = "yarl"
version = "1.22.0"