diff --git a/examples/01-hello-world/server.js b/examples/01-hello-world/server.js
new file mode 100644
index 0000000..c76c113
--- /dev/null
+++ b/examples/01-hello-world/server.js
@@ -0,0 +1,30 @@
+/**
+ * 01 — Hello World
+ *
+ * The simplest possible http-native server.
+ * Registers a single GET route and starts listening on port 3000.
+ *
+ * Run:
+ * bun examples/01-hello-world/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * A basic GET route that returns a JSON response.
+ * This route is automatically optimized by the Rust static fast-path
+ * analyzer — the response is served directly from Rust without
+ * crossing the JS bridge.
+ */
+app.get("/", (req, res) => {
+ res.json({ message: "Hello from http-native!" });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/02-route-params/server.js b/examples/02-route-params/server.js
new file mode 100644
index 0000000..d04ea5a
--- /dev/null
+++ b/examples/02-route-params/server.js
@@ -0,0 +1,44 @@
+/**
+ * 02 — Route Parameters
+ *
+ * Demonstrates parameterized routes with :param syntax.
+ * Parameters are extracted from the URL path and available
+ * via req.params.
+ *
+ * Run:
+ * bun examples/02-route-params/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/users/42
+ * curl http://localhost:3000/posts/7/comments/3
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * Single parameter route.
+ * GET /users/42 → { id: "42", type: "user" }
+ */
+app.get("/users/:id", (req, res) => {
+ res.json({
+ id: req.params.id,
+ type: "user",
+ });
+});
+
+/**
+ * Multiple parameters in a single route.
+ * GET /posts/7/comments/3 → { postId: "7", commentId: "3" }
+ */
+app.get("/posts/:postId/comments/:commentId", (req, res) => {
+ res.json({
+ postId: req.params.postId,
+ commentId: req.params.commentId,
+ });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/03-http-methods/server.js b/examples/03-http-methods/server.js
new file mode 100644
index 0000000..58dc28f
--- /dev/null
+++ b/examples/03-http-methods/server.js
@@ -0,0 +1,95 @@
+/**
+ * 03 — HTTP Methods
+ *
+ * Demonstrates the core HTTP methods on a small CRUD API:
+ * GET, POST, PUT, PATCH, and DELETE, backed by an in-memory store.
+ *
+ * Run:
+ * bun examples/03-http-methods/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/items
+ * curl -X POST -H "Content-Type: application/json" -d '{"name":"Widget"}' http://localhost:3000/items
+ * curl -X PUT -H "Content-Type: application/json" -d '{"name":"Gadget"}' http://localhost:3000/items/1
+ * curl -X DELETE http://localhost:3000/items/1
+ * curl -X PATCH -H "Content-Type: application/json" -d '{"name":"Updated"}' http://localhost:3000/items/1
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/** In-memory store for demonstration */
+const items = new Map();
+let nextId = 1;
+
+/**
+ * GET /items — List all items
+ */
+app.get("/items", (req, res) => {
+ res.json({ items: [...items.values()] });
+});
+
+/**
+ * POST /items — Create a new item
+ * Reads the JSON body via req.json()
+ */
+app.post("/items", (req, res) => {
+ const body = req.json();
+ if (!body || !body.name) {
+ return res.status(400).json({ error: "name is required" });
+ }
+
+ const id = String(nextId++);
+ const item = { id, name: body.name };
+ items.set(id, item);
+
+ res.status(201).json(item);
+});
+
+/**
+ * PUT /items/:id — Replace an item
+ */
+app.put("/items/:id", (req, res) => {
+ const body = req.json();
+ if (!body || !body.name) {
+ return res.status(400).json({ error: "name is required" });
+ }
+
+ const item = { id: req.params.id, name: body.name };
+ items.set(req.params.id, item);
+
+ res.json(item);
+});
+
+/**
+ * PATCH /items/:id — Partially update an item
+ */
+app.patch("/items/:id", (req, res) => {
+ const existing = items.get(req.params.id);
+ if (!existing) {
+ return res.status(404).json({ error: "Item not found" });
+ }
+
+ const body = req.json();
+ const updated = { ...existing, ...body };
+ items.set(req.params.id, updated);
+
+ res.json(updated);
+});
+
+/**
+ * DELETE /items/:id — Delete an item
+ */
+app.delete("/items/:id", (req, res) => {
+ const existed = items.delete(req.params.id);
+ if (!existed) {
+ return res.status(404).json({ error: "Item not found" });
+ }
+
+ res.json({ deleted: true });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/04-middleware/server.js b/examples/04-middleware/server.js
new file mode 100644
index 0000000..8f05797
--- /dev/null
+++ b/examples/04-middleware/server.js
@@ -0,0 +1,65 @@
+/**
+ * 04 — Middleware
+ *
+ * Demonstrates global middleware, path-scoped middleware,
+ * and the next() function for chaining.
+ *
+ * Run:
+ * bun examples/04-middleware/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/
+ * curl http://localhost:3000/admin/dashboard
+ * curl -H "Authorization: Bearer secret-token" http://localhost:3000/admin/dashboard
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * Global middleware — runs on every request.
+ * Logs the method and path, then calls next() to continue.
+ */
+app.use((req, res, next) => {
+ console.log(`${req.method} ${req.path}`);
+ return next();
+});
+
+/**
+ * Global middleware — adds a custom response header.
+ * Middleware without next() auto-advances to the next middleware.
+ */
+app.use((req, res) => {
+ res.set("X-Powered-By", "http-native");
+});
+
+/**
+ * Path-scoped middleware — only runs for /admin/* routes.
+ * Checks for an Authorization header before allowing access.
+ */
+app.use("/admin", (req, res, next) => {
+ const auth = req.header("authorization");
+ if (!auth || auth !== "Bearer secret-token") {
+ return res.status(401).json({ error: "Unauthorized" });
+ }
+ return next();
+});
+
+/**
+ * Public route — accessible without authentication.
+ */
+app.get("/", (req, res) => {
+ res.json({ message: "Public endpoint" });
+});
+
+/**
+ * Protected route — requires the /admin middleware to pass.
+ */
+app.get("/admin/dashboard", (req, res) => {
+ res.json({ message: "Admin dashboard", secret: "classified data" });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/05-error-handling/server.js b/examples/05-error-handling/server.js
new file mode 100644
index 0000000..b5d9476
--- /dev/null
+++ b/examples/05-error-handling/server.js
@@ -0,0 +1,70 @@
+/**
+ * 05 — Error Handling
+ *
+ * Demonstrates global error handlers, custom 404 pages,
+ * and throwing errors from route handlers.
+ *
+ * Run:
+ * bun examples/05-error-handling/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/
+ * curl http://localhost:3000/fail
+ * curl http://localhost:3000/not-a-real-route
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * A route that works normally.
+ */
+app.get("/", (req, res) => {
+ res.json({ status: "ok" });
+});
+
+/**
+ * A route that deliberately throws an error.
+ * The error handler below will catch it.
+ */
+app.get("/fail", (req, res) => {
+ throw new Error("Something went wrong!");
+});
+
+/**
+ * A route that throws a custom HTTP error with a status code.
+ */
+app.get("/forbidden", (req, res) => {
+ const error = new Error("Access denied");
+ error.status = 403;
+ throw error;
+});
+
+/**
+ * Custom 404 handler — catches all unmatched routes.
+ * Registered via bracket notation: app["404"](handler).
+ */
+app["404"]((req, res) => {
+ res.status(404).json({
+ error: "Not Found",
+ path: req.path,
+ hint: "Try GET / or GET /fail",
+ });
+});
+
+/**
+ * Global error handler — catches all thrown errors.
+ * Receives (error, req, res) instead of (req, res).
+ */
+app.error((error, req, res) => {
+ const status = error.status || 500;
+ res.status(status).json({
+ error: error.message || "Internal Server Error",
+ path: req.path,
+ });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/06-cors/server.js b/examples/06-cors/server.js
new file mode 100644
index 0000000..3efb58c
--- /dev/null
+++ b/examples/06-cors/server.js
@@ -0,0 +1,56 @@
+/**
+ * 06 — CORS (Cross-Origin Resource Sharing)
+ *
+ * Demonstrates the built-in CORS middleware with various
+ * configurations: wildcard, specific origins, and credentials.
+ *
+ * Run:
+ * bun examples/06-cors/server.js
+ *
+ * Test:
+ * curl -H "Origin: https://example.com" -v http://localhost:3000/api/data
+ * curl -X OPTIONS -H "Origin: https://example.com" -v http://localhost:3000/api/data
+ */
+
+import { createApp } from "@http-native/core";
+import { cors } from "@http-native/core/cors";
+
+const app = createApp();
+
+/**
+ * Apply CORS middleware globally.
+ * This allows requests from specific origins with credentials.
+ */
+app.use(
+ cors({
+ origin: ["https://example.com", "https://app.example.com"],
+ methods: ["GET", "POST", "PUT", "DELETE"],
+ allowedHeaders: ["Content-Type", "Authorization"],
+ credentials: true,
+ maxAge: 86400,
+ }),
+);
+
+/**
+ * API endpoint — CORS headers are automatically added.
+ */
+app.get("/api/data", (req, res) => {
+ res.json({
+ items: [
+ { id: 1, name: "Alpha" },
+ { id: 2, name: "Beta" },
+ ],
+ });
+});
+
+/**
+ * Another endpoint — same CORS policy applies.
+ */
+app.post("/api/data", (req, res) => {
+ const body = req.json();
+ res.status(201).json({ created: true, data: body });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/07-route-groups/server.js b/examples/07-route-groups/server.js
new file mode 100644
index 0000000..85358a2
--- /dev/null
+++ b/examples/07-route-groups/server.js
@@ -0,0 +1,91 @@
+/**
+ * 07 — Route Groups
+ *
+ * Demonstrates the group() API for organizing routes under
+ * shared path prefixes. Groups can be nested and middleware
+ * scoped to a group applies only to routes within it.
+ *
+ * Run:
+ * bun examples/07-route-groups/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/api/v1/users
+ * curl http://localhost:3000/api/v1/users/42
+ * curl http://localhost:3000/api/v1/posts
+ * curl http://localhost:3000/api/v2/users
+ * curl http://localhost:3000/health
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * Health check — outside any group.
+ */
+app.get("/health", (req, res) => {
+ res.json({ status: "healthy" });
+});
+
+/**
+ * API v1 group — all routes prefixed with /api/v1
+ */
+app.group("/api/v1", (api) => {
+ /**
+ * GET /api/v1/users
+ */
+ api.get("/users", (req, res) => {
+ res.json({
+ version: "v1",
+ users: [
+ { id: 1, name: "Alice" },
+ { id: 2, name: "Bob" },
+ ],
+ });
+ });
+
+ /**
+ * GET /api/v1/users/:id
+ */
+ api.get("/users/:id", (req, res) => {
+ res.json({
+ version: "v1",
+ user: { id: req.params.id, name: "Alice" },
+ });
+ });
+
+ /**
+ * GET /api/v1/posts
+ */
+ api.get("/posts", (req, res) => {
+ res.json({
+ version: "v1",
+ posts: [{ id: 1, title: "Hello World" }],
+ });
+ });
+});
+
+/**
+ * API v2 group — demonstrates versioned APIs.
+ */
+app.group("/api/v2", (api) => {
+ /**
+ * GET /api/v2/users — different response shape than v1
+ */
+ api.get("/users", (req, res) => {
+ res.json({
+ version: "v2",
+ data: {
+ users: [
+ { id: 1, name: "Alice", email: "alice@example.com" },
+ { id: 2, name: "Bob", email: "bob@example.com" },
+ ],
+ total: 2,
+ },
+ });
+ });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/08-query-params/server.js b/examples/08-query-params/server.js
new file mode 100644
index 0000000..b50ac73
--- /dev/null
+++ b/examples/08-query-params/server.js
@@ -0,0 +1,59 @@
+/**
+ * 08 — Query Parameters
+ *
+ * Demonstrates accessing query string parameters via req.query.
+ * Supports single values, multi-value arrays, and URL-encoded strings.
+ *
+ * Run:
+ * bun examples/08-query-params/server.js
+ *
+ * Test:
+ * curl "http://localhost:3000/search?q=hello&limit=10"
+ * curl "http://localhost:3000/search?q=hello&q=world"
+ * curl "http://localhost:3000/filter?tags=js&tags=rust&tags=native&sort=name"
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * Search endpoint with query parameters.
+ * GET /search?q=hello&limit=10
+ */
+app.get("/search", (req, res) => {
+ const query = req.query.q || "";
+ const limit = parseInt(req.query.limit) || 20;
+ const offset = parseInt(req.query.offset) || 0;
+
+ res.json({
+ query,
+ limit,
+ offset,
+ results: [
+ { id: 1, title: `Result for "${query}"` },
+ { id: 2, title: `Another result for "${query}"` },
+ ],
+ });
+});
+
+/**
+ * Filter endpoint with multi-value query params.
+ * GET /filter?tags=js&tags=rust&sort=name
+ *
+ * When the same key appears multiple times, req.query returns an array.
+ */
+app.get("/filter", (req, res) => {
+ const tags = req.query.tags;
+ const sort = req.query.sort || "id";
+
+ res.json({
+ tags: Array.isArray(tags) ? tags : tags ? [tags] : [],
+ sort,
+ message: "Filtered results",
+ });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/09-request-body/server.js b/examples/09-request-body/server.js
new file mode 100644
index 0000000..b9a1afb
--- /dev/null
+++ b/examples/09-request-body/server.js
@@ -0,0 +1,81 @@
+/**
+ * 09 — Request Body Parsing
+ *
+ * Demonstrates the built-in body parsing APIs: req.json(),
+ * req.text(), req.body (raw Buffer), and req.arrayBuffer().
+ * No external body-parser middleware needed.
+ *
+ * Run:
+ * bun examples/09-request-body/server.js
+ *
+ * Test:
+ * curl -X POST -H "Content-Type: application/json" -d '{"name":"Alice","age":30}' http://localhost:3000/json
+ * curl -X POST -H "Content-Type: text/plain" -d "Hello, World!" http://localhost:3000/text
+ * curl -X POST -H "Content-Type: application/octet-stream" --data-binary @package.json http://localhost:3000/raw
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * JSON body parsing via req.json().
+ * Automatically parses the request body as JSON.
+ */
+app.post("/json", (req, res) => {
+ const data = req.json();
+ if (!data) {
+ return res.status(400).json({ error: "Invalid or missing JSON body" });
+ }
+
+ res.json({
+ received: data,
+ type: "json",
+ keys: Object.keys(data),
+ });
+});
+
+/**
+ * Text body parsing via req.text().
+ * Returns the body as a UTF-8 string.
+ */
+app.post("/text", (req, res) => {
+ const text = req.text();
+
+ res.json({
+ received: text,
+ type: "text",
+ length: text.length,
+ });
+});
+
+/**
+ * Raw body access via req.body (Buffer).
+ * Useful for binary data like file uploads.
+ */
+app.post("/raw", (req, res) => {
+ const body = req.body;
+
+ res.json({
+ type: "raw",
+ size: body ? body.length : 0,
+ isBuffer: Buffer.isBuffer(body),
+ });
+});
+
+/**
+ * ArrayBuffer access via req.arrayBuffer().
+ * Useful for WebAssembly or typed array processing.
+ */
+app.post("/arraybuffer", (req, res) => {
+ const ab = req.arrayBuffer();
+
+ res.json({
+ type: "arraybuffer",
+ byteLength: ab.byteLength,
+ });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/10-native-cache/server.js b/examples/10-native-cache/server.js
new file mode 100644
index 0000000..a561ce3
--- /dev/null
+++ b/examples/10-native-cache/server.js
@@ -0,0 +1,85 @@
+/**
+ * 10 — Native Cache (ncache)
+ *
+ * Demonstrates the res.ncache() API for caching JSON responses
+ * directly in the Rust native layer. After the first request,
+ * subsequent requests are served from Rust's LRU cache without
+ * crossing the JS bridge — achieving near-static-route performance.
+ *
+ * Run:
+ * bun examples/10-native-cache/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/users/42
+ * curl http://localhost:3000/users/42 # served from Rust cache
+ * curl http://localhost:3000/users/99 # different param = different cache entry
+ *
+ * Benchmark:
+ * bombardier -c 200 -d 5s http://localhost:3000/users/42
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * Simulated database lookup.
+ */
+function getUser(id) {
+ return {
+ id,
+ name: "Ada Lovelace",
+ role: "engineer",
+ createdAt: "2024-01-01T00:00:00Z",
+ };
+}
+
+/**
+ * Dynamic route with native caching.
+ *
+ * res.ncache(data, ttl, options) sends a JSON response AND caches it
+ * in Rust's native LRU cache. Subsequent requests to the same URL
+ * are served directly from Rust without calling this JS handler.
+ *
+ * Parameters:
+ * data — JSON-serializable response data
+ * ttl — Cache TTL in seconds
+ * maxEntries — Max LRU entries per route (default 256)
+ */
+app.get("/users/:id", (req, res) => {
+ const user = getUser(req.params.id);
+
+ res.ncache(user, 30, { maxEntries: 512 });
+});
+
+/**
+ * Route-level cache configuration via options.
+ *
+ * The { cache } option configures Rust-side caching with vary-by
+ * fields. The cache key is computed from the specified fields,
+ * so different query/param combinations get separate cache entries.
+ */
+app.get(
+ "/search",
+ {
+ cache: {
+ ttl: 60,
+ varyBy: ["query.q", "query.page"],
+ maxEntries: 256,
+ },
+ },
+ (req, res) => {
+ const q = req.query.q || "";
+ const page = parseInt(req.query.page) || 1;
+
+ res.json({
+ query: q,
+ page,
+ results: [{ id: 1, title: `Result for "${q}"` }],
+ });
+ },
+);
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/11-sessions/server.js b/examples/11-sessions/server.js
new file mode 100644
index 0000000..dc7e437
--- /dev/null
+++ b/examples/11-sessions/server.js
@@ -0,0 +1,97 @@
+/**
+ * 11 — Sessions
+ *
+ * Demonstrates the built-in session middleware backed by
+ * Rust's native in-memory store. Sessions are signed with
+ * HMAC and stored in a sharded RwLock for thread safety.
+ *
+ * Run:
+ * bun examples/11-sessions/server.js
+ *
+ * Test:
+ * # Login and capture the session cookie
+ * curl -c cookies.txt -X POST http://localhost:3000/login -H "Content-Type: application/json" -d '{"username":"alice"}'
+ *
+ * # Access protected route with session cookie
+ * curl -b cookies.txt http://localhost:3000/profile
+ *
+ * # Increment a counter
+ * curl -b cookies.txt -X POST http://localhost:3000/counter
+ * curl -b cookies.txt http://localhost:3000/counter
+ *
+ * # Logout (destroys session)
+ * curl -b cookies.txt -X POST http://localhost:3000/logout
+ */
+
+import { createApp } from "@http-native/core";
+import { session } from "@http-native/core/session";
+
+const app = createApp();
+
+/**
+ * Session middleware — stores session data in Rust's native memory.
+ * The secret is used for HMAC signing of session cookies.
+ */
+app.use(
+ session({
+ secret: "my-super-secret-key-change-in-production",
+ maxAge: 3600,
+ cookieName: "sid",
+ httpOnly: true,
+ sameSite: "lax",
+ }),
+);
+
+/**
+ * Login — stores the username in the session.
+ */
+app.post("/login", (req, res) => {
+ const body = req.json();
+ if (!body || !body.username) {
+ return res.status(400).json({ error: "username is required" });
+ }
+
+ req.session.set("username", body.username);
+ req.session.set("loginAt", new Date().toISOString());
+
+ res.json({ message: `Welcome, ${body.username}!` });
+});
+
+/**
+ * Profile — reads session data.
+ */
+app.get("/profile", (req, res) => {
+ const username = req.session.get("username");
+ if (!username) {
+ return res.status(401).json({ error: "Not logged in" });
+ }
+
+ const loginAt = req.session.get("loginAt");
+ res.json({ username, loginAt });
+});
+
+/**
+ * Counter — demonstrates session mutation.
+ */
+app.post("/counter", (req, res) => {
+ const current = req.session.get("count") || 0;
+ req.session.set("count", current + 1);
+ res.json({ count: current + 1 });
+});
+
+app.get("/counter", (req, res) => {
+ const count = req.session.get("count") || 0;
+ res.json({ count });
+});
+
+/**
+ * Logout — destroys the session entirely.
+ */
+app.post("/logout", (req, res) => {
+ req.session.destroy();
+ res.json({ message: "Logged out" });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/12-streaming/server.js b/examples/12-streaming/server.js
new file mode 100644
index 0000000..6fe2b4f
--- /dev/null
+++ b/examples/12-streaming/server.js
@@ -0,0 +1,81 @@
+/**
+ * 12 — Streaming Responses
+ *
+ * Demonstrates the res.stream() API for chunked transfer-encoded
+ * responses. Useful for server-sent events, large file downloads,
+ * or real-time data feeds.
+ *
+ * Run:
+ * bun examples/12-streaming/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/stream
+ * curl http://localhost:3000/countdown
+ * curl http://localhost:3000/sse
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * Basic streaming — sends chunks with delays.
+ * Uses HTTP/1.1 chunked transfer encoding.
+ */
+app.get("/stream", async (req, res) => {
+ const stream = res.stream({ contentType: "text/plain; charset=utf-8" });
+ if (!stream) return;
+
+ for (let i = 1; i <= 5; i++) {
+ stream.write(`Chunk ${i} of 5\n`);
+ await sleep(500);
+ }
+
+ stream.end("Done!\n");
+});
+
+/**
+ * Countdown timer — streams numbers in real-time.
+ */
+app.get("/countdown", async (req, res) => {
+ const stream = res.stream({ contentType: "text/plain; charset=utf-8" });
+ if (!stream) return;
+
+ for (let i = 10; i >= 1; i--) {
+ stream.write(`${i}...\n`);
+ await sleep(1000);
+ }
+
+ stream.end("Liftoff! 🚀\n");
+});
+
+/**
+ * Server-Sent Events (SSE) — real-time event stream.
+ * Connect with EventSource in the browser or curl.
+ */
+app.get("/sse", async (req, res) => {
+ const stream = res
+ .set("Cache-Control", "no-cache")
+ .set("Connection", "keep-alive")
+ .stream({ contentType: "text/event-stream" });
+ if (!stream) return;
+
+ for (let i = 0; i < 10; i++) {
+ const event = `data: ${JSON.stringify({ time: new Date().toISOString(), count: i })}\n\n`;
+ stream.write(event);
+ await sleep(1000);
+ }
+
+ stream.end("event: close\ndata: stream ended\n\n");
+});
+
+/**
+ * Helper — async sleep.
+ */
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/13-response-types/server.js b/examples/13-response-types/server.js
new file mode 100644
index 0000000..a913091
--- /dev/null
+++ b/examples/13-response-types/server.js
@@ -0,0 +1,91 @@
+/**
+ * 13 — Response Types
+ *
+ * Demonstrates the various response methods: res.json(),
+ * res.send(), res.status(), res.type(), res.set(),
+ * res.sendStatus(), and res.locals for middleware data passing.
+ *
+ * Run:
+ * bun examples/13-response-types/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/json
+ * curl http://localhost:3000/text
+ * curl http://localhost:3000/html
+ * curl http://localhost:3000/custom-headers
+ * curl http://localhost:3000/status-only
+ * curl http://localhost:3000/locals
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * Middleware that sets res.locals for downstream handlers.
+ */
+app.use((req, res) => {
+ res.locals.requestTime = Date.now();
+ res.locals.version = "1.0.0";
+});
+
+/**
+ * JSON response — sets Content-Type: application/json automatically.
+ */
+app.get("/json", (req, res) => {
+ res.json({ format: "json", ok: true });
+});
+
+/**
+ * Plain text response via res.send().
+ */
+app.get("/text", (req, res) => {
+ res.send("Hello, plain text!");
+});
+
+/**
+ * HTML response — use res.type() to set Content-Type.
+ */
+app.get("/html", (req, res) => {
+ res.type("html").send("
Hello, HTML!
Served by http-native
");
+});
+
+/**
+ * Custom response headers via res.set() / res.header().
+ */
+app.get("/custom-headers", (req, res) => {
+ res
+ .set("X-Request-Id", "abc-123")
+ .set("X-Custom-Header", "custom-value")
+ .header("Cache-Control", "no-store")
+ .json({ headers: "custom" });
+});
+
+/**
+ * Status-only response via res.sendStatus().
+ * Note: 204 No Content carries no response body (RFC 9110).
+ */
+app.get("/status-only", (req, res) => {
+ res.sendStatus(204);
+});
+
+/**
+ * Accessing res.locals set by middleware.
+ */
+app.get("/locals", (req, res) => {
+ res.json({
+ requestTime: res.locals.requestTime,
+ version: res.locals.version,
+ });
+});
+
+/**
+ * Custom status code with JSON body.
+ */
+app.post("/items", (req, res) => {
+ res.status(201).json({ id: 1, created: true });
+});
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/14-validation/server.js b/examples/14-validation/server.js
new file mode 100644
index 0000000..7325a70
--- /dev/null
+++ b/examples/14-validation/server.js
@@ -0,0 +1,116 @@
+/**
+ * 14 — Request Validation
+ *
+ * Demonstrates the built-in validation middleware that works
+ * with Zod, TypeBox, Yup, Joi, or any schema library with
+ * .parse(), .safeParse(), or .validate() methods.
+ *
+ * This example uses a minimal inline schema for zero dependencies.
+ * Replace with Zod/TypeBox in production.
+ *
+ * Run:
+ * bun examples/14-validation/server.js
+ *
+ * Test:
+ * # Valid request
+ * curl -X POST -H "Content-Type: application/json" \
+ * -d '{"name":"Alice","email":"alice@example.com"}' \
+ * http://localhost:3000/users
+ *
+ * # Invalid request (missing email)
+ * curl -X POST -H "Content-Type: application/json" \
+ * -d '{"name":"Alice"}' \
+ * http://localhost:3000/users
+ *
+ * # Valid params
+ * curl http://localhost:3000/users/42
+ *
+ * # Invalid params (non-numeric)
+ * curl http://localhost:3000/users/abc
+ */
+
+import { createApp } from "@http-native/core";
+import { validate } from "@http-native/core/validate";
+
+const app = createApp();
+
+/**
+ * Minimal schema helper — mimics Zod's .parse() interface.
+ * In production, use: import { z } from "zod";
+ */
+function createSchema(validator) {
+ return {
+ parse(data) {
+ const result = validator(data);
+ if (result.error) {
+ const err = new Error(result.error);
+ err.issues = [{ path: [], message: result.error }];
+ throw err;
+ }
+ return result.value;
+ },
+ };
+}
+
+/**
+ * Body schema — validates name (string) and email (string with @).
+ */
+const createUserSchema = createSchema((data) => {
+ if (!data || typeof data !== "object") {
+ return { error: "Body must be an object" };
+ }
+ if (!data.name || typeof data.name !== "string") {
+ return { error: "name must be a non-empty string" };
+ }
+ if (!data.email || typeof data.email !== "string" || !data.email.includes("@")) {
+ return { error: "email must be a valid email address" };
+ }
+ return { value: { name: data.name, email: data.email } };
+});
+
+/**
+ * Params schema — validates that id is a numeric string.
+ */
+const numericIdSchema = createSchema((data) => {
+ if (!data || !data.id || !/^\d+$/.test(data.id)) {
+ return { error: "id must be a numeric string" };
+ }
+ return { value: { id: data.id } };
+});
+
+/**
+ * POST /users — validates the request body before the handler runs.
+ * If validation fails, a 400 response is sent automatically.
+ */
+app.post(
+ "/users",
+ validate({ body: createUserSchema }),
+ (req, res) => {
+ const { name, email } = req.validatedBody;
+ res.status(201).json({
+ id: 1,
+ name,
+ email,
+ message: "User created successfully",
+ });
+ },
+);
+
+/**
+ * GET /users/:id — validates route params.
+ */
+app.get(
+ "/users/:id",
+ validate({ params: numericIdSchema }),
+ (req, res) => {
+ res.json({
+ id: req.validatedParams.id,
+ name: "Alice",
+ email: "alice@example.com",
+ });
+ },
+);
+
+const server = await app.listen({ port: 3000 });
+
+console.log(`Server running at ${server.url}`);
diff --git a/examples/15-optimizations/server.js b/examples/15-optimizations/server.js
new file mode 100644
index 0000000..4006514
--- /dev/null
+++ b/examples/15-optimizations/server.js
@@ -0,0 +1,111 @@
+/**
+ * 15 — Runtime Optimizations
+ *
+ * Demonstrates http-native's runtime optimization system:
+ * - Static fast-path: pure-static responses served from Rust
+ * - Dynamic fast-path: parameterized responses served from Rust
+ * - Runtime cache promotion: deterministic routes auto-cached
+ * - Optimization snapshots and summaries
+ *
+ * Run:
+ * bun examples/15-optimizations/server.js
+ *
+ * Test:
+ * curl http://localhost:3000/static
+ * curl http://localhost:3000/dynamic/42
+ * curl http://localhost:3000/cached/hello
+ * curl http://localhost:3000/optimizations
+ *
+ * Benchmark:
+ * bombardier -c 200 -d 5s http://localhost:3000/static
+ * bombardier -c 200 -d 5s http://localhost:3000/dynamic/42
+ * bombardier -c 200 -d 5s http://localhost:3000/cached/hello
+ */
+
+import { createApp } from "@http-native/core";
+
+const app = createApp();
+
+/**
+ * Static fast-path route.
+ *
+ * The Rust analyzer detects that this handler returns a constant
+ * JSON response with no request data dependencies. The response
+ * bytes are pre-computed at startup and served directly from Rust
+ * without ever calling into JavaScript.
+ *
+ * Expected: ~1M+ req/s
+ */
+app.get("/static", (req, res) => {
+ res.json({
+ ok: true,
+ engine: "http-native",
+ mode: "static-fast-path",
+ });
+});
+
+/**
+ * Dynamic fast-path route.
+ *
+ * The Rust analyzer detects that this handler returns a JSON
+ * response with req.params interpolation. The JSON template is
+ * compiled at startup, and responses are rendered entirely in
+ * Rust by substituting param values into the template.
+ *
+ * Expected: ~500K+ req/s (no JS bridge crossing)
+ */
+app.get("/dynamic/:id", (req, res) => {
+ res.json({
+ id: req.params.id,
+ mode: "dynamic-fast-path",
+ });
+});
+
+/**
+ * Native-cached route.
+ *
+ * Uses res.ncache() to cache the response in Rust's LRU cache.
+ * The first request runs through JavaScript, but all subsequent
+ * requests to the same URL are served from Rust cache.
+ *
+ * Expected: ~1M+ req/s after first request
+ */
+app.get("/cached/:key", (req, res) => {
+ const data = {
+ key: req.params.key,
+ mode: "native-cached",
+ timestamp: Date.now(),
+ };
+
+ res.ncache(data, 60, { maxEntries: 1024 });
+});
+
+/**
+ * Optimization introspection endpoint.
+ *
+ * Returns the current optimization state for all routes,
+ * including which fast-paths are active and hit counts.
+ */
+app.get("/optimizations", (req, res) => {
+ res.json({
+ hint: "Start the server with opt options to see optimization data",
+ usage: 'app.listen({ opt: { notify: true, cache: true } })',
+ });
+});
+
+/**
+ * Start with runtime optimizations enabled.
+ *
+ * opt.notify: logs optimization events to console
+ * opt.cache: enables runtime response cache promotion
+ */
+const server = await app.listen({
+ port: 3000,
+ opt: {
+ notify: true,
+ cache: true,
+ },
+});
+
+console.log(`Server running at ${server.url}`);
+console.log("Optimization summary:", server.optimizations.summary());
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000..cf1d6ab
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,31 @@
+# http-native Examples
+
+A collection of examples demonstrating every feature of http-native.
+
+## Running Examples
+
+Each example is a standalone server. Run with Bun:
+
+```bash
+bun examples/01-hello-world/server.js
+```
+
+## Examples
+
+| # | Example | Features |
+|---|---------|----------|
+| 01 | [Hello World](./01-hello-world/) | Basic server, `createApp()`, `res.json()` |
+| 02 | [Route Params](./02-route-params/) | `:param` syntax, `req.params`, multi-param routes |
+| 03 | [HTTP Methods](./03-http-methods/) | GET, POST, PUT, PATCH, DELETE, `req.json()` |
+| 04 | [Middleware](./04-middleware/) | `app.use()`, path-scoped middleware, `next()` |
+| 05 | [Error Handling](./05-error-handling/) | `app.error()`, `app.404()`, custom error responses |
+| 06 | [CORS](./06-cors/) | `cors()` middleware, origins, credentials, preflight |
+| 07 | [Route Groups](./07-route-groups/) | `app.group()`, nested prefixes, API versioning |
+| 08 | [Query Params](./08-query-params/) | `req.query`, multi-value arrays, URL encoding |
+| 09 | [Request Body](./09-request-body/) | `req.json()`, `req.text()`, `req.body`, `req.arrayBuffer()` |
+| 10 | [Native Cache](./10-native-cache/) | `res.ncache()`, route-level `cache` option, LRU |
+| 11 | [Sessions](./11-sessions/) | `session()` middleware, `req.session`, HMAC cookies |
+| 12 | [Streaming](./12-streaming/) | `res.stream()`, chunked transfer, SSE |
+| 13 | [Response Types](./13-response-types/) | `res.send()`, `res.type()`, `res.set()`, `res.locals` |
+| 14 | [Validation](./14-validation/) | `validate()` middleware, body/params/query schemas |
+| 15 | [Optimizations](./15-optimizations/) | Static/dynamic fast-path, `opt`, runtime cache |
diff --git a/plans/dynamic-route-performance.md b/plans/dynamic-route-performance.md
new file mode 100644
index 0000000..2eda3e5
--- /dev/null
+++ b/plans/dynamic-route-performance.md
@@ -0,0 +1,293 @@
+# Dynamic Route Performance Optimization Plan
+
+## Executive Summary
+
+The parameterized route matching pipeline (`/users/:id`, `/posts/:id/comments/:commentId`) has several performance bottlenecks in the radix tree matching, path segment splitting, method dispatch, and param value extraction. This plan targets **7 specific bottlenecks** across `rsrc/src/router.rs` and `rsrc/src/lib.rs` without removing any existing functionality.
+
+---
+
+## Bottleneck Analysis
+
+### Current Hot Path for a Parameterized Route Request
+
+```mermaid
+flowchart TD
+ A[Raw TCP bytes] --> B[httparse parsing]
+ B --> C[String::from_utf8_lossy - path_cow allocation]
+ C --> D[normalize_runtime_path - possible allocation]
+ D --> E[HashMap::get for method_key - SipHash]
+ E --> F[HashMap::get for exact match - SipHash on bytes]
+ F --> G[HashMap::get for radix_trees - SipHash]
+ G --> H[split_segments_stack - str::split iterator]
+ H --> I[RadixNode::match_path - recursive, linear child scan]
+ I --> J[Vec::with_capacity for param_values - heap alloc]
+ J --> K[Build dispatch envelope]
+
+ style C fill:#f96,stroke:#333
+ style D fill:#f96,stroke:#333
+ style E fill:#ff9,stroke:#333
+ style F fill:#ff9,stroke:#333
+ style G fill:#ff9,stroke:#333
+ style H fill:#f96,stroke:#333
+ style I fill:#f66,stroke:#333
+ style J fill:#ff9,stroke:#333
+```
+
+**Legend:** 🔴 High impact | 🟡 Medium impact | 🟢 Low impact
+
+---
+
+## Identified Bottlenecks
+
+### B1: RadixNode children stored as Vec — linear scan per level
+**File:** [`rsrc/src/router.rs`](rsrc/src/router.rs:96)
+**Severity:** 🔴 HIGH
+
+The `RadixNode.children` field is a `Vec`, and matching iterates linearly:
+
+```rust
+// Line 179 — O(N) scan per tree level where N = number of static children
+for child in &self.children {
+ if child.segment.as_ref() == segment {
+```
+
+For routes like `/api/users/:id`, `/api/posts/:id`, `/api/comments/:id`, the root `api` node has 3 children scanned linearly. With 50+ routes sharing a prefix, this becomes O(50) per level.
+
+**Fix:** Replace `Vec` with a `HashMap<Box<str>, RadixNode>` for nodes with >4 children, or sort children and use binary search. For small child counts, keep the Vec but use `Box<[RadixChild]>` after build to improve cache locality.
+
+---
+
+### B2: Recursive match_path with backtracking — stack overhead
+**File:** [`rsrc/src/router.rs`](rsrc/src/router.rs:166)
+**Severity:** 🔴 HIGH
+
+`match_path` is recursive with backtracking support. Each recursion level pushes a new stack frame. For a 5-segment path, that is 5 recursive calls. The backtracking logic with `param_values.truncate()` adds overhead even when there is no ambiguity.
+
+**Fix:** Convert to an iterative implementation using a stack-allocated state array. Most routes are unambiguous — when a node has either static children OR a param child but not both at the same level, no backtracking is needed. Add an `is_unambiguous` flag computed at build time to skip backtracking logic entirely.
+
+---
+
+### B3: Method dispatch uses HashMap with SipHash — 3 lookups per request
+**File:** [`rsrc/src/router.rs`](rsrc/src/router.rs:288)
+**Severity:** 🟡 MEDIUM
+
+`match_route` does up to 3 `HashMap::get` calls with `MethodKey`:
+1. `dynamic_exact_routes.get(&method_key)` — line 297
+2. Inner `routes.get(path.as_bytes())` — line 299
+3. `radix_trees.get(&method_key)` — line 316
+
+`HashMap` uses SipHash by default, which is cryptographically strong but slow for small enum keys. `MethodKey` is a 7-variant enum — a perfect fit for array indexing.
+
+**Fix:** Replace `HashMap` with `[Option<_>; 7]` arrays indexed by method code. This turns 3 hash lookups into 3 array index operations — O(1) with zero hashing.
+
+---
+
+### B4: Path segment splitting allocates iterator state on every request
+**File:** [`rsrc/src/router.rs`](rsrc/src/router.rs:462)
+**Severity:** 🟡 MEDIUM
+
+`split_segments_stack` uses `str::split('/')` which creates an iterator. While the segments are written to a stack buffer, the splitting itself involves `trim_start_matches` and the split iterator overhead on every request.
+
+**Fix:** Replace with a manual byte-scanning loop using `memchr::memchr(b'/', ...)` which is SIMD-accelerated. This avoids iterator overhead and leverages the `memchr` crate already in `Cargo.toml`.
+
+---
+
+### B5: String::from_utf8_lossy allocates a Cow on every parameterized request
+**File:** [`rsrc/src/lib.rs`](rsrc/src/lib.rs:1221)
+**Severity:** 🟡 MEDIUM
+
+In `build_dispatch_decision_zero_copy`:
+```rust
+let path_cow = String::from_utf8_lossy(parsed.path); // line 1221
+let path_str = path_cow.as_ref();
+```
+
+`from_utf8_lossy` returns `Cow::Owned` when replacement characters are needed, but for valid UTF-8 paths it returns `Cow::Borrowed`. However, the function still has overhead from the validation scan. Since `httparse` already validates the path bytes, we can use `from_utf8_unchecked` or `from_utf8` with a fast-path.
+
+**Fix:** Use `std::str::from_utf8()` which is faster than `from_utf8_lossy` for valid UTF-8, and fall back to lossy only on error. Better yet, operate on `&[u8]` directly in the router to avoid UTF-8 conversion entirely for the matching step.
+
+---
+
+### B6: param_values Vec heap-allocates on every parameterized match
+**File:** [`rsrc/src/router.rs`](rsrc/src/router.rs:319)
+**Severity:** 🟡 MEDIUM
+
+```rust
+let mut param_values = Vec::with_capacity(4); // line 319
+```
+
+Every parameterized route match allocates a `Vec` on the heap. While `with_capacity(4)` is small, it is still a `malloc` call per request.
+
+**Fix:** Use a stack-allocated `ArrayVec<&str, 8>` from the `arrayvec` crate, or a manual fixed-size array similar to `seg_buf`. Most routes have ≤4 params. Only fall back to `Vec` for overflow.
+
+---
+
+### B7: normalize_runtime_path called on every request — redundant for clean paths
+**File:** [`rsrc/src/lib.rs`](rsrc/src/lib.rs:1227)
+**Severity:** 🟢 LOW
+
+```rust
+let normalized_path = normalize_runtime_path(path_str);
+```
+
+This strips trailing slashes and ensures a leading slash. For the vast majority of requests, the path is already normalized. The function does check for this fast-path, but it is still called unconditionally.
+
+**Fix:** Inline the fast-path check at the call site to avoid the function call overhead entirely when the path is already clean.
+
+---
+
+## Implementation Plan
+
+### Phase 1: Array-indexed method dispatch — eliminate HashMap overhead
+
+Replace `HashMap` with fixed-size arrays in `Router`:
+
+```rust
+struct Router {
+ exact_get_root: Option<ExactGetRoot>,
+ dynamic_exact_routes: [Option<HashMap<Vec<u8>, DynamicRouteSpec>>; 7],
+ exact_static_routes: [Option<HashMap<Vec<u8>, ExactStaticRoute>>; 7],
+ radix_trees: [Option<RadixNode>; 7],
+ ws_routes: HashMap<String, u32>,
+}
+```
+
+**Files to modify:**
+- [`rsrc/src/router.rs`](rsrc/src/router.rs) — `Router` struct, `from_manifest`, `match_route`, `exact_static_route`
+
+---
+
+### Phase 2: Optimized radix tree child lookup
+
+For `RadixNode.children`:
+- Keep `Vec` for nodes with ≤4 children — linear scan is faster than hash for small N
+- Convert to sorted `Box<[RadixChild]>` after tree construction for cache-friendly binary search
+- Add a `freeze()` method called after all routes are inserted that sorts children and converts Vecs to boxed slices
+
+**Files to modify:**
+- [`rsrc/src/router.rs`](rsrc/src/router.rs) — `RadixNode`, `RadixChild`, add `freeze()` method
+
+---
+
+### Phase 3: Iterative match_path with unambiguous fast-path
+
+- Add `is_unambiguous: bool` flag to `RadixNode` — true when the node has EITHER static children OR a param child, but not both
+- Convert recursive `match_path` to iterative loop for unambiguous trees
+- Keep recursive fallback for ambiguous trees that need backtracking
+
+**Files to modify:**
+- [`rsrc/src/router.rs`](rsrc/src/router.rs) — `RadixNode::match_path`, add `RadixNode::compute_unambiguous`
+
+---
+
+### Phase 4: SIMD-accelerated segment splitting
+
+Replace `str::split('/')` in `split_segments_stack` with `memchr::memchr(b'/', ...)`:
+
+```rust
+fn split_segments_stack<'a>(path: &'a str, buf: &mut [&'a str]) -> usize {
+ let bytes = path.as_bytes();
+ if bytes.len() <= 1 { return 0; }
+ let start = if bytes[0] == b'/' { 1 } else { 0 };
+ let mut pos = start;
+ let mut count = 0;
+ while pos < bytes.len() {
+ let next = memchr::memchr(b'/', &bytes[pos..])
+ .map(|i| pos + i)
+ .unwrap_or(bytes.len());
+ if next > pos {
+ if count >= buf.len() { return count + 1; }
+ buf[count] = unsafe { std::str::from_utf8_unchecked(&bytes[pos..next]) };
+ count += 1;
+ }
+ pos = next + 1;
+ }
+ count
+}
+```
+
+**Files to modify:**
+- [`rsrc/src/router.rs`](rsrc/src/router.rs) — `split_segments_stack`
+
+---
+
+### Phase 5: Stack-allocated param values
+
+Replace `Vec<&str>` with a stack-allocated buffer for param values:
+
+```rust
+const MAX_STACK_PARAMS: usize = 8;
+
+// In match_route:
+let mut param_buf = [""; MAX_STACK_PARAMS];
+let mut param_count = 0;
+// ... pass &mut param_buf, &mut param_count to match_path
+// Only allocate Vec if overflow
+```
+
+**Files to modify:**
+- [`rsrc/src/router.rs`](rsrc/src/router.rs) — `match_route`, `match_path`, `MatchedRoute`
+
+---
+
+### Phase 6: Eliminate UTF-8 conversion in routing hot path
+
+Replace `String::from_utf8_lossy` with `std::str::from_utf8` in the dispatch decision builders:
+
+```rust
+let path_str = std::str::from_utf8(parsed.path)
+ .unwrap_or_else(|_| {
+ // Fallback: only allocate on invalid UTF-8
+ &*String::from_utf8_lossy(parsed.path)
+ });
+```
+
+**Files to modify:**
+- [`rsrc/src/lib.rs`](rsrc/src/lib.rs:1221) — `build_dispatch_decision_zero_copy`
+- [`rsrc/src/lib.rs`](rsrc/src/lib.rs:1300) — `build_dispatch_decision_owned`
+
+---
+
+### Phase 7: Inline normalize_runtime_path fast-path
+
+At the call sites in `build_dispatch_decision_zero_copy` and `build_dispatch_decision_owned`, inline the common case:
+
+```rust
+let normalized_path = if !path_str.ends_with('/') || path_str == "/" {
+ Cow::Borrowed(path_str)
+} else {
+ normalize_runtime_path(path_str)
+};
+```
+
+**Files to modify:**
+- [`rsrc/src/lib.rs`](rsrc/src/lib.rs:1227) — `build_dispatch_decision_zero_copy`
+- [`rsrc/src/lib.rs`](rsrc/src/lib.rs:1311) — `build_dispatch_decision_owned`
+
+---
+
+## Expected Impact
+
+| Phase | Bottleneck | Expected Improvement |
+|-------|-----------|---------------------|
+| 1 | HashMap method dispatch | ~3 hash operations eliminated per request |
+| 2 | Linear child scan | O(N) → O(log N) for nodes with >4 children |
+| 3 | Recursive matching | Eliminates stack frame overhead + backtracking for common cases |
+| 4 | Segment splitting | SIMD-accelerated slash finding vs iterator overhead |
+| 5 | Param Vec allocation | Eliminates heap allocation for routes with ≤8 params |
+| 6 | UTF-8 conversion | Eliminates lossy scan overhead for valid paths |
+| 7 | Path normalization | Eliminates function call for already-clean paths |
+
+**Combined:** These optimizations target every step of the parameterized route matching hot path. The most impactful are Phases 1-3, which address the core routing data structure and algorithm.
+
+## Dependencies
+
+- No new crate dependencies required — `memchr` is already in `Cargo.toml`
+- Optional: `arrayvec` crate for Phase 5 — alternatively, use manual fixed-size arrays
+
+## Risk Assessment
+
+- **Phase 1-2:** Low risk — data structure changes are internal, API unchanged
+- **Phase 3:** Medium risk — iterative matching must produce identical results to recursive; needs thorough testing
+- **Phase 4:** Low risk — `memchr` is well-tested, `from_utf8_unchecked` is safe because httparse validates input
+- **Phase 5-7:** Low risk — purely mechanical optimizations with clear fallback paths
diff --git a/rsrc/src/lib.rs b/rsrc/src/lib.rs
index 5efdb54..cefa1e5 100644
--- a/rsrc/src/lib.rs
+++ b/rsrc/src/lib.rs
@@ -19,8 +19,8 @@ use std::borrow::Cow;
use std::cell::RefCell;
use std::io::BufReader;
use std::net::{SocketAddr, ToSocketAddrs};
-use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::rc::Rc;
+use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{mpsc, Arc, Mutex};
use url::form_urlencoded;
@@ -312,8 +312,12 @@ pub fn session_get(session_id_hex: String, key: String) -> Option<String> {
/// Set a session value. Value should be a JSON string.
#[napi]
pub fn session_set(session_id_hex: String, key: String, value: String) -> bool {
- let Some(store) = GLOBAL_SESSION_STORE.get() else { return false };
- let Some(id) = session::hex_decode_id(&session_id_hex) else { return false };
+ let Some(store) = GLOBAL_SESSION_STORE.get() else {
+ return false;
+ };
+ let Some(id) = session::hex_decode_id(&session_id_hex) else {
+ return false;
+ };
let mut mutations = std::collections::HashMap::new();
mutations.insert(key, value.into_bytes());
store.upsert(&id, mutations, &[]);
@@ -323,8 +327,12 @@ pub fn session_set(session_id_hex: String, key: String, value: String) -> bool {
/// Delete a session key.
#[napi]
pub fn session_delete(session_id_hex: String, key: String) -> bool {
- let Some(store) = GLOBAL_SESSION_STORE.get() else { return false };
- let Some(id) = session::hex_decode_id(&session_id_hex) else { return false };
+ let Some(store) = GLOBAL_SESSION_STORE.get() else {
+ return false;
+ };
+ let Some(id) = session::hex_decode_id(&session_id_hex) else {
+ return false;
+ };
store.upsert(&id, std::collections::HashMap::new(), &[key]);
true
}
@@ -332,8 +340,12 @@ pub fn session_delete(session_id_hex: String, key: String) -> bool {
/// Destroy an entire session.
#[napi]
pub fn session_destroy(session_id_hex: String) -> bool {
- let Some(store) = GLOBAL_SESSION_STORE.get() else { return false };
- let Some(id) = session::hex_decode_id(&session_id_hex) else { return false };
+ let Some(store) = GLOBAL_SESSION_STORE.get() else {
+ return false;
+ };
+ let Some(id) = session::hex_decode_id(&session_id_hex) else {
+ return false;
+ };
store.destroy(&id);
true
}
@@ -374,10 +386,18 @@ pub fn session_get_all(session_id_hex: String) -> Option<String> {
/// Set multiple session values at once. Takes a JSON object string.
#[napi]
pub fn session_set_all(session_id_hex: String, data_json: String) -> bool {
- let Some(store) = GLOBAL_SESSION_STORE.get() else { return false };
- let Some(id) = session::hex_decode_id(&session_id_hex) else { return false };
- let Ok(obj) = serde_json::from_str::<serde_json::Value>(&data_json) else { return false };
- let Some(map) = obj.as_object() else { return false };
+ let Some(store) = GLOBAL_SESSION_STORE.get() else {
+ return false;
+ };
+ let Some(id) = session::hex_decode_id(&session_id_hex) else {
+ return false;
+ };
+ let Ok(obj) = serde_json::from_str::<serde_json::Value>(&data_json) else {
+ return false;
+ };
+ let Some(map) = obj.as_object() else {
+ return false;
+ };
let mut mutations = std::collections::HashMap::new();
for (key, value) in map {
mutations.insert(key.clone(), value.to_string().into_bytes());
@@ -590,8 +610,7 @@ async fn run_server(
let dispatcher: Rc> = Rc::new(dispatcher);
let server_config: Rc> = Rc::new(server_config);
let tls_acceptor: Option> = tls_acceptor.map(Rc::new);
- let session_store: Option>> =
- session_store.map(Rc::new);
+ let session_store: Option>> = session_store.map(Rc::new);
let active_connections: std::cell::Cell = std::cell::Cell::new(0);
@@ -649,9 +668,8 @@ async fn run_server(
eprintln!("[http-native] connection error: {error}");
}
// Safety: single-threaded — pointer is always valid while server runs
- unsafe { &*conn_counter }.set(
- unsafe { &*conn_counter }.get().saturating_sub(1),
- );
+ unsafe { &*conn_counter }
+ .set(unsafe { &*conn_counter }.get().saturating_sub(1));
});
}
Err(error) => {
@@ -696,7 +714,7 @@ const TIMEOUT_HEADER_READ: Duration = Duration::from_secs(30);
const TIMEOUT_IDLE_KEEPALIVE: Duration = Duration::from_secs(120);
const TIMEOUT_BODY_READ: Duration = Duration::from_secs(60);
-// ─── Connection Handler with Buffer Pool
+// ─── Connection Handler with Buffer Pool
async fn handle_connection(
mut stream: S,
@@ -764,7 +782,7 @@ where
} else {
TIMEOUT_IDLE_KEEPALIVE
};
-
+
let timeout_result = timeout(read_duration, stream.read(owned_buf)).await;
let (read_result, next_buffer) = match timeout_result {
Ok(res) => res,
@@ -773,7 +791,7 @@ where
return Ok(());
}
};
-
+
*buffer = next_buffer;
let bytes_read = read_result?;
@@ -810,7 +828,11 @@ where
// TE + CL = request smuggling vector
(400u16, &b"{\"error\":\"Bad Request: conflicting Content-Length and Transfer-Encoding\"}"[..])
} else {
- (501u16, &b"{\"error\":\"Not Implemented: chunked transfer encoding is not supported\"}"[..])
+ (
+ 501u16,
+ &b"{\"error\":\"Not Implemented: chunked transfer encoding is not supported\"}"
+ [..],
+ )
};
let response = build_error_response_bytes(status, body, false);
let (write_result, _) = stream.write_all(response).await;
@@ -848,7 +870,9 @@ where
// ── WebSocket upgrade check ──
if parsed.is_websocket_upgrade {
if let Some(ws_key) = parsed.ws_key {
- if let Some(ws_handler_id) = router.match_ws_route(std::str::from_utf8(parsed.path).unwrap_or("/")) {
+ if let Some(ws_handler_id) =
+ router.match_ws_route(std::str::from_utf8(parsed.path).unwrap_or("/"))
+ {
let accept_key = crate::websocket::compute_accept_key(ws_key);
let upgrade_response = crate::websocket::build_upgrade_response(&accept_key);
drop(parsed);
@@ -877,9 +901,25 @@ where
drain_consumed_bytes(buffer, header_bytes);
match dispatch_decision {
- DispatchDecision::BridgeRequest(request, cache_insertion, handler_id, url_bytes) => {
- write_dynamic_dispatch_response(stream, dispatcher, request, keep_alive, cache_insertion, handler_id, &url_bytes, session_store, session_id, is_new_session)
- .await?;
+ DispatchDecision::BridgeRequest(
+ request,
+ cache_insertion,
+ handler_id,
+ url_bytes,
+ ) => {
+ write_dynamic_dispatch_response(
+ stream,
+ dispatcher,
+ request,
+ keep_alive,
+ cache_insertion,
+ handler_id,
+ &url_bytes,
+ session_store,
+ session_id,
+ is_new_session,
+ )
+ .await?;
}
DispatchDecision::SpecializedResponse(response) => {
let (write_result, _) = stream.write_all(response).await;
@@ -907,10 +947,11 @@ where
.iter()
.map(|(n, v)| (n.to_string(), v.to_string()))
.collect();
- let (session_id_body, is_new_session_body) = resolve_session(session_store, parsed.cookie_header);
+ let (session_id_body, is_new_session_body) =
+ resolve_session(session_store, parsed.cookie_header);
drop(parsed);
- // ── Read request body
+ // ── Read request body
let body_bytes: Vec<u8> = {
let content_length = match content_length {
Some(len) => len,
@@ -980,7 +1021,19 @@ where
match dispatch_decision_owned {
DispatchDecision::BridgeRequest(request, cache_insertion, handler_id, url_bytes) => {
- write_dynamic_dispatch_response(stream, dispatcher, request, keep_alive, cache_insertion, handler_id, &url_bytes, session_store, session_id_body, is_new_session_body).await?;
+ write_dynamic_dispatch_response(
+ stream,
+ dispatcher,
+ request,
+ keep_alive,
+ cache_insertion,
+ handler_id,
+ &url_bytes,
+ session_store,
+ session_id_body,
+ is_new_session_body,
+ )
+ .await?;
}
DispatchDecision::SpecializedResponse(response) => {
let (write_result, _) = stream.write_all(response).await;
@@ -1218,12 +1271,46 @@ fn build_dispatch_decision_zero_copy(
body: &[u8],
) -> Result<DispatchDecision> {
let method_code = method_code_from_bytes(parsed.method).unwrap_or(UNKNOWN_METHOD_CODE);
- let path_cow = String::from_utf8_lossy(parsed.path);
- let path_str = path_cow.as_ref();
- let url_cow = String::from_utf8_lossy(parsed.target);
- let url_str = url_cow.as_ref();
+ // Phase 6: Use from_utf8 (faster) instead of from_utf8_lossy for valid UTF-8 paths.
+ // httparse already validates the request line, so paths are almost always valid UTF-8.
+ let path_str = match std::str::from_utf8(parsed.path) {
+ Ok(s) => s,
+ Err(_) => {
+ return build_not_found_dispatch_envelope(
+ method_code,
+ &String::from_utf8_lossy(parsed.path),
+ &String::from_utf8_lossy(parsed.target),
+ &parsed.headers,
+ body,
+ )
+ .map(|envelope| {
+ DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new())
+ })
+ }
+ };
+ let url_str = match std::str::from_utf8(parsed.target) {
+ Ok(s) => s,
+ Err(_) => {
+ return build_not_found_dispatch_envelope(
+ method_code,
+ path_str,
+ &String::from_utf8_lossy(parsed.target),
+ &parsed.headers,
+ body,
+ )
+ .map(|envelope| {
+ DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new())
+ })
+ }
+ };
- let normalized_path = normalize_runtime_path(path_str);
+ // Phase 7: Inline fast-path for normalize_runtime_path — avoid function call
+ // when path is already clean (no trailing slash, or is exactly "/")
+ let normalized_path = if !path_str.ends_with('/') || path_str == "/" {
+ Cow::Borrowed(path_str)
+ } else {
+ normalize_runtime_path(path_str)
+ };
if contains_path_traversal(&normalized_path) {
return build_not_found_dispatch_envelope(
method_code,
@@ -1232,7 +1319,9 @@ fn build_dispatch_decision_zero_copy(
&parsed.headers,
body,
)
- .map(|envelope| DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new()));
+ .map(|envelope| {
+ DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new())
+ });
}
let matched_route = if method_code == UNKNOWN_METHOD_CODE {
@@ -1249,27 +1338,44 @@ fn build_dispatch_decision_zero_copy(
&parsed.headers,
body,
)
- .map(|envelope| DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new()));
+ .map(|envelope| {
+ DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new())
+ });
};
let mut cache_insertion = None;
if let Some(cfg) = matched_route.cache_config {
- let key = crate::router::interpolate_cache_key(cfg, parsed, url_str, matched_route.param_names, &matched_route.param_values);
- if let Some(cached_response) = crate::router::get_cached_response(matched_route.handler_id, key, parsed.keep_alive) {
- return Ok(DispatchDecision::CachedResponse(cached_response));
- }
- cache_insertion = Some((matched_route.handler_id, key, cfg.max_entries, cfg.ttl_secs));
+ let key = crate::router::interpolate_cache_key(
+ cfg,
+ parsed,
+ url_str,
+ matched_route.param_names,
+ &matched_route.param_values,
+ );
+ if let Some(cached_response) =
+ crate::router::get_cached_response(matched_route.handler_id, key, parsed.keep_alive)
+ {
+ return Ok(DispatchDecision::CachedResponse(cached_response));
+ }
+ cache_insertion = Some((matched_route.handler_id, key, cfg.max_entries, cfg.ttl_secs));
} else {
// ncache lookup: check if a previous res.ncache() call cached this response
let ncache_key = compute_ncache_key(matched_route.handler_id, parsed.target);
- if let Some(cached_response) = crate::router::get_cached_response(matched_route.handler_id, ncache_key, parsed.keep_alive) {
+ if let Some(cached_response) = crate::router::get_cached_response(
+ matched_route.handler_id,
+ ncache_key,
+ parsed.keep_alive,
+ ) {
return Ok(DispatchDecision::CachedResponse(cached_response));
}
}
- if let Some(response) =
- build_dynamic_fast_path_response(&matched_route, url_str, &parsed.headers, parsed.keep_alive)?
- {
+ if let Some(response) = build_dynamic_fast_path_response(
+ &matched_route,
+ url_str,
+ &parsed.headers,
+ parsed.keep_alive,
+ )? {
return Ok(DispatchDecision::SpecializedResponse(response));
};
@@ -1284,7 +1390,9 @@ fn build_dispatch_decision_zero_copy(
&parsed.headers,
body,
)
- .map(|envelope| DispatchDecision::BridgeRequest(envelope, cache_insertion, handler_id, url_bytes_owned))
+ .map(|envelope| {
+ DispatchDecision::BridgeRequest(envelope, cache_insertion, handler_id, url_bytes_owned)
+ })
}
fn build_dispatch_decision_owned(
@@ -1297,18 +1405,24 @@ fn build_dispatch_decision_owned(
) -> Result<DispatchDecision> {
let method_code = method_code_from_bytes(method).unwrap_or(UNKNOWN_METHOD_CODE);
- let path_cow = String::from_utf8_lossy(path);
- let path_str = path_cow.as_ref();
- let url_cow = String::from_utf8_lossy(target);
- let url_str = url_cow.as_ref();
+ // Phase 6: Use from_utf8 (faster) instead of from_utf8_lossy for valid UTF-8 paths
+ let path_str = std::str::from_utf8(path).unwrap_or_else(|_| {
+ // Safety fallback: this is extremely rare — httparse validates the request line
+ ""
+ });
+ let url_str = std::str::from_utf8(target).unwrap_or_else(|_| "");
let header_refs: Vec<(&str, &str)> = headers
.iter()
.map(|(n, v)| (n.as_str(), v.as_str()))
.collect();
- // Security: strict path validation
- let normalized_path = normalize_runtime_path(path_str);
+ // Phase 7: Inline fast-path for normalize_runtime_path
+ let normalized_path = if !path_str.ends_with('/') || path_str == "/" {
+ Cow::Borrowed(path_str)
+ } else {
+ normalize_runtime_path(path_str)
+ };
if contains_path_traversal(&normalized_path) {
return build_not_found_dispatch_envelope(
method_code,
@@ -1317,7 +1431,9 @@ fn build_dispatch_decision_owned(
&header_refs,
body,
)
- .map(|envelope| DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new()));
+ .map(|envelope| {
+ DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new())
+ });
}
let matched_route = if method_code == UNKNOWN_METHOD_CODE {
@@ -1334,31 +1450,41 @@ fn build_dispatch_decision_owned(
&header_refs,
body,
)
- .map(|envelope| DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new()));
+ .map(|envelope| {
+ DispatchDecision::BridgeRequest(envelope, None, NOT_FOUND_HANDLER_ID, Vec::new())
+ });
};
let mut cache_insertion = None;
if let Some(cfg) = matched_route.cache_config {
- let mock_parsed = ParsedRequest {
- method,
- target,
- path,
- keep_alive: false,
- header_bytes: 0,
- has_body: true,
- content_length: None,
- has_chunked_te: false,
- headers: header_refs.clone(),
- cookie_header: None,
- is_websocket_upgrade: false,
- ws_key: None,
- };
- let key = crate::router::interpolate_cache_key(cfg, &mock_parsed, url_str, matched_route.param_names, &matched_route.param_values);
- cache_insertion = Some((matched_route.handler_id, key, cfg.max_entries, cfg.ttl_secs));
+ let mock_parsed = ParsedRequest {
+ method,
+ target,
+ path,
+ keep_alive: false,
+ header_bytes: 0,
+ has_body: true,
+ content_length: None,
+ has_chunked_te: false,
+ headers: header_refs.clone(),
+ cookie_header: None,
+ is_websocket_upgrade: false,
+ ws_key: None,
+ };
+ let key = crate::router::interpolate_cache_key(
+ cfg,
+ &mock_parsed,
+ url_str,
+ matched_route.param_names,
+ &matched_route.param_values,
+ );
+ cache_insertion = Some((matched_route.handler_id, key, cfg.max_entries, cfg.ttl_secs));
} else {
// ncache lookup: check if a previous res.ncache() call cached this response
let ncache_key = compute_ncache_key(matched_route.handler_id, target);
- if let Some(cached_response) = crate::router::get_cached_response(matched_route.handler_id, ncache_key, false) {
+ if let Some(cached_response) =
+ crate::router::get_cached_response(matched_route.handler_id, ncache_key, false)
+ {
return Ok(DispatchDecision::CachedResponse(cached_response));
}
}
@@ -1374,7 +1500,9 @@ fn build_dispatch_decision_owned(
&header_refs,
body,
)
- .map(|envelope| DispatchDecision::BridgeRequest(envelope, cache_insertion, handler_id, url_bytes_owned))
+ .map(|envelope| {
+ DispatchDecision::BridgeRequest(envelope, cache_insertion, handler_id, url_bytes_owned)
+ })
}
fn build_not_found_dispatch_envelope(
@@ -1387,7 +1515,7 @@ fn build_not_found_dispatch_envelope(
let url_bytes = url.as_bytes();
let path_bytes = path.as_bytes();
let mut flags: u16 = 0;
- if url.contains('?') {
+ if memchr::memchr(b'?', url_bytes).is_some() {
flags |= REQUEST_FLAG_QUERY_PRESENT;
}
if !body.is_empty() {
@@ -1437,10 +1565,18 @@ fn build_dispatch_envelope(
) -> Result {
let include_url = matched_route.needs_url || matched_route.needs_query;
let include_path = matched_route.needs_path;
- let url_bytes = if include_url { url.as_bytes() } else { b"" };
- let path_bytes = if include_path { path.as_bytes() } else { b"" };
+ let url_bytes = if include_url {
+ url.as_bytes()
+ } else {
+ b"" as &[u8]
+ };
+ let path_bytes = if include_path {
+ path.as_bytes()
+ } else {
+ b"" as &[u8]
+ };
let mut flags: u16 = 0;
- if matched_route.needs_query && url.contains('?') {
+ if matched_route.needs_query && memchr::memchr(b'?', url.as_bytes()).is_some() {
flags |= REQUEST_FLAG_QUERY_PRESENT;
}
if !body.is_empty() {
@@ -1456,14 +1592,34 @@ fn build_dispatch_envelope(
if matched_route.param_values.len() > u16::MAX as usize {
return Err(anyhow!("too many params"));
}
- let selected_header_count = count_selected_headers(header_entries, matched_route);
- if selected_header_count > u16::MAX as usize {
- return Err(anyhow!("too many headers"));
+
+ // Single-pass: determine which headers to include and compute count + size simultaneously.
+ // This avoids the previous double-iteration (count_selected_headers + write loop).
+ let full_headers = matched_route.full_headers;
+ let header_keys = matched_route.header_keys;
+ let no_headers = !full_headers && header_keys.is_empty();
+
+ let mut selected_header_count = 0u16;
+ let mut header_data_size = 0usize;
+ if !no_headers {
+ for (name, value) in header_entries {
+ if full_headers
+ || header_keys
+ .iter()
+ .any(|target| target.as_ref().eq_ignore_ascii_case(name))
+ {
+ selected_header_count += 1;
+ header_data_size += 3 + name.len() + value.len(); // 1 (name_len) + 2 (value_len) + name + value
+ }
+ }
}
- let mut frame = Vec::with_capacity(
- 20 + url_bytes.len() + path_bytes.len() + selected_header_count * 16 + body.len(),
- );
+ // Pre-compute exact frame size to avoid reallocation
+ let param_data_size: usize = matched_route.param_values.iter().map(|v| 2 + v.len()).sum();
+ let frame_size =
+ 20 + url_bytes.len() + path_bytes.len() + param_data_size + header_data_size + body.len();
+
+ let mut frame = Vec::with_capacity(frame_size);
frame.push(BRIDGE_VERSION);
frame.push(method_code);
push_u16(&mut frame, flags);
@@ -1471,8 +1627,8 @@ fn build_dispatch_envelope(
push_u32(&mut frame, url_bytes.len() as u32);
push_u16(&mut frame, path_bytes.len() as u16);
push_u16(&mut frame, matched_route.param_values.len() as u16);
- push_u16(&mut frame, selected_header_count as u16);
- push_u32(&mut frame, body.len() as u32); // NEW: body length
+ push_u16(&mut frame, selected_header_count);
+ push_u32(&mut frame, body.len() as u32);
frame.extend_from_slice(url_bytes);
frame.extend_from_slice(path_bytes);
@@ -1480,47 +1636,24 @@ fn build_dispatch_envelope(
push_string_value(&mut frame, value)?;
}
+ // Single-pass header write (no separate count step needed)
if selected_header_count > 0 {
for (name, value) in header_entries {
- if should_include_header(name, matched_route) {
+ if full_headers
+ || header_keys
+ .iter()
+ .any(|target| target.as_ref().eq_ignore_ascii_case(name))
+ {
push_string_pair(&mut frame, name, value)?;
}
}
}
- frame.extend_from_slice(body); // NEW: body bytes at end
+ frame.extend_from_slice(body);
Ok(Buffer::from(frame))
}
-fn count_selected_headers(
- header_entries: &[(&str, &str)],
- matched_route: &MatchedRoute<'_, '_>,
-) -> usize {
- if matched_route.full_headers {
- return header_entries.len();
- }
-
- if matched_route.header_keys.is_empty() {
- return 0;
- }
-
- header_entries
- .iter()
- .filter(|(name, _)| should_include_header(name, matched_route))
- .count()
-}
-
-fn should_include_header(name: &str, matched_route: &MatchedRoute<'_, '_>) -> bool {
- if matched_route.full_headers {
- return true;
- }
- matched_route
- .header_keys
- .iter()
- .any(|target| target.as_ref().eq_ignore_ascii_case(name))
-}
-
enum ResolvedDynamicValue {
Missing,
Single(String),
@@ -1638,27 +1771,23 @@ fn render_dynamic_text_body(
for segment in template.segments.iter() {
match segment {
TextSegment::Literal(value) => output.push_str(value.as_ref()),
- TextSegment::Dynamic(source) => match resolve_dynamic_value(
- source,
- matched_route,
- url,
- headers,
- query_cache,
- ) {
- ResolvedDynamicValue::Missing => output.push_str("undefined"),
- ResolvedDynamicValue::Single(value) => output.push_str(value.as_str()),
- ResolvedDynamicValue::Multi(values) => {
- for (index, value) in values.iter().enumerate() {
- if index > 0 {
- output.push(',');
+ TextSegment::Dynamic(source) => {
+ match resolve_dynamic_value(source, matched_route, url, headers, query_cache) {
+ ResolvedDynamicValue::Missing => output.push_str("undefined"),
+ ResolvedDynamicValue::Single(value) => output.push_str(value.as_str()),
+ ResolvedDynamicValue::Multi(values) => {
+ for (index, value) in values.iter().enumerate() {
+ if index > 0 {
+ output.push(',');
+ }
+ output.push_str(value.as_str());
}
- output.push_str(value.as_str());
+ }
+ ResolvedDynamicValue::RawJson(bytes) => {
+ output.push_str(String::from_utf8_lossy(bytes.as_slice()).as_ref());
}
}
- ResolvedDynamicValue::RawJson(bytes) => {
- output.push_str(String::from_utf8_lossy(bytes.as_slice()).as_ref());
- }
- },
+ }
}
}
@@ -1959,7 +2088,8 @@ fn extract_ncache_trailer(dispatch_bytes: &[u8]) -> Option<(u64, usize)> {
}
let name_len = dispatch_bytes[offset] as usize;
offset += 1;
- let value_len = (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
+ let value_len =
+ (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
offset += 2;
offset += name_len + value_len as usize;
}
@@ -2000,8 +2130,8 @@ fn extract_ncache_trailer(dispatch_bytes: &[u8]) -> Option<(u64, usize)> {
/// /data?page=2 are cached separately.
/// Uses FxHasher (~5x faster than SipHash/DefaultHasher for short keys).
fn compute_ncache_key(handler_id: u32, url_bytes: &[u8]) -> u64 {
- use std::hash::{Hash, Hasher};
use rustc_hash::FxHasher;
+ use std::hash::{Hash, Hasher};
let mut hasher = FxHasher::default();
handler_id.hash(&mut hasher);
url_bytes.hash(&mut hasher);
@@ -2037,10 +2167,13 @@ fn response_body_end_offset(dispatch_bytes: &[u8]) -> Option<usize> {
// Skip headers
for _ in 0..header_count {
- if offset + 3 > dispatch_bytes.len() { return None; }
+ if offset + 3 > dispatch_bytes.len() {
+ return None;
+ }
let name_len = dispatch_bytes[offset] as usize;
offset += 1;
- let value_len = (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
+ let value_len =
+ (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
offset += 2;
offset += name_len + value_len as usize;
}
@@ -2052,7 +2185,10 @@ fn response_body_end_offset(dispatch_bytes: &[u8]) -> Option {
/// Extract session write trailer from the response envelope.
/// Called after the ncache trailer position. Scans from `start_offset`.
-fn extract_session_trailer(dispatch_bytes: &[u8], start_offset: usize) -> Option {
+fn extract_session_trailer(
+ dispatch_bytes: &[u8],
+ start_offset: usize,
+) -> Option {
let mut offset = start_offset;
// Check for session magic (0x5E 0x57)
@@ -2071,27 +2207,36 @@ fn extract_session_trailer(dispatch_bytes: &[u8], start_offset: usize) -> Option
let entry_count = (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
offset += 2;
- let deleted_count = (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
+ let deleted_count =
+ (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
offset += 2;
let mut mutations = std::collections::HashMap::new();
for _ in 0..entry_count {
- if offset + 2 > dispatch_bytes.len() { return None; }
+ if offset + 2 > dispatch_bytes.len() {
+ return None;
+ }
let key_len = (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
offset += 2;
let key_len = key_len as usize;
- if offset + key_len > dispatch_bytes.len() { return None; }
+ if offset + key_len > dispatch_bytes.len() {
+ return None;
+ }
let key = std::str::from_utf8(&dispatch_bytes[offset..offset + key_len]).ok()?;
offset += key_len;
- if offset + 4 > dispatch_bytes.len() { return None; }
+ if offset + 4 > dispatch_bytes.len() {
+ return None;
+ }
let value_len = (dispatch_bytes[offset] as u32)
| ((dispatch_bytes[offset + 1] as u32) << 8)
| ((dispatch_bytes[offset + 2] as u32) << 16)
| ((dispatch_bytes[offset + 3] as u32) << 24);
offset += 4;
let value_len = value_len as usize;
- if offset + value_len > dispatch_bytes.len() { return None; }
+ if offset + value_len > dispatch_bytes.len() {
+ return None;
+ }
let value = dispatch_bytes[offset..offset + value_len].to_vec();
offset += value_len;
@@ -2100,11 +2245,15 @@ fn extract_session_trailer(dispatch_bytes: &[u8], start_offset: usize) -> Option
let mut deleted_keys = Vec::new();
for _ in 0..deleted_count {
- if offset + 2 > dispatch_bytes.len() { return None; }
+ if offset + 2 > dispatch_bytes.len() {
+ return None;
+ }
let key_len = (dispatch_bytes[offset] as u16) | ((dispatch_bytes[offset + 1] as u16) << 8);
offset += 2;
let key_len = key_len as usize;
- if offset + key_len > dispatch_bytes.len() { return None; }
+ if offset + key_len > dispatch_bytes.len() {
+ return None;
+ }
let key = std::str::from_utf8(&dispatch_bytes[offset..offset + key_len]).ok()?;
offset += key_len;
deleted_keys.push(key.to_string());
@@ -2146,11 +2295,9 @@ where
.map_err(|_| anyhow!("stream envelope truncated"))?,
);
off += 8;
- let status =
- u16::from_le_bytes([response[off], response[off + 1]]);
+ let status = u16::from_le_bytes([response[off], response[off + 1]]);
off += 2;
- let header_count =
- u16::from_le_bytes([response[off], response[off + 1]]) as usize;
+ let header_count = u16::from_le_bytes([response[off], response[off + 1]]) as usize;
off += 2;
// Create the channel — Sender goes into the registry so JS can push
@@ -2218,7 +2365,8 @@ where
}
// HTTP/1.1 chunked format: {hex_len}\r\n{data}\r\n
let hex_len = format!("{:x}", data.len());
- let mut chunk_buf = Vec::with_capacity(hex_len.len() + 2 + data.len() + 2);
+ let mut chunk_buf =
+ Vec::with_capacity(hex_len.len() + 2 + data.len() + 2);
chunk_buf.extend_from_slice(hex_len.as_bytes());
chunk_buf.extend_from_slice(b"\r\n");
chunk_buf.extend_from_slice(&data);
@@ -2258,14 +2406,22 @@ where
patch_connection_header(&http_response, true).into()
};
- crate::router::insert_cached_response(handler_id, cache_key, crate::router::CacheEntry {
- response_bytes: response_ka,
- response_bytes_close,
- expires_at: std::time::Instant::now() + std::time::Duration::from_secs(ttl_secs),
- }, max_entries);
+ crate::router::insert_cached_response(
+ handler_id,
+ cache_key,
+ crate::router::CacheEntry {
+ response_bytes: response_ka,
+ response_bytes_close,
+ expires_at: std::time::Instant::now()
+ + std::time::Duration::from_secs(ttl_secs),
+ },
+ max_entries,
+ );
} else if handler_id != NOT_FOUND_HANDLER_ID {
// Check for ncache trailer from JS response envelope
- if let Some((ncache_ttl, ncache_max_entries)) = extract_ncache_trailer(response.as_ref()) {
+ if let Some((ncache_ttl, ncache_max_entries)) =
+ extract_ncache_trailer(response.as_ref())
+ {
if ncache_ttl > 0 {
let ncache_key = compute_ncache_key(handler_id, url_bytes);
@@ -2280,11 +2436,17 @@ where
patch_connection_header(&http_response, true).into()
};
- crate::router::insert_cached_response(handler_id, ncache_key, crate::router::CacheEntry {
- response_bytes: response_ka,
- response_bytes_close,
- expires_at: std::time::Instant::now() + std::time::Duration::from_secs(ncache_ttl),
- }, ncache_max_entries);
+ crate::router::insert_cached_response(
+ handler_id,
+ ncache_key,
+ crate::router::CacheEntry {
+ response_bytes: response_ka,
+ response_bytes_close,
+ expires_at: std::time::Instant::now()
+ + std::time::Duration::from_secs(ncache_ttl),
+ },
+ ncache_max_entries,
+ );
}
}
}
@@ -2301,15 +2463,24 @@ where
session_scan_offset += 10;
}
- if let Some(trailer) = extract_session_trailer(response.as_ref(), session_scan_offset) {
+ if let Some(trailer) =
+ extract_session_trailer(response.as_ref(), session_scan_offset)
+ {
match trailer.action {
session::SessionAction::Update => {
if let Some(sid) = session_id {
- store.upsert(&sid, trailer.mutations, &trailer.deleted_keys);
+ store.upsert(
+ &sid,
+ trailer.mutations,
+ &trailer.deleted_keys,
+ );
// Inject Set-Cookie for new sessions
if is_new_session {
let cookie = store.build_set_cookie(&sid);
- inject_set_cookie_header(&mut http_response, &cookie);
+ inject_set_cookie_header(
+ &mut http_response,
+ &cookie,
+ );
}
}
}
@@ -2329,7 +2500,11 @@ where
if let Some(entry) = old_data {
store.upsert(&new_sid, entry.data, &[]);
}
- store.upsert(&new_sid, trailer.mutations, &trailer.deleted_keys);
+ store.upsert(
+ &new_sid,
+ trailer.mutations,
+ &trailer.deleted_keys,
+ );
let cookie = store.build_set_cookie(&new_sid);
inject_set_cookie_header(&mut http_response, &cookie);
}
@@ -2363,11 +2538,8 @@ where
}
Err(_) => {
// Security: sanitized error — no internal details
- let response = build_error_response_bytes(
- 502,
- b"{\"error\":\"Bad Gateway\"}",
- keep_alive,
- );
+ let response =
+ build_error_response_bytes(502, b"{\"error\":\"Bad Gateway\"}", keep_alive);
let (write_result, _) = stream.write_all(response).await;
write_result?;
}
@@ -2455,9 +2627,15 @@ fn build_http_response_from_dispatch(dispatch_bytes: &[u8], keep_alive: bool) ->
/// Vec may grow or shrink by a few bytes.
fn patch_connection_header(response: &[u8], keep_alive: bool) -> Vec<u8> {
let (find, replace) = if keep_alive {
- (&b"connection: close\r\n"[..], &b"connection: keep-alive\r\n"[..])
+ (
+ &b"connection: close\r\n"[..],
+ &b"connection: keep-alive\r\n"[..],
+ )
} else {
- (&b"connection: keep-alive\r\n"[..], &b"connection: close\r\n"[..])
+ (
+ &b"connection: keep-alive\r\n"[..],
+ &b"connection: close\r\n"[..],
+ )
};
if let Some(pos) = memmem::find(response, find) {
@@ -2625,8 +2803,20 @@ fn build_error_response_bytes(status: u16, body: &[u8], keep_alive: bool) -> Vec<u8> {
let connection = if keep_alive { "keep-alive" } else { "close" };
let body_len = body.len();
- let total_size =
- 9 + 3 + 1 + reason.len() + 2 + 16 + count_digits(body_len) + 2 + 12 + connection.len() + 2 + 45 + 2 + body_len;
+ let total_size = 9
+ + 3
+ + 1
+ + reason.len()
+ + 2
+ + 16
+ + count_digits(body_len)
+ + 2
+ + 12
+ + connection.len()
+ + 2
+ + 45
+ + 2
+ + body_len;
let mut output = Vec::with_capacity(total_size);
output.extend_from_slice(b"HTTP/1.1 ");
@@ -2643,7 +2833,6 @@ fn build_error_response_bytes(status: u16, body: &[u8], keep_alive: bool) -> Vec
output
}
-
// ─── Security Utilities ─────────────────
/// Check for path traversal attempts (../, ..\, etc.)
@@ -2653,19 +2842,37 @@ fn build_error_response_bytes(status: u16, body: &[u8], keep_alive: bool) -> Vec
fn contains_path_traversal(path: &str) -> bool {
let bytes = path.as_bytes();
- // Fast scan for null bytes
- if memchr::memchr(0, bytes).is_some() {
+    // Ultra-fast path: scan for any byte that could indicate traversal.
+    // If no '.', '%', or '\0' exists, traversal is impossible
+    // (a "..\" sequence still requires dots, so backslash needs no separate flag).
+ // This single pass covers 99%+ of clean requests with zero allocations.
+ let mut has_dot = false;
+ let mut has_percent = false;
+ let mut has_null = false;
+ for &b in bytes {
+ match b {
+ b'.' => has_dot = true,
+ b'%' => has_percent = true,
+ 0 => has_null = true,
+ _ => {}
+ }
+ }
+
+ if has_null {
return true;
}
+ // No dots and no percent-encoding means no traversal possible
+ if !has_dot && !has_percent {
+ return false;
+ }
+
// Check for literal %00 (null percent-encoding)
- if memmem::find(bytes, b"%00").is_some() {
+ if has_percent && memmem::find(bytes, b"%00").is_some() {
return true;
}
- // Fast path: if no ".." appears anywhere (even encoded), skip the expensive decode
+ // Need ".." for traversal — check if it exists
let has_dotdot = memmem::find(bytes, b"..").is_some();
- let has_percent = memchr::memchr(b'%', bytes).is_some();
// If no literal ".." and no percent-encoding that could hide "..", we're safe
if !has_dotdot && !has_percent {
@@ -2823,10 +3030,7 @@ fn build_tls_acceptor(manifest: &ManifestInput) -> Result