diff --git a/bun.lock b/bun.lock index fdc570c..dfa87db 100644 --- a/bun.lock +++ b/bun.lock @@ -62,6 +62,7 @@ "next": "^16.0.0", "react": "^19.0.0", "react-dom": "^19.0.0", + "simple-icons": "^16.9.0", }, "devDependencies": { "@tailwindcss/postcss": "^4.0.0", @@ -1024,6 +1025,8 @@ "signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + "simple-icons": ["simple-icons@16.9.0", "", {}, "sha512-aKst2C7cLkFyaiQ/Crlwxt9xYOpGPk05XuJZ0ZTJNNCzHCKYrGWz2ebJSi5dG8CmTCxUF/BGs6A8uyJn/EQxqw=="], + "source-map": ["source-map@0.7.6", "", {}, "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ=="], "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="], diff --git a/packages/web/app/docs/docs.css b/packages/web/app/docs/docs.css index 9c2ad9e..926b816 100644 --- a/packages/web/app/docs/docs.css +++ b/packages/web/app/docs/docs.css @@ -1,5 +1,5 @@ .docs-panel #nd-docs-layout { - --fd-docs-height: calc(100dvh - var(--docs-pad) * 2 - 2px) !important; + --fd-docs-height: 100dvh !important; min-height: unset !important; height: var(--fd-docs-height) !important; max-height: var(--fd-docs-height) !important; @@ -75,9 +75,9 @@ [data-state][class*="shadow-lg"][class*="end-0"] { background: #0a0a0a !important; border-left: 1px solid rgba(255, 255, 255, 0.07) !important; - border-radius: var(--panel-radius) 0 0 var(--panel-radius) !important; - margin: var(--docs-pad) 0 var(--docs-pad) 0 !important; - height: calc(100dvh - var(--docs-pad) * 2) !important; + border-radius: 0 !important; + margin: 0 !important; + height: 100dvh !important; top: 0 !important; bottom: 0 !important; } diff --git a/packages/web/app/docs/layout.tsx b/packages/web/app/docs/layout.tsx index 81a2d1b..965f949 100644 --- a/packages/web/app/docs/layout.tsx +++ b/packages/web/app/docs/layout.tsx @@ -5,27 +5,22 @@ import "./docs.css" export default function Layout({ children }: { children: ReactNode }) { return ( -
      [docs layout wrapper markup, old and new, is not recoverable here - the element tags were stripped in this rendering; in both versions the wrapper receives {children}]
) diff --git a/packages/web/app/globals.css b/packages/web/app/globals.css index d1f2ebc..56b798c 100644 --- a/packages/web/app/globals.css +++ b/packages/web/app/globals.css @@ -3,6 +3,9 @@ @import "fumadocs-ui/css/preset.css"; @theme { + --font-sans: + "Geist Sans", ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial, + "Apple Color Emoji", "Segoe UI Emoji"; --font-mono: "Geist Mono", ui-monospace, monospace; } @@ -51,6 +54,48 @@ color: #0a0a0a; } +@keyframes cursor-fade-up { + from { + opacity: 0; + transform: translate3d(0, 14px, 0); + filter: blur(0.5px); + } + to { + opacity: 1; + transform: translate3d(0, 0, 0); + filter: blur(0); + } +} + +.cursor-landing { + --cursor-accent: #ff8a3d; + --cursor-accent-soft: rgba(255, 138, 61, 0.14); +} + +.cursor-fade-up { + animation: cursor-fade-up 700ms cubic-bezier(0.22, 1, 0.36, 1) both; +} + +@keyframes cursor-shimmer { + 0% { + background-position: 0% 50%; + } + 100% { + background-position: 100% 50%; + } +} + +.animate-shimmer { + animation: cursor-shimmer 1400ms linear infinite; +} + +@media (prefers-reduced-motion: reduce) { + .cursor-fade-up, + .animate-shimmer { + animation: none; + } +} + .grain::before { content: ""; position: absolute; @@ -80,3 +125,14 @@ width: 0; height: 0; } + +.cursor-terminal-scroll { + scrollbar-width: none; + -ms-overflow-style: none; +} + +.cursor-terminal-scroll::-webkit-scrollbar { + display: none; + width: 0; + height: 0; +} diff --git a/packages/web/app/layout.tsx b/packages/web/app/layout.tsx index 574d2ad..5dac849 100644 --- a/packages/web/app/layout.tsx +++ b/packages/web/app/layout.tsx @@ -1,6 +1,7 @@ import { Analytics } from "@vercel/analytics/react" import { RootProvider } from "fumadocs-ui/provider/next" import { GeistMono } from "geist/font/mono" +import { GeistSans } from "geist/font/sans" import type { Metadata, Viewport } from "next" import "./globals.css" @@ -41,8 +42,8 @@ export const viewport: Viewport = { export default function Layout({ children }: { children: React.ReactNode }) { return ( - - + + { - const paths = [ - join(process.cwd(), "../cruel/package.json"), - join(process.cwd(), "packages/cruel/package.json"), - ] - - for (const path of paths) { - try { - const file = await readFile(path, "utf8") - const data: unknown = JSON.parse(file) - if (typeof data === "object" && data !== null) { - const value = Reflect.get(data, "version") - if (typeof value === "string" && value.length > 0) return `v${value}` - } - } catch {} - } - - return "v0.0.0" -} - -export default async function Page() { - const tag = await version() +import { CursorFrontier } from "../components/landing/cursor-bento" +import { CursorFeatures } from "../components/landing/cursor-features" +import { CursorFooter } from "../components/landing/cursor-footer" +import { CursorHero } from "../components/landing/cursor-hero" +import { CursorNav } from "../components/landing/cursor-nav" +export default function Page() { return ( -
-
-
-
-
-
- -
- -
-
-
-
-
- chaos engineering -
-

- cruel -

-

- inject failures, latency, and timeouts into any async function. works with fetch, - ai sdks, databases, anything. -

-
- -
-
-									{code.split("\n").map((line, i) => (
-										
{tokenize(line)}
- ))} -
-
-
- -
- - -
- -
-
-
- -
-
{tag}
-
- $ - bun add cruel -
-
-
+
+
+
+
+
+ + + + +
) } - -function Star() { - return ( -
-
-
-
-
-
- ) -} - -function tokenize(line: string): React.ReactNode { - if (!line.trim()) return "\u00A0" - - const out: React.ReactNode[] = [] - let rest = line - let k = 0 - - const rules: [RegExp, string][] = [ - [/^(import|from|const|await)\b/, "text-white/20"], - [/[{}[\]()]/, "text-white/12"], - [/"[^"]*"/, "text-white/40"], - [/\b(cruel|fetch)\b/, "text-white/70"], - [/\b\d+(\.\d+)?\b/, "text-white/30"], - [/[:,.]/, "text-white/12"], - [/=/, "text-white/15"], - ] - - while (rest.length > 0) { - let hit: { i: number; n: number; c: string } | null = null - for (const [re, c] of rules) { - const m = rest.match(re) - if (m?.index !== undefined && (!hit || m.index < hit.i)) { - hit = { i: m.index, n: m[0].length, c } - } - } - if (!hit) { - out.push( - - {rest} - , - ) - break - } - if (hit.i > 0) - out.push( - - {rest.slice(0, hit.i)} - , - ) - out.push( - - {rest.slice(hit.i, hit.i + hit.n)} - , - ) - rest = rest.slice(hit.i + hit.n) - } - - return out -} diff --git a/packages/web/app/story/page.tsx b/packages/web/app/story/page.tsx index 09008bb..9da7dbf 100644 --- a/packages/web/app/story/page.tsx +++ b/packages/web/app/story/page.tsx @@ -1,5 +1,74 @@ import type { Metadata } from "next" -import Link from "next/link" +import { CursorNav } from "../../components/landing/cursor-nav" + +const intro = [ + "3am. phone buzzes. production alert. something is wrong, but not with our code - our code is perfect. passed every test, every lint check, every code review. the problem? the ai provider started rate limiting us and our retry logic had a subtle bug that nobody ever caught. because in development, the api never fails.", + "users see a blank screen. classic.", + "this happened more than once. different projects, different providers, same pattern. everything works beautifully in development. the streams flow smoothly, the tokens arrive one by one, the json parses clean, the error boundaries sit there looking pretty, completely untested against real failures.", + "then you deploy and the real world introduces itself.", + "rate limits hit during your busiest hour. streams cut mid-sentence on your longest responses. structured output comes back with malformed json. context length errors surface only on conversations from your most engaged users - the ones you really don't want to lose. content filters trigger on inputs nobody on the team ever thought to test against.", + "and it's not any one provider's fault. this is just the nature of building on top of ai apis. every provider - whether it's openai, anthropic, google, mistral, cohere, or anyone else - has their own failure modes, their own error formats, their own rate limit behaviors. they're all doing incredible work pushing the boundaries of what's possible. but distributed systems fail. that's not a bug, it's physics.", + "the question isn't whether your ai integration will encounter failures in production. the question is whether you've tested what happens when it does.", +] as const + +type part = { + readonly title: string + readonly text: readonly string[] +} + +const parts: readonly part[] = [ + { + title: "the duct tape era", + text: [ + "for the longest time, my approach to this problem was embarrassingly manual. need to test a rate limit? hardcode a mock response that returns a 429. need to test a stream cut? write a custom readable stream that stops halfway through. need to test a timeout? add a setTimeout that never resolves.", + 'copy and paste between projects. slightly different each time. 
never quite matching the real error format. always incomplete. always the thing i\'d "get to later" and never actually finish.', + "and honestly? most of the time i just skipped it entirely. shipped the code, crossed my fingers, and hoped that the error handling i wrote based on reading the docs would actually work when a real failure hit.", + "spoiler: reading the docs is not the same as testing against real failures.", + "the retry logic that looks correct in a code review? it doesn't respect the retry-after header. the stream error handler that catches the right error type? it doesn't clean up the partial response in the ui. the circuit breaker pattern you implemented from that blog post? it's never actually been tripped.", + "you don't know if your parachute works until you jump. and we were all jumping without ever testing the chute.", + ], + }, + { + title: "the idea", + text: [ + "i work on the ai sdk team at vercel. it's an incredible team - lars, nico, and everyone else shipping tools that millions of developers use every day. being part of this team means i get to see how ai integrations work across the entire ecosystem. every provider, every framework, every edge case.", + "and i kept seeing the same pattern: developers build amazing ai features, test them against the happy path, ship to production, and then discover their error handling has gaps when real-world chaos hits.", + "the ai sdk already does incredible work abstracting away provider differences. unified api, streaming support, structured output, tool calling - all the hard parts handled cleanly. but the one thing no sdk can do for you is test your app's resilience against failures that only happen in production.", + 'that\'s when it clicked. what if there was a library that could simulate every failure mode you\'d encounter in production? not mocking the entire api - just wrapping your existing code with configurable chaos. tell it "fail 10% of the time" or "add random latency" or "cut the stream halfway through" and let your error handling prove itself.', + "not a mock. not a test framework. just chaos. realistic, configurable, provider-accurate chaos that works with anything async.", + ], + }, + { + title: "building it", + text: [ + "the core took a weekend. wrap any async function, inject failures at a configurable rate, add random latency between two bounds, occasionally just... never resolve. the fundamentals of chaos in about 200 lines of typescript.", + "then came the network simulation layer. packet loss, dns failures, disconnects, slow connections. then http chaos - status codes, rate limits, server errors. then stream manipulation - cuts, pauses, corruption, truncation. each layer building on the core but targeting specific failure domains.", + "the ai sdk integration is where it got really interesting. i didn't want generic failures - i wanted failures that match real provider behavior exactly. when cruel simulates a rate limit, it returns the correct status code with a realistic retry-after header. when it simulates an overloaded error, the error object has the right shape, the right properties, the right behavior that the ai sdk's retry system expects.", + "this means your error handling code sees exactly what it would see from a real provider failure. no surprises in production because you already tested against the real thing - or at least something indistinguishable from it.", + "then resilience patterns. circuit breaker, retry with backoff, bulkhead isolation, timeout wrappers, fallbacks. 
not because cruel is trying to be a resilience library - there are great ones already - but because when you're chaos testing, you want to verify these patterns actually work under pressure.", + "zero dependencies. i was obsessive about this one. no runtime deps means no supply chain risk, no version conflicts, no transitive dependency nightmares. just typescript and your code. install it, import it, use it. nothing else comes along for the ride.", + ], + }, + { + title: "the name", + text: [ + 'i thought about this for longer than i\'d like to admit. tested a bunch of names. "chaos-inject" felt corporate. "fault-line" felt geological. "havoc" was taken.', + "then i just thought about what chaos testing should feel like. it should be uncomfortable. it should break things you thought were solid. it should find the bugs you didn't know existed. it should be relentless and thorough and completely without sympathy for your assumptions.", + "it should be cruel.", + "that's the whole philosophy in one word. if your tests are gentle, your production failures will be brutal. better to find out now - in development, with a stack trace and a debugger and a cup of coffee - than at 3am from a production alert while your users watch a loading spinner that never stops.", + ], + }, + { + title: "what's next", + text: [ + "cruel is open source and i'm building it in public. the core is stable, the ai sdk integration works, the resilience patterns are solid. but there's so much more to do.", + "better test matchers. more realistic failure scenarios. deeper integration with vitest and jest. maybe a visual dashboard that shows you exactly how your app behaves under different chaos profiles. provider-specific failure libraries that evolve as the apis evolve.", + "if you're building ai apps and you've ever been bitten by a production failure you didn't test for, give cruel a try. break things on purpose. find the bugs before your users do.", + "and if you find a failure mode i haven't thought of yet, open an issue. the cruelest ideas come from real production pain.", + "zero mercy.", + ], + }, +] as const export const metadata: Metadata = { title: "story - cruel", @@ -8,252 +77,45 @@ export const metadata: Metadata = { export default function Page() { return ( -
-
-
-
-
+
+
+
+
-
-
- -
+
+ -
-
-

+
+
+
+

why i built cruel

-

february 2026

-
- -
-

- 3am. phone buzzes. production alert. something is wrong, but not with our code - our - code is perfect. passed every test, every lint check, every code review. the - problem? the ai provider started rate limiting us and our retry logic had a subtle - bug that nobody ever caught. because in development, the api never fails. -

- -

users see a blank screen. classic.

- -

- this happened more than once. different projects, different providers, same pattern. - everything works beautifully in development. the streams flow smoothly, the tokens - arrive one by one, the json parses clean, the error boundaries sit there looking - pretty, completely untested against real failures. -

- -

then you deploy and the real world introduces itself.

- -

- rate limits hit during your busiest hour. streams cut mid-sentence on your longest - responses. structured output comes back with malformed json. context length errors - surface only on conversations from your most engaged users - the ones you really - don't want to lose. content filters trigger on inputs nobody on the team ever - thought to test against. -

- -

- and it's not any one provider's fault. this is just the nature of building on top of - ai apis. every provider - whether it's openai, anthropic, google, mistral, cohere, - or anyone else - has their own failure modes, their own error formats, their own - rate limit behaviors. they're all doing incredible work pushing the boundaries of - what's possible. but distributed systems fail. that's not a bug, it's physics. -

- -

- the question isn't whether your ai integration will encounter failures in - production. the question is whether you've tested what happens when it does. -

- -

- the duct tape era -

- -

- for the longest time, my approach to this problem was embarrassingly manual. need to - test a rate limit? hardcode a mock response that returns a 429. need to test a - stream cut? write a custom readable stream that stops halfway through. need to test - a timeout? add a setTimeout that never resolves. -

- -

- copy and paste between projects. slightly different each time. never quite matching - the real error format. always incomplete. always the thing i'd "get to later" and - never actually finish. -

- -

- and honestly? most of the time i just skipped it entirely. shipped the code, crossed - my fingers, and hoped that the error handling i wrote based on reading the docs - would actually work when a real failure hit. -

- -

spoiler: reading the docs is not the same as testing against real failures.

- -

- the retry logic that looks correct in a code review? it doesn't respect the - retry-after header. the stream error handler that catches the right error type? it - doesn't clean up the partial response in the ui. the circuit breaker pattern you - implemented from that blog post? it's never actually been tripped. -

- -

- you don't know if your parachute works until you jump. and we were all jumping - without ever testing the chute. -

- -

- the idea -

- -

- i work on the ai sdk team at vercel. it's an incredible team - lars, nico, and - everyone else shipping tools that millions of developers use every day. being part - of this team means i get to see how ai integrations work across the entire - ecosystem. every provider, every framework, every edge case. -

- -

- and i kept seeing the same pattern: developers build amazing ai features, test them - against the happy path, ship to production, and then discover their error handling - has gaps when real-world chaos hits. -

+
february 2026
-

- the ai sdk already does incredible work abstracting away provider differences. - unified api, streaming support, structured output, tool calling - all the hard parts - handled cleanly. but the one thing no sdk can do for you is test your app's - resilience against failures that only happen in production. -

- -

- that's when it clicked. what if there was a library that could simulate every - failure mode you'd encounter in production? not mocking the entire api - just - wrapping your existing code with configurable chaos. tell it "fail 10% of the time" - or "add random latency" or "cut the stream halfway through" and let your error - handling prove itself. -

- -

- not a mock. not a test framework. just chaos. realistic, configurable, - provider-accurate chaos that works with anything async. -

- -

- building it -

- -

- the core took a weekend. wrap any async function, inject failures at a configurable - rate, add random latency between two bounds, occasionally just... never resolve. the - fundamentals of chaos in about 200 lines of typescript. -

- -
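a rough sketch of that core wrapper, using only the option names that appear in the demo components elsewhere in this diff (fail, delay, timeout); the real surface may be wider:

import { cruel } from "cruel"

// wrap any async function; chaos is injected probabilistically on each call
const api = cruel(fetch, {
  fail: 0.1,          // roughly 10% of calls reject
  delay: [120, 900],  // add 120-900ms of latency
  timeout: 0.05,      // roughly 5% of calls hang until they time out
})

// callers use the wrapped function exactly like the original
const res = await api("https://api.example.com")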

- then came the network simulation layer. packet loss, dns failures, disconnects, slow - connections. then http chaos - status codes, rate limits, server errors. then stream - manipulation - cuts, pauses, corruption, truncation. each layer building on the core - but targeting specific failure domains. -

- -
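the stream layer, sketched with the options the feature panels in this diff show (slowTokens, corruptChunks, streamCut); the comments guess at the units:

import { openai } from "@ai-sdk/openai"
import { streamText } from "ai"
import { cruelModel } from "cruel/ai-sdk"

// slow token cadence, occasional corrupted chunks, rare mid-stream cuts
const model = cruelModel(openai("gpt-4o"), {
  slowTokens: [40, 120],  // pause between tokens
  corruptChunks: 0.03,    // a small share of chunks arrive mangled
  streamCut: 0.05,        // some streams stop partway through
})

const result = streamText({ model, prompt: "explain chaos testing" })
for await (const part of result.fullStream) {
  // the ui layer has to survive whatever shows up here
  console.log(part)
}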

- the ai sdk integration is where it got really interesting. i didn't want generic - failures - i wanted failures that match real provider behavior exactly. when cruel - simulates a rate limit, it returns the correct status code with a realistic - retry-after header. when it simulates an overloaded error, the error object has the - right shape, the right properties, the right behavior that the ai sdk's retry system - expects. -

- -
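and the ai sdk wrapper itself, combining the preset, rate-limit, and diagnostics options shown in this diff's feature panels and terminal scenes - a sketch, not the full api:

import { openai } from "@ai-sdk/openai"
import { generateText } from "ai"
import { cruelModel, presets } from "cruel/ai-sdk"

const events: string[] = []

// provider-shaped failures: rate limits with retry-after, overloaded errors, timeouts
const model = cruelModel(openai("gpt-4o"), {
  ...presets.nightmare,
  rateLimit: 0.1,                              // simulate 429s on a share of calls
  onChaos: (event) => events.push(event.type), // record which failures were injected
})

await generateText({ model, prompt: "summarize the incident" })
console.log(events)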

- this means your error handling code sees exactly what it would see from a real - provider failure. no surprises in production because you already tested against the - real thing - or at least something indistinguishable from it. -

- -

- then resilience patterns. circuit breaker, retry with backoff, bulkhead isolation, - timeout wrappers, fallbacks. not because cruel is trying to be a resilience library - - there are great ones already - but because when you're chaos testing, you want to - verify these patterns actually work under pressure. -

- -
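for instance, a hand-rolled retry with backoff can be drilled against a chaos-wrapped fetch - only the cruel(...) call below comes from this diff; the retry helper is plain typescript written for illustration:

import { cruel } from "cruel"

const flaky = cruel(fetch, { fail: 0.3, delay: [50, 300] })

// generic retry with exponential backoff - the pattern under test, not part of cruel
async function withRetry<T>(fn: () => Promise<T>, attempts = 3): Promise<T> {
  let lastError: unknown
  for (let i = 0; i < attempts; i++) {
    try {
      return await fn()
    } catch (error) {
      lastError = error
      await new Promise((resolve) => setTimeout(resolve, 2 ** i * 100)) // 100ms, 200ms, 400ms
    }
  }
  throw lastError
}

// with 30% injected failure the loop either recovers or surfaces the final error
const res = await withRetry(() => flaky("https://api.example.com"))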

- zero dependencies. i was obsessive about this one. no runtime deps means no supply - chain risk, no version conflicts, no transitive dependency nightmares. just - typescript and your code. install it, import it, use it. nothing else comes along - for the ride. -

- -

- the name -

- -

- i thought about this for longer than i'd like to admit. tested a bunch of names. - "chaos-inject" felt corporate. "fault-line" felt geological. "havoc" was taken. -

- -

- then i just thought about what chaos testing should feel like. it should be - uncomfortable. it should break things you thought were solid. it should find the - bugs you didn't know existed. it should be relentless and thorough and completely - without sympathy for your assumptions. -

- -

it should be cruel.

- -

- that's the whole philosophy in one word. if your tests are gentle, your production - failures will be brutal. better to find out now - in development, with a stack trace - and a debugger and a cup of coffee - than at 3am from a production alert while your - users watch a loading spinner that never stops. -

- -

- what's next -

- -

- cruel is open source and i'm building it in public. the core is stable, the ai sdk - integration works, the resilience patterns are solid. but there's so much more to - do. -

- -

- better test matchers. more realistic failure scenarios. deeper integration with - vitest and jest. maybe a visual dashboard that shows you exactly how your app - behaves under different chaos profiles. provider-specific failure libraries that - evolve as the apis evolve. -

- -

- if you're building ai apps and you've ever been bitten by a production failure you - didn't test for, give cruel a try. break things on purpose. find the bugs before - your users do. -

- -

- and if you find a failure mode i haven't thought of yet, open an issue. the cruelest - ideas come from real production pain. -

+
+ {intro.map((line) => ( +

{line}

+ ))} +
-

zero mercy.

-
-

-
+
+ {parts.map((item) => ( +
+

+ {item.title} +

+
+ {item.text.map((line) => ( +

{line}

+ ))} +
+
+ ))} +
+ +
+
) diff --git a/packages/web/components/landing/code-demo.tsx b/packages/web/components/landing/code-demo.tsx new file mode 100644 index 0000000..5d085dd --- /dev/null +++ b/packages/web/components/landing/code-demo.tsx @@ -0,0 +1,59 @@ +export function CodeDemo() { + return ( +
+
+
+
+
+
chaos.ts
+
+
+
+          [code rows - the row markup was stripped in this rendering; the rendered snippet reads:]
+          1  import { cruel } from "cruel"
+          2
+          3  const api = cruel(fetch, {
+          4    fail: 0.1,          // 10% failure rate
+          5    delay: [100, 500],  // 100-500ms latency
+          6    timeout: 0.05,      // 5% timeouts
+          7  })
+ ) +} diff --git a/packages/web/components/landing/cursor-bento.tsx b/packages/web/components/landing/cursor-bento.tsx new file mode 100644 index 0000000..b564c32 --- /dev/null +++ b/packages/web/components/landing/cursor-bento.tsx @@ -0,0 +1,148 @@ +import Link from "next/link" + +type item = { + readonly id: string + readonly title: string + readonly body: string + readonly href: string + readonly label: string +} + +const items: readonly item[] = [ + { + id: "001", + title: "Chaos presets", + body: "Start from realistic presets, then tune options per provider, model, or API.", + href: "/docs/chaos", + label: "Read chaos", + }, + { + id: "002", + title: "Core and AI SDK", + body: "Wrap fetch and async functions with cruel(...), or wrap providers and models with cruel/ai-sdk.", + href: "/docs/core", + label: "Read core api", + }, + { + id: "003", + title: "Stream drills", + body: "Test slow tokens, chunk corruption, and stream cuts before shipping UI.", + href: "/docs/chaos", + label: "Read streaming", + }, + { + id: "004", + title: "CI replay", + body: "Seed chaos once and replay deterministic sequences in every pipeline.", + href: "/docs/advanced", + label: "Read replay", + }, +] + +function mark(index: number) { + if (index === 0) { + return ( + + ) + } + + if (index === 1) { + return ( + + ) + } + + if (index === 2) { + return ( + + ) + } + + return ( + + ) +} + +export function CursorFrontier() { + return ( +
+
+
+
+

+ Chaos infrastructure that ships. +

+
+
+

+ We focus on production-grade failure simulation for AI apps and async backends, not + synthetic demos. +

+
+ + Open workflow + + +
+
+
+ +
+ {items.map((item, index) => ( +
= 2 ? "md:border-b-0" : ""} ${ + index > 0 ? "lg:border-l" : "lg:border-l-0" + } lg:border-b-0`} + > +
{item.id}
+
{mark(index)}
+

+ {item.title} +

+

+ {item.body} +

+
+ + {item.label} + +
+
+ ))} +
+
+
+ ) +} diff --git a/packages/web/components/landing/cursor-features.tsx b/packages/web/components/landing/cursor-features.tsx new file mode 100644 index 0000000..80f804e --- /dev/null +++ b/packages/web/components/landing/cursor-features.tsx @@ -0,0 +1,187 @@ +"use client" + +import Link from "next/link" +import type { ReactNode } from "react" +import { CursorMacWindow } from "./cursor-mac-window" +import { CursorStage } from "./cursor-stage" + +type row = { + readonly tone: "cmd" | "code" | "dim" + readonly text: string +} + +const presetrows: readonly row[] = [ + { tone: "cmd", text: "$ bun add cruel ai @ai-sdk/openai" }, + { tone: "code", text: 'import { openai } from "@ai-sdk/openai"' }, + { tone: "code", text: 'import { generateText } from "ai"' }, + { tone: "code", text: 'import { cruelModel, presets } from "cruel/ai-sdk"' }, + { tone: "dim", text: "" }, + { tone: "code", text: 'const model = cruelModel(openai("gpt-4o"), {' }, + { tone: "code", text: " ...presets.nightmare," }, + { tone: "code", text: " timeout: 0.2," }, + { tone: "code", text: "})" }, +] + +const streamrows: readonly row[] = [ + { tone: "code", text: 'const model = cruelModel(openai("gpt-4o"), {' }, + { tone: "code", text: " slowTokens: [40, 120]," }, + { tone: "code", text: " corruptChunks: 0.03," }, + { tone: "code", text: " streamCut: 0.05," }, + { tone: "code", text: "})" }, + { tone: "dim", text: "" }, + { tone: "code", text: "const result = streamText({ model, prompt })" }, + { tone: "code", text: "for await (const part of result.fullStream) {" }, + { tone: "code", text: " consume(part)" }, + { tone: "code", text: "}" }, +] + +const diagnosticrows: readonly row[] = [ + { tone: "code", text: 'import { cruelModel } from "cruel/ai-sdk"' }, + { tone: "code", text: "const events = [] as string[]" }, + { tone: "code", text: "const model = cruelModel(openai('gpt-4o'), {" }, + { tone: "code", text: " onChaos: (event) => events.push(event.type)," }, + { tone: "code", text: "})" }, + { tone: "dim", text: "" }, + { tone: "code", text: "await generateText({ model, prompt })" }, + { tone: "code", text: "const counts = events.reduce(group, {})" }, + { tone: "code", text: "console.log(counts)" }, +] + +function LearnMore({ href, label }: { readonly href: string; readonly label: string }) { + return ( + + {label} + + ) +} + +function rowstyle(tone: row["tone"]): string { + switch (tone) { + case "cmd": + return "text-white/78" + case "dim": + return "text-white/42" + default: + return "text-white/68" + } +} + +function Panel({ rows }: { readonly rows: readonly row[] }) { + return ( +
+
+ {rows.map((entry, index) => ( +
+ {entry.text || "\u00A0"} +
+ ))} +
+
+ ) +} + +function Spotlight({ + tone, + title, + description, + bullets, + linkHref, + linkLabel, + flip, + window, +}: { + readonly tone: "dune" | "mist" | "sage" + readonly title: string + readonly description: string + readonly bullets: readonly string[] + readonly linkHref: string + readonly linkLabel: string + readonly flip?: boolean + readonly window: ReactNode +}) { + return ( +
+
+

{title}

+

{description}

+
    + {bullets.map((b) => ( +
  • + + {b} +
  • + ))} +
+
+ +
+
+ +
+ +
+ + {window} + +
+
+
+
+ ) +} + +export function CursorFeatures() { + return ( +
+
+
+ } + /> + + } + /> + + } + /> +
+
+
+ ) +} diff --git a/packages/web/components/landing/cursor-footer.tsx b/packages/web/components/landing/cursor-footer.tsx new file mode 100644 index 0000000..5aacdd2 --- /dev/null +++ b/packages/web/components/landing/cursor-footer.tsx @@ -0,0 +1,109 @@ +import Link from "next/link" + +export function CursorFooter() { + return ( +
+
+
+
+
Cruel
+
+ Chaos engineering for AI SDK and async APIs. +
+
+ © {new Date().getFullYear()} Visible +
+
+ +
+
Product
+
+ + Docs + + + Core API + + + AI SDK integration + + + Chaos modes + + + Resilience patterns + +
+
+ +
+
+ Resources +
+ +
+ +
+
Company
+
+ + Visible + + + Story + +
+
+
+
+
+ ) +} diff --git a/packages/web/components/landing/cursor-hero.tsx b/packages/web/components/landing/cursor-hero.tsx new file mode 100644 index 0000000..ae18954 --- /dev/null +++ b/packages/web/components/landing/cursor-hero.tsx @@ -0,0 +1,61 @@ +"use client" + +import Link from "next/link" +import { CursorMacWindow } from "./cursor-mac-window" +import { CursorStage } from "./cursor-stage" +import { CursorTerminal } from "./cursor-terminal" + +export function CursorHero() { + return ( +
+
+
+

+ Ship resilient AI and APIs. +

+

+ Inject realistic failures into AI SDK flows and core async APIs before launch. +

+
+ +
+ + Start Here + + +
+
+ +
+
+ +
+ + + +
+
+
+
+
+ ) +} diff --git a/packages/web/components/landing/cursor-mac-window.tsx b/packages/web/components/landing/cursor-mac-window.tsx new file mode 100644 index 0000000..d2c1878 --- /dev/null +++ b/packages/web/components/landing/cursor-mac-window.tsx @@ -0,0 +1,28 @@ +import type { ReactNode } from "react" + +export function CursorMacWindow({ + title, + bar = true, + children, +}: { + readonly title: string + readonly bar?: boolean + readonly children: ReactNode +}) { + return ( +
+ {bar ? ( +
+
+
+
+
+
+
{title}
+
+
+ ) : null} + {children} +
+ ) +} diff --git a/packages/web/components/landing/cursor-nav.tsx b/packages/web/components/landing/cursor-nav.tsx new file mode 100644 index 0000000..fd5ce2a --- /dev/null +++ b/packages/web/components/landing/cursor-nav.tsx @@ -0,0 +1,101 @@ +"use client" + +import { Github } from "lucide-react" +import Link from "next/link" +import { useEffect, useState } from "react" + +export function CursorNav() { + const [scrolled, setScrolled] = useState(false) + + useEffect(() => { + const handleScroll = () => { + setScrolled(window.scrollY > 20) + } + window.addEventListener("scroll", handleScroll) + return () => window.removeEventListener("scroll", handleScroll) + }, []) + + return ( + + ) +} diff --git a/packages/web/components/landing/cursor-stage.tsx b/packages/web/components/landing/cursor-stage.tsx new file mode 100644 index 0000000..ab2d493 --- /dev/null +++ b/packages/web/components/landing/cursor-stage.tsx @@ -0,0 +1,59 @@ +import type { ReactNode } from "react" + +type CursorStageTone = "dune" | "mist" | "sage" + +const toneStyles: Record< + CursorStageTone, + { backgroundColor: string; backgroundImage: string; shadow: string } +> = { + dune: { + backgroundColor: "#c9c2b4", + backgroundImage: + "radial-gradient(900px 420px at 26% 18%, rgba(255,255,255,0.55) 0%, rgba(255,255,255,0) 62%), radial-gradient(720px 520px at 82% 72%, rgba(0,0,0,0.18) 0%, rgba(0,0,0,0) 58%), linear-gradient(180deg, rgba(0,0,0,0.12) 0%, rgba(0,0,0,0) 55%)", + shadow: + "0 50px 90px rgba(0, 0, 0, 0.55), 0 2px 0 rgba(255, 255, 255, 0.06) inset, 0 -1px 0 rgba(0, 0, 0, 0.24) inset", + }, + mist: { + backgroundColor: "#cecac2", + backgroundImage: + "radial-gradient(900px 420px at 22% 20%, rgba(255,255,255,0.6) 0%, rgba(255,255,255,0) 62%), radial-gradient(820px 560px at 80% 74%, rgba(0,0,0,0.16) 0%, rgba(0,0,0,0) 60%), linear-gradient(180deg, rgba(0,0,0,0.1) 0%, rgba(0,0,0,0) 55%)", + shadow: + "0 50px 90px rgba(0, 0, 0, 0.55), 0 2px 0 rgba(255, 255, 255, 0.06) inset, 0 -1px 0 rgba(0, 0, 0, 0.24) inset", + }, + sage: { + backgroundColor: "#cfcac3", + backgroundImage: + "radial-gradient(900px 420px at 24% 18%, rgba(255,255,255,0.55) 0%, rgba(255,255,255,0) 62%), radial-gradient(780px 560px at 82% 72%, rgba(0,0,0,0.17) 0%, rgba(0,0,0,0) 60%), linear-gradient(180deg, rgba(0,0,0,0.12) 0%, rgba(0,0,0,0) 55%)", + shadow: + "0 50px 90px rgba(0, 0, 0, 0.55), 0 2px 0 rgba(255, 255, 255, 0.06) inset, 0 -1px 0 rgba(0, 0, 0, 0.24) inset", + }, +} + +export function CursorStage({ + tone = "dune", + square = false, + children, +}: { + readonly tone?: CursorStageTone + readonly square?: boolean + readonly children: ReactNode +}) { + const styles = toneStyles[tone] + + return ( +
+
+
+
{children}
+
+ ) +} diff --git a/packages/web/components/landing/cursor-terminal.tsx b/packages/web/components/landing/cursor-terminal.tsx new file mode 100644 index 0000000..2fe8b4b --- /dev/null +++ b/packages/web/components/landing/cursor-terminal.tsx @@ -0,0 +1,141 @@ +"use client" + +import { useMemo, useState } from "react" + +type tone = "input" | "plain" | "dim" | "ok" | "warn" | "err" + +type line = { + readonly tone: tone + readonly text: string +} + +type scene = { + readonly name: string + readonly seed: number + readonly data: readonly line[] +} + +const scenes: readonly scene[] = [ + { + name: "1: presets", + seed: 42, + data: [ + { tone: "input", text: "$ cruel presets" }, + { tone: "plain", text: "available presets:" }, + { tone: "dim", text: "development fail=0.01 delay=[10,100]" }, + { tone: "dim", text: "staging fail=0.05 delay=[50,500] timeout=0.02" }, + { tone: "dim", text: "production fail=0.10 delay=[100,1000] timeout=0.05" }, + { tone: "dim", text: "harsh fail=0.20 delay=[500,2000] timeout=0.10" }, + { tone: "dim", text: "nightmare fail=0.40 delay=[1000,5000] timeout=0.20" }, + { tone: "plain", text: "" }, + { tone: "input", text: "$ cruel preset nightmare --seed 42" }, + { tone: "ok", text: "preset locked" }, + ], + }, + { + name: "2: core", + seed: 77, + data: [ + { tone: "plain", text: 'import { cruel } from "cruel"' }, + { tone: "plain", text: "const api = cruel(fetch, {" }, + { tone: "plain", text: " fail: 0.1," }, + { tone: "plain", text: " delay: [120, 900]," }, + { tone: "plain", text: " timeout: 0.05," }, + { tone: "plain", text: "})" }, + { tone: "plain", text: "" }, + { tone: "input", text: 'const res = await api("https://api.example.com")' }, + { tone: "ok", text: "request chaos injected" }, + ], + }, + { + name: "3: ai-sdk", + seed: 12, + data: [ + { tone: "plain", text: 'import { gateway } from "@ai-sdk/gateway"' }, + { tone: "plain", text: 'import { cruelModel } from "cruel/ai-sdk"' }, + { tone: "plain", text: 'import { generateText } from "ai"' }, + { tone: "plain", text: "" }, + { tone: "plain", text: 'const model = cruelModel(gateway("openai/gpt-4o"), {' }, + { tone: "plain", text: " rateLimit: 0.1," }, + { tone: "plain", text: " slowTokens: [40, 120]," }, + { tone: "plain", text: "})" }, + { tone: "plain", text: "await generateText({ model, prompt })" }, + ], + }, +] + +function style(tone: tone): string { + switch (tone) { + case "input": + return "text-white/78" + case "dim": + return "text-white/42" + case "ok": + return "text-[#8FCF84]" + case "warn": + return "text-[#FFC66D]" + case "err": + return "text-[#FF6B68]" + default: + return "text-white/68" + } +} + +export function CursorTerminal() { + const [slot, setslot] = useState(1) + const active = scenes[slot] + const rows = useMemo(() => active.data, [active]) + + return ( +
+
+
+
+ session {active.name} +
+
+ seed {active.seed} +
+
+
+ + stable +
+
+ +
+ {rows.map((row, index) => ( +
+ {row.text || "\u00A0"} +
+ ))} +
+ +
+
+ {scenes.map((scene, index) => { + const current = index === slot + return ( + + ) + })} +
+
+
+ ) +} diff --git a/packages/web/components/landing/features.tsx b/packages/web/components/landing/features.tsx new file mode 100644 index 0000000..ee4e701 --- /dev/null +++ b/packages/web/components/landing/features.tsx @@ -0,0 +1,102 @@ +export function Features() { + const features = [ + { + title: "Network Chaos", + description: "Simulate latency, packet loss, and disconnections.", + icon: ( + + ), + }, + { + title: "HTTP Failures", + description: "Inject 4xx/5xx errors, rate limits, and timeouts.", + icon: ( + + ), + }, + { + title: "Stream Interruption", + description: "Cut streams, pause chunks, and corrupt data.", + icon: ( + + ), + }, + { + title: "AI Specific", + description: "Test token limits, context overflow, and hallucinations.", + icon: ( + + ), + }, + ] + + return ( +
+ {features.map((feature) => ( +
+
+ {feature.icon} +
+

{feature.title}

+

{feature.description}

+
+
+ ))} +
+ ) +} diff --git a/packages/web/components/landing/hero.tsx b/packages/web/components/landing/hero.tsx new file mode 100644 index 0000000..fad64f7 --- /dev/null +++ b/packages/web/components/landing/hero.tsx @@ -0,0 +1,144 @@ +"use client" + +import { useEffect, useRef } from "react" + +export function Hero() { + const canvasRef = useRef(null) + + useEffect(() => { + const canvas = canvasRef.current + if (!canvas) return + + const ctx = canvas.getContext("2d") + if (!ctx) return + + let animationFrameId: number + let width = window.innerWidth + let height = window.innerHeight + + const resize = () => { + width = window.innerWidth + height = window.innerHeight + canvas.width = width + canvas.height = height + } + + window.addEventListener("resize", resize) + resize() + + const particles: Array<{ + x: number + y: number + vx: number + vy: number + size: number + color: string + life: number + }> = [] + + const createParticle = (x: number, y: number) => { + return { + x, + y, + vx: (Math.random() - 0.5) * 0.5, + vy: (Math.random() - 0.5) * 0.5, + size: Math.random() * 1.5 + 0.5, + color: Math.random() > 0.9 ? "#ff4444" : "#ffffff", // Occasional red "error" particle + life: Math.random() * 100 + 100, + } + } + + // Initialize grid points + const gridSpacing = 40 + const cols = Math.ceil(width / gridSpacing) + const rows = Math.ceil(height / gridSpacing) + + for (let i = 0; i < cols; i++) { + for (let j = 0; j < rows; j++) { + if (Math.random() > 0.85) { + particles.push( + createParticle(i * gridSpacing + gridSpacing / 2, j * gridSpacing + gridSpacing / 2), + ) + } + } + } + + const draw = () => { + ctx.clearRect(0, 0, width, height) + + // Draw subtle grid + ctx.strokeStyle = "rgba(255, 255, 255, 0.03)" + ctx.lineWidth = 1 + ctx.beginPath() + for (let i = 0; i < cols; i++) { + ctx.moveTo(i * gridSpacing, 0) + ctx.lineTo(i * gridSpacing, height) + } + for (let j = 0; j < rows; j++) { + ctx.moveTo(0, j * gridSpacing) + ctx.lineTo(width, j * gridSpacing) + } + ctx.stroke() + + // Update and draw particles + particles.forEach((p, index) => { + p.x += p.vx + p.y += p.vy + p.life-- + + if (p.life <= 0 || p.x < 0 || p.x > width || p.y < 0 || p.y > height) { + // Reset particle to a random grid position + const col = Math.floor(Math.random() * cols) + const row = Math.floor(Math.random() * rows) + p.x = col * gridSpacing + gridSpacing / 2 + p.y = row * gridSpacing + gridSpacing / 2 + p.life = Math.random() * 100 + 100 + p.vx = (Math.random() - 0.5) * 0.5 + p.vy = (Math.random() - 0.5) * 0.5 + } + + ctx.fillStyle = + p.color === "#ff4444" ? "rgba(255, 68, 68, 0.8)" : "rgba(255, 255, 255, 0.4)" + ctx.beginPath() + ctx.arc(p.x, p.y, p.size, 0, Math.PI * 2) + ctx.fill() + + // Connect nearby particles + for (let j = index + 1; j < particles.length; j++) { + const p2 = particles[j] + const dx = p.x - p2.x + const dy = p.y - p2.y + const dist = Math.sqrt(dx * dx + dy * dy) + + if (dist < 60) { + ctx.strokeStyle = + p.color === "#ff4444" || p2.color === "#ff4444" + ? 
"rgba(255, 68, 68, 0.15)" + : "rgba(255, 255, 255, 0.05)" + ctx.lineWidth = 0.5 + ctx.beginPath() + ctx.moveTo(p.x, p.y) + ctx.lineTo(p2.x, p2.y) + ctx.stroke() + } + } + }) + + animationFrameId = requestAnimationFrame(draw) + } + + draw() + + return () => { + window.removeEventListener("resize", resize) + cancelAnimationFrame(animationFrameId) + } + }, []) + + return ( + + ) +} diff --git a/packages/web/package.json b/packages/web/package.json index ee10204..d784ce6 100644 --- a/packages/web/package.json +++ b/packages/web/package.json @@ -7,7 +7,7 @@ "build": "next build", "start": "next start", "source": "bun ../../node_modules/fumadocs-mdx/dist/bin.js", - "typecheck": "bun run source && tsc --noEmit" + "typecheck": "bun run source && bunx tsc --noEmit" }, "dependencies": { "@orama/core": "1.2.18", @@ -20,7 +20,8 @@ "geist": "^1.4.0", "next": "^16.0.0", "react": "^19.0.0", - "react-dom": "^19.0.0" + "react-dom": "^19.0.0", + "simple-icons": "^16.9.0" }, "devDependencies": { "@types/node": "^22.0.0",