diff --git a/packages/web/app/docs.md/route.ts b/packages/web/app/docs.md/route.ts index 52a3d5c..f66bffb 100644 --- a/packages/web/app/docs.md/route.ts +++ b/packages/web/app/docs.md/route.ts @@ -32,7 +32,7 @@ function name(path: string): string { export async function GET(): Promise { const paths = await files() - const chunks: string[] = ["# cruel docs", ""] + const chunks: string[] = ["# Cruel Docs", ""] for (const path of paths) { const raw = await readFile(path, "utf8") diff --git a/packages/web/app/docs/layout.tsx b/packages/web/app/docs/layout.tsx index 965f949..823000d 100644 --- a/packages/web/app/docs/layout.tsx +++ b/packages/web/app/docs/layout.tsx @@ -10,12 +10,58 @@ export default function Layout({ children }: { children: ReactNode }) { + + + ), url: "/", }} links={[ - { text: "story", url: "/story" }, - { text: "github", url: "https://github.com/visible/cruel" }, + { text: "Story", url: "/story" }, + { text: "GitHub", url: "https://github.com/visible/cruel" }, ]} themeSwitch={{ enabled: false }} > diff --git a/packages/web/app/layout.tsx b/packages/web/app/layout.tsx index 5dac849..5e5a483 100644 --- a/packages/web/app/layout.tsx +++ b/packages/web/app/layout.tsx @@ -6,32 +6,32 @@ import type { Metadata, Viewport } from "next" import "./globals.css" export const metadata: Metadata = { - title: "cruel", - description: "chaos testing with zero mercy", + title: "Cruel", + description: "Chaos testing with zero mercy", metadataBase: new URL("https://cruel.dev"), icons: { icon: "/icon.svg", apple: "/apple-icon.png", }, openGraph: { - title: "cruel", - description: "chaos testing with zero mercy", + title: "Cruel", + description: "Chaos testing with zero mercy", url: "https://cruel.dev", - siteName: "cruel", + siteName: "Cruel", type: "website", images: [ { url: "/og.png", width: 1200, height: 630, - alt: "cruel", + alt: "Cruel", }, ], }, twitter: { card: "summary_large_image", - title: "cruel", - description: "chaos testing with zero mercy", + title: "Cruel", + description: "Chaos testing with zero mercy", images: ["/og.png"], }, } diff --git a/packages/web/app/llms.txt/route.ts b/packages/web/app/llms.txt/route.ts index 9bd6620..e4cd40e 100644 --- a/packages/web/app/llms.txt/route.ts +++ b/packages/web/app/llms.txt/route.ts @@ -33,7 +33,7 @@ export async function GET(): Promise { const lines: string[] = [ "project: cruel", "site: https://cruel.dev", - "summary: chaos engineering for ai sdk and async apis", + "summary: chaos engineering for AI SDK and async APIs", "", "docs_markdown: https://cruel.dev/docs.md", "docs_root: https://cruel.dev/docs", diff --git a/packages/web/app/story/page.tsx b/packages/web/app/story/page.tsx index 9da7dbf..60a450e 100644 --- a/packages/web/app/story/page.tsx +++ b/packages/web/app/story/page.tsx @@ -2,13 +2,13 @@ import type { Metadata } from "next" import { CursorNav } from "../../components/landing/cursor-nav" const intro = [ - "3am. phone buzzes. production alert. something is wrong, but not with our code - our code is perfect. passed every test, every lint check, every code review. the problem? the ai provider started rate limiting us and our retry logic had a subtle bug that nobody ever caught. because in development, the api never fails.", - "users see a blank screen. classic.", - "this happened more than once. different projects, different providers, same pattern. everything works beautifully in development. 
the streams flow smoothly, the tokens arrive one by one, the json parses clean, the error boundaries sit there looking pretty, completely untested against real failures.", - "then you deploy and the real world introduces itself.", - "rate limits hit during your busiest hour. streams cut mid-sentence on your longest responses. structured output comes back with malformed json. context length errors surface only on conversations from your most engaged users - the ones you really don't want to lose. content filters trigger on inputs nobody on the team ever thought to test against.", - "and it's not any one provider's fault. this is just the nature of building on top of ai apis. every provider - whether it's openai, anthropic, google, mistral, cohere, or anyone else - has their own failure modes, their own error formats, their own rate limit behaviors. they're all doing incredible work pushing the boundaries of what's possible. but distributed systems fail. that's not a bug, it's physics.", - "the question isn't whether your ai integration will encounter failures in production. the question is whether you've tested what happens when it does.", + "3am. Phone buzzes. Production alert. Something is wrong, but not with our code - our code is perfect. Passed every test, every lint check, every code review. The problem? The AI provider started rate limiting us and our retry logic had a subtle bug that nobody ever caught. Because in development, the API never fails.", + "Users see a blank screen. Classic.", + "This happened more than once. Different projects, different providers, same pattern. Everything works beautifully in development. The streams flow smoothly, the tokens arrive one by one, the JSON parses clean, the error boundaries sit there looking pretty, completely untested against real failures.", + "Then you deploy and the real world introduces itself.", + "Rate limits hit during your busiest hour. Streams cut mid-sentence on your longest responses. Structured output comes back with malformed JSON. Context length errors surface only on conversations from your most engaged users - the ones you really don't want to lose. Content filters trigger on inputs nobody on the team ever thought to test against.", + "And it's not any one provider's fault. This is just the nature of building on top of AI APIs. Every provider - whether it's OpenAI, Anthropic, Google, Mistral, Cohere, or anyone else - has their own failure modes, their own error formats, their own rate limit behaviors. They're all doing incredible work pushing the boundaries of what's possible. But distributed systems fail. That's not a bug, it's physics.", + "The question isn't whether your AI integration will encounter failures in production. The question is whether you've tested what happens when it does.", ] as const type part = { @@ -18,61 +18,61 @@ type part = { const parts: readonly part[] = [ { - title: "the duct tape era", + title: "The Duct Tape Era", text: [ - "for the longest time, my approach to this problem was embarrassingly manual. need to test a rate limit? hardcode a mock response that returns a 429. need to test a stream cut? write a custom readable stream that stops halfway through. need to test a timeout? add a setTimeout that never resolves.", - 'copy and paste between projects. slightly different each time. never quite matching the real error format. always incomplete. always the thing i\'d "get to later" and never actually finish.', - "and honestly? most of the time i just skipped it entirely. 
shipped the code, crossed my fingers, and hoped that the error handling i wrote based on reading the docs would actually work when a real failure hit.", - "spoiler: reading the docs is not the same as testing against real failures.", - "the retry logic that looks correct in a code review? it doesn't respect the retry-after header. the stream error handler that catches the right error type? it doesn't clean up the partial response in the ui. the circuit breaker pattern you implemented from that blog post? it's never actually been tripped.", - "you don't know if your parachute works until you jump. and we were all jumping without ever testing the chute.", + "For the longest time, my approach to this problem was embarrassingly manual. Need to test a rate limit? Hardcode a mock response that returns a 429. Need to test a stream cut? Write a custom readable stream that stops halfway through. Need to test a timeout? Add a setTimeout that never resolves.", + 'Copy and paste between projects. Slightly different each time. Never quite matching the real error format. Always incomplete. Always the thing I\'d "get to later" and never actually finish.', + "And honestly? Most of the time I just skipped it entirely. Shipped the code, crossed my fingers, and hoped that the error handling I wrote based on reading the docs would actually work when a real failure hit.", + "Spoiler: reading the docs is not the same as testing against real failures.", + "The retry logic that looks correct in a code review? It doesn't respect the retry-after header. The stream error handler that catches the right error type? It doesn't clean up the partial response in the UI. The circuit breaker pattern you implemented from that blog post? It's never actually been tripped.", + "You don't know if your parachute works until you jump. And we were all jumping without ever testing the chute.", ], }, { - title: "the idea", + title: "The Idea", text: [ - "i work on the ai sdk team at vercel. it's an incredible team - lars, nico, and everyone else shipping tools that millions of developers use every day. being part of this team means i get to see how ai integrations work across the entire ecosystem. every provider, every framework, every edge case.", - "and i kept seeing the same pattern: developers build amazing ai features, test them against the happy path, ship to production, and then discover their error handling has gaps when real-world chaos hits.", - "the ai sdk already does incredible work abstracting away provider differences. unified api, streaming support, structured output, tool calling - all the hard parts handled cleanly. but the one thing no sdk can do for you is test your app's resilience against failures that only happen in production.", - 'that\'s when it clicked. what if there was a library that could simulate every failure mode you\'d encounter in production? not mocking the entire api - just wrapping your existing code with configurable chaos. tell it "fail 10% of the time" or "add random latency" or "cut the stream halfway through" and let your error handling prove itself.', - "not a mock. not a test framework. just chaos. realistic, configurable, provider-accurate chaos that works with anything async.", + "I work on the AI SDK team at Vercel. It's an incredible team - Lars, Nico, and everyone else shipping tools that millions of developers use every day. Being part of this team means I get to see how AI integrations work across the entire ecosystem. 
Every provider, every framework, every edge case.", + "And I kept seeing the same pattern: developers build amazing AI features, test them against the happy path, ship to production, and then discover their error handling has gaps when real-world chaos hits.", + "The AI SDK already does incredible work abstracting away provider differences. Unified API, streaming support, structured output, tool calling - all the hard parts handled cleanly. But the one thing no SDK can do for you is test your app's resilience against failures that only happen in production.", + 'That\'s when it clicked. What if there was a library that could simulate every failure mode you\'d encounter in production? Not mocking the entire API - just wrapping your existing code with configurable chaos. Tell it "fail 10% of the time" or "add random latency" or "cut the stream halfway through" and let your error handling prove itself.', + "Not a mock. Not a test framework. Just chaos. Realistic, configurable, provider-accurate chaos that works with anything async.", ], }, { - title: "building it", + title: "Building It", text: [ - "the core took a weekend. wrap any async function, inject failures at a configurable rate, add random latency between two bounds, occasionally just... never resolve. the fundamentals of chaos in about 200 lines of typescript.", - "then came the network simulation layer. packet loss, dns failures, disconnects, slow connections. then http chaos - status codes, rate limits, server errors. then stream manipulation - cuts, pauses, corruption, truncation. each layer building on the core but targeting specific failure domains.", - "the ai sdk integration is where it got really interesting. i didn't want generic failures - i wanted failures that match real provider behavior exactly. when cruel simulates a rate limit, it returns the correct status code with a realistic retry-after header. when it simulates an overloaded error, the error object has the right shape, the right properties, the right behavior that the ai sdk's retry system expects.", - "this means your error handling code sees exactly what it would see from a real provider failure. no surprises in production because you already tested against the real thing - or at least something indistinguishable from it.", - "then resilience patterns. circuit breaker, retry with backoff, bulkhead isolation, timeout wrappers, fallbacks. not because cruel is trying to be a resilience library - there are great ones already - but because when you're chaos testing, you want to verify these patterns actually work under pressure.", - "zero dependencies. i was obsessive about this one. no runtime deps means no supply chain risk, no version conflicts, no transitive dependency nightmares. just typescript and your code. install it, import it, use it. nothing else comes along for the ride.", + "The core took a weekend. Wrap any async function, inject failures at a configurable rate, add random latency between two bounds, occasionally just... never resolve. The fundamentals of chaos in about 200 lines of TypeScript.", + "Then came the network simulation layer. Packet loss, DNS failures, disconnects, slow connections. Then HTTP chaos - status codes, rate limits, server errors. Then stream manipulation - cuts, pauses, corruption, truncation. Each layer building on the core but targeting specific failure domains.", + "The AI SDK integration is where it got really interesting. I didn't want generic failures - I wanted failures that match real provider behavior exactly. 
When Cruel simulates a rate limit, it returns the correct status code with a realistic retry-after header. When it simulates an overloaded error, the error object has the right shape, the right properties, the right behavior that the AI SDK's retry system expects.", + "This means your error handling code sees exactly what it would see from a real provider failure. No surprises in production because you already tested against the real thing - or at least something indistinguishable from it.", + "Then resilience patterns. Circuit breaker, retry with backoff, bulkhead isolation, timeout wrappers, fallbacks. Not because Cruel is trying to be a resilience library - there are great ones already - but because when you're chaos testing, you want to verify these patterns actually work under pressure.", + "Zero dependencies. I was obsessive about this one. No runtime deps means no supply chain risk, no version conflicts, no transitive dependency nightmares. Just TypeScript and your code. Install it, import it, use it. Nothing else comes along for the ride.", ], }, { - title: "the name", + title: "The Name", text: [ - 'i thought about this for longer than i\'d like to admit. tested a bunch of names. "chaos-inject" felt corporate. "fault-line" felt geological. "havoc" was taken.', - "then i just thought about what chaos testing should feel like. it should be uncomfortable. it should break things you thought were solid. it should find the bugs you didn't know existed. it should be relentless and thorough and completely without sympathy for your assumptions.", - "it should be cruel.", - "that's the whole philosophy in one word. if your tests are gentle, your production failures will be brutal. better to find out now - in development, with a stack trace and a debugger and a cup of coffee - than at 3am from a production alert while your users watch a loading spinner that never stops.", + 'I thought about this for longer than I\'d like to admit. Tested a bunch of names. "chaos-inject" felt corporate. "fault-line" felt geological. "havoc" was taken.', + "Then I just thought about what chaos testing should feel like. It should be uncomfortable. It should break things you thought were solid. It should find the bugs you didn't know existed. It should be relentless and thorough and completely without sympathy for your assumptions.", + "It should be Cruel.", + "That's the whole philosophy in one word. If your tests are gentle, your production failures will be brutal. Better to find out now - in development, with a stack trace and a debugger and a cup of coffee - than at 3am from a production alert while your users watch a loading spinner that never stops.", ], }, { - title: "what's next", + title: "What's Next", text: [ - "cruel is open source and i'm building it in public. the core is stable, the ai sdk integration works, the resilience patterns are solid. but there's so much more to do.", - "better test matchers. more realistic failure scenarios. deeper integration with vitest and jest. maybe a visual dashboard that shows you exactly how your app behaves under different chaos profiles. provider-specific failure libraries that evolve as the apis evolve.", - "if you're building ai apps and you've ever been bitten by a production failure you didn't test for, give cruel a try. break things on purpose. find the bugs before your users do.", - "and if you find a failure mode i haven't thought of yet, open an issue. 
the cruelest ideas come from real production pain.", - "zero mercy.", + "Cruel is open source and I'm building it in public. The core is stable, the AI SDK integration works, the resilience patterns are solid. But there's so much more to do.", + "Better test matchers. More realistic failure scenarios. Deeper integration with Vitest and Jest. Maybe a visual dashboard that shows you exactly how your app behaves under different chaos profiles. Provider-specific failure libraries that evolve as the APIs evolve.", + "If you're building AI apps and you've ever been bitten by a production failure you didn't test for, give Cruel a try. Break things on purpose. Find the bugs before your users do.", + "And if you find a failure mode I haven't thought of yet, open an issue. The cruelest ideas come from real production pain.", + "Zero mercy.", ], }, ] as const export const metadata: Metadata = { - title: "story - cruel", - description: "why i built cruel", + title: "Story - Cruel", + description: "Why I Built Cruel", } export default function Page() { @@ -89,9 +89,9 @@ export default function Page() {

- why i built cruel
+ Why I Built Cruel

- february 2026
+ February 2026
{intro.map((line) => ( diff --git a/packages/web/components/landing/cursor-bento.tsx b/packages/web/components/landing/cursor-bento.tsx index b564c32..5901377 100644 --- a/packages/web/components/landing/cursor-bento.tsx +++ b/packages/web/components/landing/cursor-bento.tsx @@ -21,7 +21,7 @@ const items: readonly item[] = [ title: "Core and AI SDK", body: "Wrap fetch and async functions with cruel(...), or wrap providers and models with cruel/ai-sdk.", href: "/docs/core", - label: "Read core api", + label: "Read Core API", }, { id: "003", diff --git a/packages/web/content/docs/advanced.mdx b/packages/web/content/docs/advanced.mdx index 4ec176f..382e060 100644 --- a/packages/web/content/docs/advanced.mdx +++ b/packages/web/content/docs/advanced.mdx @@ -1,9 +1,9 @@ --- -title: advanced -description: presets, global mode, scenarios, fetch interception +title: Advanced +description: Presets, global mode, scenarios, and fetch interception --- -## presets +## Presets ```ts cruel.enable(cruel.presets.development) @@ -14,14 +14,14 @@ cruel.enable(cruel.presets.nightmare) cruel.enable(cruel.presets.apocalypse) ``` -## profiles +## Profiles ```ts cruel.profile("testing", { fail: 0.2, delay: 100 }) cruel.useProfile("testing") ``` -## global mode +## Global Mode ```ts cruel.enable({ fail: 0.1, delay: [100, 500] }) @@ -30,7 +30,7 @@ cruel.toggle() cruel.isEnabled() ``` -## scoped chaos +## Scoped Chaos ```ts await cruel.scope(async () => { @@ -38,7 +38,7 @@ await cruel.scope(async () => { }, { fail: 0.2 }) ``` -## fluent api +## Fluent API ```ts cruel.wrap(fn).fail(0.1) @@ -48,7 +48,7 @@ cruel.wrap(fn).flaky() cruel.wrap(fn).nightmare() ``` -## factory +## Factory ```ts import { createCruel } from "cruel" @@ -56,7 +56,7 @@ import { createCruel } from "cruel" const myCruel = createCruel({ delay: 100 }) ``` -## scenarios +## Scenarios ```ts cruel.scenario("outage", { @@ -73,7 +73,7 @@ await cruel.play("degraded") await cruel.play("recovery") ``` -## fetch interception +## Fetch Interception ```ts cruel.patchFetch() @@ -91,13 +91,13 @@ cruel.intercept(/api\.anthropic\.com/, { cruel.unpatchFetch() ``` -## production rollout checklist +## Production Rollout Checklist -for production-like chaos drills, keep blast radius controlled: +For production-like chaos drills, keep blast radius controlled: -1. start with low rates (`0.01` - `0.05`) and increase gradually -2. run chaos in a scoped block or dedicated test path first -3. attach `onChaos` and record event types, model ids, and latency -4. keep retry, fallback, and circuit-breaker logic enabled during tests -5. run deterministic passes with `cruel.seed(...)` before random passes -6. stop chaos immediately if error budget or latency thresholds are exceeded +1. Start with low rates (`0.01` - `0.05`) and increase gradually. +2. Run chaos in a scoped block or dedicated test path first. +3. Attach `onChaos` and record event types, model IDs, and latency. +4. Keep retry, fallback, and circuit-breaker logic enabled during tests. +5. Run deterministic passes with `cruel.seed(...)` before random passes. +6. Stop chaos immediately if error budget or latency thresholds are exceeded. 
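As a rough sketch of how the checklist steps above can fit together in one drill script (assuming a plain `fetch`-based service; the endpoint, rates, and iteration count are illustrative, not part of the library):

```ts
import { cruel } from "cruel"

// Deterministic pass first so failures reproduce run-to-run (checklist item 5).
cruel.seed(12345)

// Record every chaos event emitted during the drill (checklist item 3).
const off = cruel.on((event) => {
  console.log("chaos event", event)
})

// Start with low rates and modest latency on a single wrapped call path (items 1-2).
const flaky = cruel(fetch, { fail: 0.02, delay: [50, 200] })

// Keep resilience logic enabled while testing - here a circuit breaker (item 4).
const api = cruel.circuitBreaker(flaky, { threshold: 5, timeout: 30000 })

// Dedicated drill path: hammer one endpoint and tolerate individual failures.
for (let i = 0; i < 20; i++) {
  await api("https://api.example.com").catch(() => {})
}

// Inspect results before raising rates; stop if error budgets are blown (item 6).
console.log(cruel.stats())
cruel.resetStats()
off()
```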
diff --git a/packages/web/content/docs/aisdk.mdx b/packages/web/content/docs/aisdk.mdx index 9af6c4d..eb9143d 100644 --- a/packages/web/content/docs/aisdk.mdx +++ b/packages/web/content/docs/aisdk.mdx @@ -1,9 +1,9 @@ --- -title: ai sdk -description: chaos testing for ai providers, models, and tools +title: AI SDK +description: Chaos testing for AI providers, models, and tools --- -## ai failure modes +## AI Failure Modes ```ts cruel.ai.rateLimit(fn, 0.1) @@ -19,7 +19,7 @@ cruel.ai.realistic(fn) cruel.ai.nightmare(fn) ``` -## wrap a provider +## Wrap a Provider ```ts import { cruelProvider } from "cruel/ai-sdk" @@ -38,7 +38,7 @@ const result = await generateText({ }) ``` -## wrap a model +## Wrap a Model ```ts import { cruelModel, presets } from "cruel/ai-sdk" @@ -46,20 +46,20 @@ import { cruelModel, presets } from "cruel/ai-sdk" const model = cruelModel(openai("gpt-4o"), presets.realistic) ``` -## model override for tests +## Model Override for Tests -set `MODEL` to swap model ids without editing code: +Set `MODEL` to swap model IDs without editing code: ```bash MODEL=gpt-6 bun run your-script.ts ``` -this works for ids with and without a provider prefix: +This works for IDs with and without a provider prefix: - `gpt-4o` -> `gpt-6` - `openai/gpt-4o` -> `openai/gpt-6` -## streaming with chaos +## Streaming with Chaos ```ts import { cruelModel } from "cruel/ai-sdk" @@ -73,7 +73,7 @@ const result = streamText({ }) ``` -## middleware +## Middleware ```ts import { cruelMiddleware } from "cruel/ai-sdk" @@ -92,7 +92,7 @@ const model = wrapLanguageModel({ }) ``` -## presets +## Presets ```ts import { presets } from "cruel/ai-sdk" @@ -104,7 +104,7 @@ presets.nightmare // extreme chaos presets.apocalypse // everything fails ``` -## tool wrapping +## Tool Wrapping ```ts import { cruelTools } from "cruel/ai-sdk" @@ -119,9 +119,9 @@ const tools = cruelTools({ }) ``` -## error handling +## Error Handling -cruel errors are fully compatible with the ai sdk's `APICallError.isInstance()` check, so retries work automatically: +Cruel errors are fully compatible with the AI SDK's `APICallError.isInstance()` check, so retries work automatically: ```ts import { APICallError } from "ai" @@ -136,7 +136,7 @@ try { } ``` -## embeddings +## Embeddings ```ts import { cruelEmbeddingModel } from "cruel/ai-sdk" @@ -151,7 +151,7 @@ const model = cruelEmbeddingModel(openai.embedding("text-embedding-3-small"), { const { embedding } = await embed({ model, value: "hello" }) ``` -## images +## Images ```ts import { cruelImageModel } from "cruel/ai-sdk" diff --git a/packages/web/content/docs/chaos.mdx b/packages/web/content/docs/chaos.mdx index 2062409..2a49d49 100644 --- a/packages/web/content/docs/chaos.mdx +++ b/packages/web/content/docs/chaos.mdx @@ -1,9 +1,9 @@ --- -title: chaos injection -description: network, http, and stream failure simulation +title: Chaos Injection +description: Network, HTTP, and stream failure simulation --- -## network +## Network ```ts cruel.network.latency(fn, [100, 500]) @@ -15,7 +15,7 @@ cruel.network.unstable(fn) cruel.network.offline(fn) ``` -## http +## HTTP ```ts cruel.http.status(fn, 500, 0.1) @@ -28,7 +28,7 @@ cruel.http.serviceUnavailable(fn) cruel.http.gatewayTimeout(fn) ``` -## streams +## Streams ```ts cruel.stream.cut(fn, 0.1) diff --git a/packages/web/content/docs/core.mdx b/packages/web/content/docs/core.mdx index c1c7f38..939ffc0 100644 --- a/packages/web/content/docs/core.mdx +++ b/packages/web/content/docs/core.mdx @@ -1,9 +1,9 @@ --- -title: core api -description: the base 
cruel api for fetch, services, and async functions +title: Core API +description: The base Cruel API for fetch, services, and async functions --- -## quick start +## Quick Start ```ts import { cruel } from "cruel" @@ -19,16 +19,16 @@ const res = await api("https://api.example.com") `cruel(fn, options)` injects chaos into async functions. -## try without api keys +## Try Without API Keys ```bash cd packages/examples bun run run.ts core ``` -this runs local core examples only (`core/basic.ts`, `core/resilience.ts`, `core/control.ts`). +This runs local core examples only (`core/basic.ts`, `core/resilience.ts`, `core/control.ts`). -## chaos options +## Chaos Options ```ts type ChaosOptions = { @@ -50,7 +50,7 @@ type ChaosOptions = { - `spike`: occasional added latency - `enabled`: force-disable on a wrapper -## shortcut wrappers +## Shortcut Wrappers ```ts cruel.fail(fn, 0.1) @@ -61,9 +61,9 @@ cruel.unreliable(fn) cruel.nightmare(fn) ``` -## domains +## Domains -### network +### Network ```ts cruel.network.latency(fn, [100, 500]) @@ -76,7 +76,7 @@ cruel.network.unstable(fn) cruel.network.offline(fn) ``` -### http +### HTTP ```ts cruel.http.status(fn, 500, 0.1) @@ -89,7 +89,7 @@ cruel.http.serviceUnavailable(fn) cruel.http.gatewayTimeout(fn) ``` -### stream +### Stream ```ts cruel.stream.cut(fn, 0.1) @@ -104,7 +104,7 @@ cruel.stream.slow(fn) cruel.stream.flaky(fn) ``` -### ai +### AI ```ts cruel.ai.rateLimit(fn, 0.1) @@ -120,7 +120,7 @@ cruel.ai.realistic(fn) cruel.ai.nightmare(fn) ``` -## state and control +## State and Control ```ts cruel.enable({ fail: 0.1, delay: [100, 500] }) @@ -133,7 +133,7 @@ await cruel.scope(async () => { }, { fail: 0.2 }) ``` -## resilience wrappers +## Resilience Wrappers ```ts cruel.circuitBreaker(fn, { threshold: 5, timeout: 30000 }) @@ -147,7 +147,7 @@ cruel.cache(fn, { ttl: 30000 }) cruel.abort(fn, { signal }) ``` -## composition +## Composition ```ts const wrapped = cruel.compose(fetcher, { @@ -170,7 +170,7 @@ cruel.wrap(fetcher).slow([100, 500]) cruel.wrap(fetcher).timeout(0.05) ``` -## presets and scenarios +## Presets and Scenarios ```ts cruel.enable(cruel.presets.development) @@ -184,7 +184,7 @@ await cruel.play("recovery") cruel.stop() ``` -## diagnostics +## Diagnostics ```ts const off = cruel.on((event) => { @@ -196,7 +196,7 @@ cruel.resetStats() off() ``` -## fetch interception +## Fetch Interception ```ts cruel.patchFetch() @@ -214,7 +214,7 @@ cruel.unpatchFetch() cruel.clearIntercepts() ``` -## utilities +## Utilities ```ts cruel.seed(12345) diff --git a/packages/web/content/docs/gateway.mdx b/packages/web/content/docs/gateway.mdx index 26b1cb7..375430a 100644 --- a/packages/web/content/docs/gateway.mdx +++ b/packages/web/content/docs/gateway.mdx @@ -1,11 +1,11 @@ --- -title: ai gateway -description: chaos testing through the vercel ai gateway +title: AI Gateway +description: Chaos testing through the Vercel AI Gateway --- -cruel works with `@ai-sdk/gateway` to test how your app handles failures when routing through the vercel ai gateway. the gateway lets you use any provider through a single interface - cruel injects chaos at the model layer so it works identically regardless of which provider the gateway routes to. +Cruel works with `@ai-sdk/gateway` to test how your app handles failures when routing through the Vercel AI Gateway. The gateway lets you use any provider through a single interface - Cruel injects chaos at the model layer so it works identically regardless of which provider the gateway routes to. 
-## basic usage +## Basic Usage ```ts import { gateway } from "@ai-sdk/gateway" @@ -24,9 +24,9 @@ const result = await generateText({ }) ``` -## any provider +## Any Provider -the gateway uses `provider/model` format. cruel wraps the gateway model the same way it wraps any ai sdk model: +The gateway uses `provider/model` format. Cruel wraps the gateway model the same way it wraps any AI SDK model: ```ts cruelModel(gateway("openai/gpt-4o"), opts) @@ -37,7 +37,7 @@ cruelModel(gateway("xai/grok-3-fast"), opts) cruelModel(gateway("deepseek/deepseek-chat"), opts) ``` -## model override for tests +## Model Override for Tests `MODEL` can swap only the model segment while preserving the gateway provider prefix: @@ -48,7 +48,7 @@ MODEL=gpt-6 bun run your-script.ts - `openai/gpt-4o` -> `openai/gpt-6` - `anthropic/claude-sonnet-4-5-20250929` -> `anthropic/gpt-6` -## streaming +## Streaming ```ts import { gateway } from "@ai-sdk/gateway" @@ -73,7 +73,7 @@ for await (const chunk of result.fullStream) { } ``` -## structured output +## Structured Output ```ts import { gateway } from "@ai-sdk/gateway" @@ -99,7 +99,7 @@ const result = await generateText({ }) ``` -## tool calling +## Tool Calling ```ts import { gateway } from "@ai-sdk/gateway" @@ -129,7 +129,7 @@ const result = await generateText({ }) ``` -## embeddings +## Embeddings ```ts import { gateway } from "@ai-sdk/gateway" @@ -144,9 +144,9 @@ const model = cruelEmbeddingModel( const { embedding } = await embed({ model, value: "hello" }) ``` -## fallback pattern +## Fallback Pattern -test what happens when a provider fails and you need to fall back: +Test what happens when a provider fails and you need to fall back: ```ts import { gateway } from "@ai-sdk/gateway" @@ -172,9 +172,9 @@ try { } ``` -## multi-provider comparison +## Multi-provider Comparison -test the same prompt across multiple gateway providers under chaos: +Test the same prompt across multiple gateway providers under chaos: ```ts import { gateway } from "@ai-sdk/gateway" diff --git a/packages/web/content/docs/index.mdx b/packages/web/content/docs/index.mdx index 4c604c4..2da621b 100644 --- a/packages/web/content/docs/index.mdx +++ b/packages/web/content/docs/index.mdx @@ -1,13 +1,13 @@ --- -title: getting started -description: chaos testing with zero mercy +title: Getting Started +description: Chaos testing with zero mercy --- -cruel is a chaos engineering library for testing how your ai applications handle failures. it wraps ai sdk models with configurable fault injection - rate limits, timeouts, stream cuts, corrupt responses, and more. +Cruel is a chaos engineering library for testing how your AI applications handle failures. It wraps AI SDK models with configurable fault injection - rate limits, timeouts, stream cuts, corrupt responses, and more. -for the base `cruel(...)` function wrappers (fetch/services/core api), see the [core api](/docs/core) page. +For the base `cruel(...)` function wrappers (fetch/services/Core API), see the [Core API](/docs/core) page. -## install +## Install @@ -41,7 +41,7 @@ pnpm add cruel -## wrap a model +## Wrap a Model ```ts import { openai } from "@ai-sdk/openai" @@ -60,20 +60,20 @@ const result = await generateText({ }) ``` -this wraps the model so 20% of calls get a 429 rate limit error and every call has 100-500ms of added latency. the `onChaos` callback fires whenever chaos is injected. +This wraps the model so 20% of calls get a 429 rate limit error and every call has 100-500ms of added latency. 
The `onChaos` callback fires whenever chaos is injected. -## how it works +## How It Works -cruel sits between your app and the ai provider. when a call is made: +Cruel sits between your app and the AI provider. When a call is made: -1. **pre-call checks** - rate limit, overloaded, timeout, etc. fire before the api call -2. **api call** - if pre-call passes, the real api call happens -3. **post-call mutations** - partial response, token usage override, etc. modify the result -4. **stream transforms** - slow tokens, corrupt chunks, stream cuts modify the stream +1. **Pre-call checks** - rate limit, overloaded, timeout, etc. fire before the API call +2. **API call** - if pre-call passes, the real API call happens +3. **Post-call mutations** - partial response, token usage override, etc. modify the result +4. **Stream transforms** - slow tokens, corrupt chunks, stream cuts modify the stream -errors use the same format as real provider errors (`APICallError.isInstance()` returns true), so your retry logic works exactly like it would in production. +Errors use the same format as real provider errors (`APICallError.isInstance()` returns true), so your retry logic works exactly like it would in production. -## streaming +## Streaming ```ts import { streamText } from "ai" @@ -92,7 +92,7 @@ for await (const chunk of result.fullStream) { } ``` -## with the gateway +## With the Gateway ```ts import { gateway } from "@ai-sdk/gateway" @@ -104,7 +104,7 @@ const model = cruelModel(gateway("openai/gpt-4o"), { }) ``` -## presets +## Presets ```ts import { cruelModel, presets } from "cruel/ai-sdk" @@ -114,7 +114,7 @@ cruelModel(openai("gpt-4o"), presets.nightmare) cruelModel(openai("gpt-4o"), presets.apocalypse) ``` -## wrap everything +## Wrap Everything ```ts import { cruelProvider } from "cruel/ai-sdk" @@ -126,7 +126,7 @@ chaos.embeddingModel("text-embedding-3-small") // embedding model chaos.imageModel("dall-e-3") // image model ``` -## run the examples +## Run the Examples ```bash cd packages/examples @@ -138,4 +138,4 @@ bun run run.ts ai-sdk openai -m gpt-6 bun run run.ts ai-gateway openai --model gpt-6 ``` -`-m` / `--model` sets `MODEL` for each matched example process. this swaps the model id without changing example files. +`-m` / `--model` sets `MODEL` for each matched example process. This swaps the model ID without changing example files. diff --git a/packages/web/content/docs/meta.json b/packages/web/content/docs/meta.json index ed68e1d..d0b8cb4 100644 --- a/packages/web/content/docs/meta.json +++ b/packages/web/content/docs/meta.json @@ -1,5 +1,5 @@ { - "title": "docs", + "title": "Docs", "pages": [ "index", "core", diff --git a/packages/web/content/docs/reference.mdx b/packages/web/content/docs/reference.mdx index 9191b11..f17d955 100644 --- a/packages/web/content/docs/reference.mdx +++ b/packages/web/content/docs/reference.mdx @@ -1,13 +1,13 @@ --- -title: api reference -description: all chaos options and model wrappers +title: API Reference +description: All chaos options and model wrappers --- -for the base `cruel(...)` function api, see [core api](/docs/core). this page focuses on `cruel/ai-sdk`. +For the base `cruel(...)` function API, see [Core API](/docs/core). This page focuses on `cruel/ai-sdk`. ## cruelModel -wraps a language model with chaos injection +Wraps a language model with chaos injection. ```ts import { cruelModel } from "cruel/ai-sdk" @@ -20,7 +20,7 @@ const model = cruelModel(openai("gpt-4o"), { ## cruelEmbeddingModel -wraps an embedding model +Wraps an embedding model. 
```ts import { cruelEmbeddingModel } from "cruel/ai-sdk" @@ -32,7 +32,7 @@ const model = cruelEmbeddingModel(openai.embedding("text-embedding-3-small"), { ## cruelImageModel -wraps an image model +Wraps an image model. ```ts import { cruelImageModel } from "cruel/ai-sdk" @@ -44,7 +44,7 @@ const model = cruelImageModel(openai.image("dall-e-3"), { ## cruelSpeechModel -wraps a speech model +Wraps a speech model. ```ts import { cruelSpeechModel } from "cruel/ai-sdk" @@ -56,7 +56,7 @@ const model = cruelSpeechModel(openai.speech("tts-1"), { ## cruelTranscriptionModel -wraps a transcription model +Wraps a transcription model. ```ts import { cruelTranscriptionModel } from "cruel/ai-sdk" @@ -68,7 +68,7 @@ const model = cruelTranscriptionModel(openai.transcription("whisper-1"), { ## cruelVideoModel -wraps a video model +Wraps a video model. ```ts import { cruelVideoModel } from "cruel/ai-sdk" @@ -80,7 +80,7 @@ const model = cruelVideoModel(google.video("veo-2.0-generate-001"), { ## cruelProvider -wraps an entire provider - automatically dispatches to the correct wrapper based on model type +Wraps an entire provider and automatically dispatches to the correct wrapper based on model type. ```ts import { cruelProvider } from "cruel/ai-sdk" @@ -99,7 +99,7 @@ chaos.imageModel("dall-e-3") // cruelImageModel ## cruelMiddleware -creates ai sdk middleware for chaos injection +Creates AI SDK middleware for chaos injection. ```ts import { cruelMiddleware } from "cruel/ai-sdk" @@ -113,7 +113,7 @@ const model = wrapLanguageModel({ ## cruelTool / cruelTools -wraps tool execution with chaos +Wraps tool execution with chaos. ```ts import { cruelTool, cruelTools } from "cruel/ai-sdk" @@ -122,9 +122,9 @@ const tool = cruelTool(myTool, { toolFailure: 0.2 }) const tools = cruelTools({ search, calc }, { toolFailure: 0.1 }) ``` -## model id override +## Model ID Override -if `MODEL` is set in the environment, cruel swaps the model id used by wrappers: +If `MODEL` is set in the environment, Cruel swaps the model ID used by wrappers: ```bash MODEL=gpt-6 bun run your-script.ts @@ -133,13 +133,13 @@ MODEL=gpt-6 bun run your-script.ts - `gpt-4o` -> `gpt-6` - `openai/gpt-4o` -> `openai/gpt-6` -## chaos options +## Chaos Options -all options are probabilities between 0 and 1 (0 = never, 1 = always) +All options are probabilities between 0 and 1 (0 = never, 1 = always). -### pre-call failures +### Pre-call Failures -these fire before the api request is made +These fire before the API request is made. | option | type | description | |--------|------|-------------| @@ -155,9 +155,9 @@ these fire before the api request is made | `timeout` | `number` | hangs forever (never resolves) | | `delay` | `number \| [min, max]` | adds latency in ms before the call | -### post-call mutations +### Post-call Mutations -these modify the response after a successful api call +These modify the response after a successful API call. | option | type | description | |--------|------|-------------| @@ -165,9 +165,9 @@ these modify the response after a successful api call | `finishReason` | `string` | overrides the finish reason | | `tokenUsage` | `{ inputTokens?, outputTokens? }` | overrides token counts | -### stream transforms +### Stream Transforms -these modify the token stream in real-time +These modify the token stream in real-time. 
| option | type | description | |--------|------|-------------| @@ -175,14 +175,14 @@ these modify the token stream in real-time | `streamCut` | `number` | kills the stream mid-transfer | | `corruptChunks` | `number` | replaces random characters with the replacement character | -### tool options +### Tool Options | option | type | description | |--------|------|-------------| | `toolFailure` | `number` | tool execution throws an error | | `toolTimeout` | `number` | tool execution hangs forever | -## presets +## Presets ```ts import { presets } from "cruel/ai-sdk" @@ -196,9 +196,9 @@ import { presets } from "cruel/ai-sdk" | `nightmare` | 0.3 | 0.15 | 0.15 | 500-2000ms | | `apocalypse` | 0.4 | 0.2 | 0.2 | 1000-5000ms | -## onChaos callback +## onChaos Callback -every chaos event fires a callback with the event type and model id +Every chaos event fires a callback with the event type and model ID. ```ts const model = cruelModel(openai("gpt-4o"), { @@ -209,11 +209,11 @@ const model = cruelModel(openai("gpt-4o"), { }) ``` -event types: `rateLimit`, `overloaded`, `contextLength`, `contentFilter`, `modelUnavailable`, `invalidApiKey`, `quotaExceeded`, `emptyResponse`, `fail`, `timeout`, `delay`, `streamCut`, `slowTokens`, `corruptChunk`, `partialResponse`, `toolFailure`, `toolTimeout` +Event types: `rateLimit`, `overloaded`, `contextLength`, `contentFilter`, `modelUnavailable`, `invalidApiKey`, `quotaExceeded`, `emptyResponse`, `fail`, `timeout`, `delay`, `streamCut`, `slowTokens`, `corruptChunk`, `partialResponse`, `toolFailure`, `toolTimeout`. -## diagnostics +## Diagnostics -programmatic chaos reporting for test suites. track events, record results, compute stats, print reports +Programmatic chaos reporting for test suites. Track events, record results, compute stats, and print reports. 
```ts import { cruelModel, diagnostics } from "cruel/ai-sdk" @@ -240,7 +240,7 @@ for (let i = 1; i <= 10; i++) { diagnostics.print(ctx) ``` -### raw stats for assertions +### Raw Stats for Assertions ```ts const s = diagnostics.stats(ctx) diff --git a/packages/web/content/docs/resilience.mdx b/packages/web/content/docs/resilience.mdx index 282d572..4937029 100644 --- a/packages/web/content/docs/resilience.mdx +++ b/packages/web/content/docs/resilience.mdx @@ -1,9 +1,9 @@ --- -title: resilience -description: circuit breaker, retry, bulkhead, timeout, fallback +title: Resilience +description: Circuit breaker, retry, bulkhead, timeout, and fallback --- -## circuit breaker +## Circuit Breaker ```ts const api = cruel.circuitBreaker(fetch, { @@ -17,7 +17,7 @@ await api("...") api.getState() // { state: "closed", failures: 0 } ``` -## retry with backoff +## Retry with Backoff ```ts const api = cruel.retry(fetch, { @@ -30,7 +30,7 @@ const api = cruel.retry(fetch, { }) ``` -## bulkhead +## Bulkhead ```ts const api = cruel.bulkhead(fetch, { @@ -40,7 +40,7 @@ const api = cruel.bulkhead(fetch, { }) ``` -## timeout +## Timeout ```ts const api = cruel.withTimeout(fetch, { @@ -49,7 +49,7 @@ const api = cruel.withTimeout(fetch, { }) ``` -## fallback +## Fallback ```ts const api = cruel.fallback(fetch, { @@ -58,7 +58,7 @@ const api = cruel.fallback(fetch, { }) ``` -## combine patterns +## Combine Patterns ```ts import { cruel } from "cruel" diff --git a/packages/web/content/docs/tooling.mdx b/packages/web/content/docs/tooling.mdx index 34c9484..cc0c77f 100644 --- a/packages/web/content/docs/tooling.mdx +++ b/packages/web/content/docs/tooling.mdx @@ -1,9 +1,9 @@ --- -title: tooling -description: utilities, cli, testing, and error types +title: Tooling +description: Utilities, CLI, testing, and error types --- -## utilities +## Utilities ```ts cruel.coin(0.5) @@ -13,21 +13,21 @@ cruel.maybe(value, 0.5) await cruel.delay(500) ``` -## stats +## Stats ```ts cruel.stats() cruel.resetStats() ``` -## deterministic mode +## Deterministic Mode ```ts cruel.seed(12345) cruel.coin(0.5) // same result every time ``` -## cli +## CLI ```bash cruel test https://api.example.com --fail 0.1 --count 20 @@ -36,15 +36,15 @@ cruel scenario outage --duration 5000 cruel presets ``` -## quality gate +## Quality Gate ```bash bun run qa ``` -this runs lint, type checks, package build/test, docs build, and no-key user smoke examples. +This runs lint, type checks, package build/test, docs build, and no-key user smoke examples. -## testing +## Testing ```ts import { describe, test, beforeEach } from "bun:test" @@ -61,7 +61,7 @@ test("handles failures", async () => { }) ``` -## error types +## Error Types ```ts import {