+  <div>{message.content}</div>
+  {info && (
+    <div>
+      <span>{Math.round(usagePercent * 100)}% of context used</span>
+      <span>{totalTokens} tokens / {toolCount} tools</span>
+      {lastResponseUsage && (
+        <span>Last turn: {lastResponseUsage.total_tokens} tokens</span>
+      )}
+      <pre>{JSON.stringify(result, null, 2)}</pre>
+    </div>
+  )}
+/>
+```
+
+---
+
+## 8. Message Grouping
+
+`groupConsecutiveMessages` groups consecutive messages of the same role into visual clusters. Useful for building custom chat UIs where adjacent user or assistant messages should appear as one block.
+
+Available from the message-utils module:
+
+```typescript
+import {
+  groupConsecutiveMessages,
+  toLLMMessages,
+  toLLMMessage,
+  keepToolPairsAtomic,
+} from "@yourgpt/copilot-sdk-react";
+```
+
+Core invariant: **tool-call pairs are always atomic.** An assistant message with `tool_calls` is never separated from its corresponding tool-result messages during any windowing or pruning operation.
+
+---
+
+## 9. Server: compactSession
+
+The `compactSession` utility powers the `/api/compact` endpoint for `summary-buffer` compaction. It calls Claude (defaults to `claude-haiku-4-5`) to produce a structured summary that preserves:
+
+- User goals and requests
+- Technical decisions and chosen approaches
+- Tool call outcomes (name, key args, result status)
+- Errors and resolutions
+- Pending tasks and current work state
+
+```typescript
+// app/api/compact/route.ts
+import { compactSession } from "@yourgpt/copilot-sdk/server";
+
+export async function POST(req: Request) {
+ const { messages, existingSummary, workingMemory } = await req.json();
+
+ const { summary } = await compactSession({
+ messages,
+ existingSummary, // Passed in subsequent compactions for rolling summaries
+ workingMemory, // User-pinned facts (addToWorkingMemory)
+ model: "claude-haiku-4-5", // default
+ maxSummaryTokens: 1024, // default
+ apiKey: process.env.ANTHROPIC_API_KEY,
+ });
+
+ return Response.json({ summary });
+}
+```
+
+### CompactSessionOptions
+
+```typescript
+interface CompactSessionOptions {
+ messages: Array<{ role: string; content?: string | null }>;
+ existingSummary?: string | null;
+ workingMemory?: string[];
+ model?: string; // default: "claude-haiku-4-5"
+ maxSummaryTokens?: number; // default: 1024
+ apiKey?: string; // fallback: process.env.ANTHROPIC_API_KEY
+ apiBaseUrl?: string; // default: "https://api.anthropic.com"
+ fetchImpl?: typeof fetch;
+}
+```
+
+---
+
+## Quick-start: Full Setup
+
+```tsx
+// app/layout.tsx
+import { CopilotProvider } from "@yourgpt/copilot-sdk-react";
+
+export default function RootLayout({ children }) {
+ return (
+    <CopilotProvider>
+      {children}
+    </CopilotProvider>
+  );
+}
+```
+
+Inside the provider, a status bar can surface context usage, compaction state, and active skills:
+
+```tsx
+<div>
+  <span>{Math.round(usagePercent * 100)}% context used · {toolCount} tools</span>
+  {tokenUsage.isApproaching && <button onClick={compact}>Compact now</button>}
+  {isCompacting && <span>Summarizing history…</span>}
+  <span>{count} skill(s) active</span>
+  {has("code-review") && <span>code-review enabled</span>}
+</div>
+```