Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions .prompts/project-info.prompttemplate
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,20 @@ Tests are located in the same directory as the components under test.
If you want to compile something, or run the linter or tests, prefer to execute them for the changed packages first, as they will run faster. Only build the full project once you are
done, as a final validation.

### Building and Running the Demo App

The main example applications are in `/examples/`:

| Command (from root) | Purpose |
|---------------------|---------|
| `npm ci` | Install dependencies (required first) |
| `npm run build:browser` | Build all packages + browser app |
| `npm run start:browser` | Start browser example at localhost:3000 |
| `npm run start:electron` | Start Electron desktop app |
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
| `npm run start:electron` | Start Electron desktop app |

| `npm run watch` | Watch mode for development |

**Requirements:** Node.js ≥18.17.0, <21

### Styling

Theia permits extensive color theming and makes extensive use of CSS variables. Styles are typically located either in an `index.css` file for an entire package or in a
Expand Down
53 changes: 28 additions & 25 deletions packages/ai-anthropic/src/node/anthropic-language-model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import {
LanguageModelStreamResponsePart,
LanguageModelTextResponse,
TokenUsageService,
TokenUsageParams,
UserRequest,
ImageContent,
ToolCallResult,
Expand Down Expand Up @@ -263,7 +262,6 @@ export class AnthropicModel implements LanguageModel {

const asyncIterator = {
async *[Symbol.asyncIterator](): AsyncIterator<LanguageModelStreamResponsePart> {

const toolCalls: ToolCallback[] = [];
let toolCall: ToolCallback | undefined;
const currentMessages: Message[] = [];
Expand Down Expand Up @@ -313,25 +311,40 @@ export class AnthropicModel implements LanguageModel {
} else if (event.type === 'message_start') {
currentMessages.push(event.message);
currentMessage = event.message;
// Yield initial usage data (input tokens known, output tokens = 0)
if (event.message.usage) {
yield {
input_tokens: event.message.usage.input_tokens,
output_tokens: 0,
cache_creation_input_tokens: event.message.usage.cache_creation_input_tokens ?? undefined,
cache_read_input_tokens: event.message.usage.cache_read_input_tokens ?? undefined
};
}
} else if (event.type === 'message_stop') {
if (currentMessage) {
yield { input_tokens: currentMessage.usage.input_tokens, output_tokens: currentMessage.usage.output_tokens };
// Record token usage if token usage service is available
if (that.tokenUsageService && currentMessage.usage) {
const tokenUsageParams: TokenUsageParams = {
inputTokens: currentMessage.usage.input_tokens,
outputTokens: currentMessage.usage.output_tokens,
cachedInputTokens: currentMessage.usage.cache_creation_input_tokens || undefined,
readCachedInputTokens: currentMessage.usage.cache_read_input_tokens || undefined,
requestId: request.requestId
};
await that.tokenUsageService.recordTokenUsage(that.id, tokenUsageParams);
}
// Yield final output tokens only (input/cached tokens already yielded at message_start)
yield {
input_tokens: 0,
output_tokens: currentMessage.usage.output_tokens
};
}

}
}
if (toolCalls.length > 0) {
// singleRoundTrip mode: Return tool calls to caller without executing them.
// This allows external tool loop management (e.g., for budget-aware summarization).
// When enabled, we yield the tool_calls and return immediately; the caller
// handles tool execution and decides whether to continue the conversation.
if (request.singleRoundTrip) {
const pendingCalls = toolCalls.map(tc => ({
finished: true,
id: tc.id,
function: { name: tc.name, arguments: tc.args.length === 0 ? '{}' : tc.args }
}));
yield { tool_calls: pendingCalls };
return;
}

const toolResult = await Promise.all(toolCalls.map(async tc => {
const tool = request.tools?.find(t => t.name === tc.name);
const argsObject = tc.args.length === 0 ? '{}' : tc.args;
Expand Down Expand Up @@ -410,16 +423,6 @@ export class AnthropicModel implements LanguageModel {
const response = await anthropic.messages.create(params);
const textContent = response.content[0];

// Record token usage if token usage service is available
if (this.tokenUsageService && response.usage) {
const tokenUsageParams: TokenUsageParams = {
inputTokens: response.usage.input_tokens,
outputTokens: response.usage.output_tokens,
requestId: request.requestId
};
await this.tokenUsageService.recordTokenUsage(this.id, tokenUsageParams);
}

if (textContent?.type === 'text') {
return { text: textContent.text };
}
Expand Down
2 changes: 2 additions & 0 deletions packages/ai-chat-ui/src/browser/ai-chat-ui-frontend-module.ts
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ import {
TextPartRenderer,
} from './chat-response-renderer';
import { UnknownPartRenderer } from './chat-response-renderer/unknown-part-renderer';
import { SummaryPartRenderer } from './chat-response-renderer/summary-part-renderer';
import {
GitHubSelectionResolver,
TextFragmentSelectionResolver,
Expand Down Expand Up @@ -139,6 +140,7 @@ export default new ContainerModule((bind, _unbind, _isBound, rebind) => {
bind(ChatResponsePartRenderer).to(TextPartRenderer).inSingletonScope();
bind(ChatResponsePartRenderer).to(DelegationResponseRenderer).inSingletonScope();
bind(ChatResponsePartRenderer).to(UnknownPartRenderer).inSingletonScope();
bind(ChatResponsePartRenderer).to(SummaryPartRenderer).inSingletonScope();
[CommandContribution, MenuContribution].forEach(serviceIdentifier =>
bind(serviceIdentifier).to(ChatViewMenuContribution).inSingletonScope()
);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************

import { ChatResponsePartRenderer } from '../chat-response-part-renderer';
import { inject, injectable } from '@theia/core/shared/inversify';
import { ChatResponseContent, SummaryChatResponseContent } from '@theia/ai-chat/lib/common';
import { ReactNode } from '@theia/core/shared/react';
import { nls } from '@theia/core/lib/common/nls';
import * as React from '@theia/core/shared/react';
import { OpenerService } from '@theia/core/lib/browser';
import { useMarkdownRendering } from './markdown-part-renderer';

/**
* Renderer for SummaryChatResponseContent.
* Displays the summary in a collapsible section that is collapsed by default.
*/
@injectable()
export class SummaryPartRenderer implements ChatResponsePartRenderer<SummaryChatResponseContent> {

    @inject(OpenerService)
    protected readonly openerService: OpenerService;

    /**
     * Claims summary content with priority 10; any other content kind is
     * rejected with -1 so another renderer can pick it up.
     */
    canHandle(response: ChatResponseContent): number {
        return SummaryChatResponseContent.is(response) ? 10 : -1;
    }

    /** Delegates rendering to the {@link SummaryContent} functional component. */
    render(response: SummaryChatResponseContent): ReactNode {
        return <SummaryContent content={response.content} openerService={this.openerService} />;
    }
}

/** Props for the {@link SummaryContent} component. */
interface SummaryContentProps {
    /** Summary text; rendered as markdown via `useMarkdownRendering`. */
    content: string;
    /** Passed to the markdown hook, presumably to open links from the rendered summary — TODO confirm. */
    openerService: OpenerService;
}

const SummaryContent: React.FC<SummaryContentProps> = ({ content, openerService }) => {
const contentRef = useMarkdownRendering(content, openerService);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Likely a follow up but it would be amazing if the summary was editable in case the user is not satisfied with the summary afterwards and for example wants to highlight a specific fact.


return (
<div className='theia-chat-summary'>
<details>
<summary>
<span className='codicon codicon-bookmark'></span>
{nls.localize('theia/ai/chat-ui/summary-part-renderer/conversationSummary', 'Conversation Summary')}
</summary>
<div className='theia-chat-summary-content' ref={contentRef}></div>
</details>
</div>
);
};
Loading