4 changes: 2 additions & 2 deletions package-lock.json

(Generated lockfile; diff not rendered.)

6 changes: 4 additions & 2 deletions package.json
@@ -158,10 +158,11 @@
"enum": [
"openai",
"anthropic",
"openrouter"
"openrouter",
"vscodelm"
],
"default": "anthropic",
"description": "AI model provider for custom agent (OpenAI, Anthropic, or OpenRouter)",
"description": "AI model provider for custom agent (OpenAI, Anthropic, OpenRouter, or VS Code LM)",
"scope": "application"
},
"superdesign.aiModel": {
@@ -179,6 +180,7 @@
"watch:esbuild": "node esbuild.js --watch",
"watch:tsc": "tsc --noEmit --watch --project tsconfig.json",
"package": "npm run check-types && npm run lint && node esbuild.js --production",
"build-vsix": "npm run package && vsce package",
"compile-tests": "tsc -p . --outDir out",
"watch-tests": "tsc -p . -w --outDir out",
"pretest": "npm run compile-tests && npm run compile && npm run lint",
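The new `vscodelm` enum value reaches the services through the standard configuration API. A minimal sketch of how the setting is read (the key names come from this `package.json`; the surrounding code is illustrative, not part of the diff):

```ts
import * as vscode from 'vscode';

// Read the provider declared above; 'anthropic' is the declared default.
const config = vscode.workspace.getConfiguration('superdesign');
const provider = config.get<string>('aiModelProvider', 'anthropic');

if (provider === 'vscodelm') {
    // No API key is required for this provider; models are supplied by
    // VS Code's Language Model API (vscode.lm).
}
```

The added `build-vsix` script chains the existing production build with `vsce package`, so `npm run build-vsix` produces an installable `.vsix` in one step.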
62 changes: 49 additions & 13 deletions src/providers/chatSidebarProvider.ts
@@ -71,6 +71,9 @@ export class ChatSidebarProvider implements vscode.WebviewViewProvider {
case 'stopChat':
await this.messageHandler.stopCurrentChat(webviewView.webview);
break;
case 'getVsCodeLMModels':
await this.handleGetVsCodeLMModels(webviewView.webview);
break;
case 'executeAction':
// Execute command from error action buttons
console.log('Executing action:', message.actionCommand, message.actionArgs);
@@ -95,6 +98,28 @@
);
}

private async handleGetVsCodeLMModels(webview: vscode.Webview) {
try {
const lm: any = (vscode as any).lm;
if (!lm || typeof lm.selectChatModels !== 'function') {
throw new Error('VS Code LM API is unavailable');
}
const models: any[] = await lm.selectChatModels({} as any);
const items = (models || []).map((m: any) => {
const vendor = m.vendor || 'unknown';
const family = m.family || '';
const name = m.name || '';
const idPart = family || name || 'model';
const id = `vscodelm/${vendor}/${idPart}`;
const label = `${vendor}${family ? ' / ' + family : (name ? ' / ' + name : '')}`;
return { id, label, vendor, family, name };
});
webview.postMessage({ command: 'vsCodeLmModels', models: items });
} catch (error) {
webview.postMessage({ command: 'vsCodeLmModels', models: [], error: String(error) });
}
}

private async handleGetCurrentProvider(webview: vscode.Webview) {
const config = vscode.workspace.getConfiguration('superdesign');
const currentProvider = config.get<string>('aiModelProvider', 'anthropic');
@@ -106,6 +131,9 @@
case 'openai':
defaultModel = 'gpt-4o';
break;
case 'vscodelm':
defaultModel = 'vscodelm/auto';
break;
case 'openrouter':
defaultModel = 'anthropic/claude-3-7-sonnet-20250219';
break;
@@ -132,7 +160,12 @@ export class ChatSidebarProvider implements vscode.WebviewViewProvider {
let configureCommand: string;
let displayName: string;

if (model.includes('/')) {
if (model === 'vscodelm' || model.startsWith('vscodelm')) {
provider = 'vscodelm';
apiKeyKey = '';
configureCommand = '';
displayName = `VS Code LM API (${this.getModelDisplayName(model)})`;
} else if (model.includes('/')) {
// OpenRouter model (contains slash like "openai/gpt-4o")
provider = 'openrouter';
apiKeyKey = 'openrouterApiKey';
@@ -154,18 +187,19 @@ export class ChatSidebarProvider implements vscode.WebviewViewProvider {
await config.update('aiModelProvider', provider, vscode.ConfigurationTarget.Global);
await config.update('aiModel', model, vscode.ConfigurationTarget.Global);

// Check if the API key is configured for the selected provider
const apiKey = config.get<string>(apiKeyKey);

if (!apiKey) {
const result = await vscode.window.showWarningMessage(
`${displayName} selected, but API key is not configured. Would you like to configure it now?`,
'Configure API Key',
'Later'
);

if (result === 'Configure API Key') {
await vscode.commands.executeCommand(configureCommand);
// Check if the API key is configured for the selected provider (skip for vscodelm)
if (provider !== 'vscodelm') {
const apiKey = apiKeyKey ? config.get<string>(apiKeyKey) : undefined;
if (!apiKey) {
const result = await vscode.window.showWarningMessage(
`${displayName} selected, but API key is not configured. Would you like to configure it now?`,
'Configure API Key',
'Later'
);

if (result === 'Configure API Key' && configureCommand) {
await vscode.commands.executeCommand(configureCommand);
}
}
}

@@ -183,6 +217,8 @@ export class ChatSidebarProvider implements vscode.WebviewViewProvider {

private getModelDisplayName(model: string): string {
const modelNames: { [key: string]: string } = {
// VS Code LM API
'vscodelm/auto': 'VS Code LM (Auto)',
// OpenAI models
'gpt-4.1': 'GPT-4.1',
'gpt-4.1-mini': 'GPT-4.1 Mini',
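The `getVsCodeLMModels`/`vsCodeLmModels` pair gives the webview a way to populate a model picker. The webview script itself is not part of this diff, so the counterpart below is hypothetical, assuming the standard webview messaging API:

```ts
// Hypothetical webview-side code (not in this diff): request the model list
// and handle the reply posted by handleGetVsCodeLMModels.
declare function acquireVsCodeApi(): { postMessage(msg: unknown): void };

const vscodeApi = acquireVsCodeApi();
vscodeApi.postMessage({ command: 'getVsCodeLMModels' });

window.addEventListener('message', (event) => {
    const msg = event.data;
    if (msg.command !== 'vsCodeLmModels') { return; }
    if (msg.error) {
        console.warn('VS Code LM models unavailable:', msg.error);
        return;
    }
    // Each item has the shape built above: { id, label, vendor, family, name },
    // e.g. id "vscodelm/copilot/gpt-4o", label "copilot / gpt-4o".
    for (const model of msg.models) {
        console.log(model.id, model.label);
    }
});
```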
11 changes: 10 additions & 1 deletion src/services/chatMessageService.ts
@@ -138,7 +138,9 @@ export class ChatMessageService {
let configureCommand = 'superdesign.configureApiKey';

if (specificModel) {
if (specificModel.includes('/')) {
if (specificModel === 'vscodelm' || specificModel.startsWith('vscodelm')) {
effectiveProvider = 'vscodelm';
} else if (specificModel.includes('/')) {
effectiveProvider = 'openrouter';
} else if (specificModel.startsWith('claude-')) {
effectiveProvider = 'anthropic';
@@ -148,6 +150,13 @@
}

switch (effectiveProvider) {
case 'vscodelm':
// VS Code LM does not use an API key; report availability guidance instead and return.
webview.postMessage({
command: 'chatError',
error: 'VS Code LM is unavailable (no accessible language model, or consent has not been granted). Please install and sign in to a provider extension (e.g., GitHub Copilot) in VS Code.'
});
return;
case 'openrouter':
providerName = 'OpenRouter';
configureCommand = 'superdesign.configureOpenRouterApiKey';
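This service and `customAgentService.ts` apply the same model-to-provider rule, with the new `vscodelm` check deliberately placed first: ids like `vscodelm/auto` contain a slash, so testing `includes('/')` first would misroute them to OpenRouter. Extracted as a standalone function for illustration (not code from this diff):

```ts
// Order matters: 'vscodelm/...' would otherwise match the OpenRouter slash rule.
function detectProvider(model: string): 'vscodelm' | 'openrouter' | 'anthropic' | 'openai' {
    if (model === 'vscodelm' || model.startsWith('vscodelm')) { return 'vscodelm'; }
    if (model.includes('/')) { return 'openrouter'; }      // e.g. "openai/gpt-4o"
    if (model.startsWith('claude-')) { return 'anthropic'; }
    return 'openai';
}

detectProvider('vscodelm/auto');                           // 'vscodelm'
detectProvider('anthropic/claude-3-7-sonnet-20250219');    // 'openrouter'
```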
110 changes: 106 additions & 4 deletions src/services/customAgentService.ts
@@ -90,7 +90,9 @@ export class CustomAgentService implements AgentService {
// Determine provider from model name if specific model is set
let effectiveProvider = provider;
if (specificModel) {
if (specificModel.includes('/')) {
if (specificModel === 'vscodelm' || specificModel.startsWith('vscodelm')) {
effectiveProvider = 'vscodelm';
} else if (specificModel.includes('/')) {
effectiveProvider = 'openrouter';
} else if (specificModel.startsWith('claude-')) {
effectiveProvider = 'anthropic';
@@ -100,6 +102,10 @@
}

switch (effectiveProvider) {
case 'vscodelm':
// For VS Code LM, we do not return an AI SDK model; handled in query()
// Return a sentinel value to indicate vscodelm flow
return { _provider: 'vscodelm', _model: specificModel || 'vscodelm/auto' } as any;
case 'openrouter':
const openrouterKey = config.get<string>('openrouterApiKey');
if (!openrouterKey) {
@@ -621,14 +627,31 @@ I've created the html design, please reveiw and let me know if you need any chan
generateTheme: createThemeTool(executionContext)
};

// Prepare model/provider
const selectedModel = this.getModel();

// VS Code LM API branch
if ((selectedModel as any)?._provider === 'vscodelm') {
this.outputChannel.appendLine('Using VS Code LM API provider flow');
await this.queryViaVsCodeLM(
usingConversationHistory ? undefined : prompt,
usingConversationHistory ? conversationHistory : undefined,
abortController,
onMessage
);
this.outputChannel.appendLine(`Query completed successfully. Total messages: ${responseMessages.length}`);
this.outputChannel.appendLine(`Complete response: "${messageBuffer}"`);
return responseMessages;
}

// Prepare AI SDK input based on available data
const streamTextConfig: any = {
model: this.getModel(),
model: selectedModel,
system: this.getSystemPrompt(),
tools: tools,
toolCallStreaming: true,
maxSteps: 10, // Enable multi-step reasoning with tools
maxTokens: 8192 // Increase token limit to prevent truncation
maxSteps: 10,
maxTokens: 8192
};

if (usingConversationHistory) {
@@ -868,6 +891,85 @@ I've created the html design, please reveiw and let me know if you need any chan
}
}

private async queryViaVsCodeLM(
prompt?: string,
conversationHistory?: CoreMessage[],
abortController?: AbortController,
onMessage?: (message: any) => void
): Promise<void> {
// Minimal integration per VSCodeLMAPI.md guidance
// Note: VS Code LM API is experimental and available only in VS Code runtime
try {
const selector: any = {} as any; // use default selection
const models = await (vscode as any).lm.selectChatModels(selector);
if (!models || models.length === 0) {
throw new Error('No available VS Code language models found (please ensure you have installed and signed in to a provider extension).');
}
const client = models[0];

// Build messages: the LM API has only User/Assistant roles, so send the system prompt as an Assistant message, then history/prompt
const messages: any[] = [];
const systemPrompt = this.getSystemPrompt();
messages.push((vscode as any).LanguageModelChatMessage.Assistant(systemPrompt));

if (conversationHistory && conversationHistory.length > 0) {
for (const msg of conversationHistory) {
if (msg.role === 'user') {
if (typeof msg.content === 'string') {
messages.push((vscode as any).LanguageModelChatMessage.User(msg.content));
} else if (Array.isArray(msg.content)) {
const textParts = msg.content
.filter((p: any) => p.type === 'text' && typeof p.text === 'string')
.map((p: any) => new (vscode as any).LanguageModelTextPart(p.text));
if (textParts.length > 0) {
messages.push((vscode as any).LanguageModelChatMessage.User(textParts));
}
}
} else if (msg.role === 'assistant') {
if (typeof msg.content === 'string') {
messages.push((vscode as any).LanguageModelChatMessage.Assistant(msg.content));
} else if (Array.isArray(msg.content)) {
const textParts = msg.content
.filter((p: any) => p.type === 'text' && typeof p.text === 'string')
.map((p: any) => new (vscode as any).LanguageModelTextPart(p.text));
if (textParts.length > 0) {
messages.push((vscode as any).LanguageModelChatMessage.Assistant(textParts));
}
}
}
}
} else if (prompt) {
messages.push((vscode as any).LanguageModelChatMessage.User(prompt));
}

// AbortController exposes .signal, not .token; bridge the abort signal to a VS Code CancellationToken
const cts = new vscode.CancellationTokenSource();
abortController?.signal.addEventListener('abort', () => cts.cancel());
const response = await client.sendRequest(messages, { justification: 'Superdesign wants to use VS Code LM model' }, cts.token);

for await (const chunk of response.stream) {
if (chunk instanceof (vscode as any).LanguageModelTextPart) {
const text = chunk.value;
const textMessage: CoreMessage = { role: 'assistant', content: text };
onMessage?.(textMessage);
} else if (chunk instanceof (vscode as any).LanguageModelToolCallPart) {
const toolCallMessage: CoreMessage = {
role: 'assistant',
content: [{
type: 'tool-call',
toolCallId: chunk.callId,
toolName: chunk.name,
args: chunk.input || {}
}]
} as any;
onMessage?.(toolCallMessage);
}
}

} catch (err) {
this.outputChannel.appendLine(`VS Code LM flow error: ${err}`);
throw err;
}
}

get isReady(): boolean {
return this.isInitialized;
}
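Note that `queryViaVsCodeLM` passes an empty selector and always takes `models[0]`, so the specific `vscodelm/<vendor>/<family>` id chosen in the sidebar is currently ignored. A sketch of how that id could be parsed back into a `LanguageModelChatSelector` (illustrative helper, not part of this diff):

```ts
import * as vscode from 'vscode';

// Parse "vscodelm/<vendor>/<family>" back into a selector; fall back to the
// default selection for "vscodelm" or "vscodelm/auto".
async function pickModel(modelId: string): Promise<vscode.LanguageModelChat | undefined> {
    const [, vendor, family] = modelId.split('/');
    const selector: vscode.LanguageModelChatSelector =
        vendor && family && family !== 'auto' ? { vendor, family } : {};
    const models = await vscode.lm.selectChatModels(selector);
    return models[0];
}
```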