diff --git a/README.md b/README.md index 184db529..48a18a38 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ Easy LLM CLI supports connecting to any OpenAI-compatible LLM API. You can confi # Enable custom LLM support export USE_CUSTOM_LLM=true -export CUSTOM_LLM_PROVIDER="openai" # LLM provider +export CUSTOM_LLM_PROVIDER="openai" # LLM provider, if you use Azure OpenAI, set to "azure" export CUSTOM_LLM_API_KEY="your-api-key" # Your LLM provider API key export CUSTOM_LLM_ENDPOINT="https://api.your-llm-provider.com/v1" # API endpoint export CUSTOM_LLM_MODEL_NAME="your-model-name" # Model name diff --git a/README.zh-CN.md b/README.zh-CN.md index 7d974af9..6964035f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -66,6 +66,8 @@ Easy LLM 命令行工具支持连接到任何兼容 OpenAI 的 LLM API。你可 ```bash # 启用自定义 LLM 支持 export USE_CUSTOM_LLM=true + +export CUSTOM_LLM_PROVIDER="openai" # LLM provider, 如果你使用 Azure OpenAI, 请设置为 "azure" export CUSTOM_LLM_API_KEY="your-api-key" # 你的 LLM 提供商 API 密钥 export CUSTOM_LLM_ENDPOINT="https://api.your-llm-provider.com/v1" # API 端点 export CUSTOM_LLM_MODEL_NAME="your-model-name" # 模型名称 diff --git a/packages/core/src/custom_llm/converter.ts b/packages/core/src/custom_llm/converter.ts index a26ed104..fa33c2ce 100644 --- a/packages/core/src/custom_llm/converter.ts +++ b/packages/core/src/custom_llm/converter.ts @@ -341,7 +341,7 @@ export class ModelConverter { } } - if (choice.finish_reason === 'tool_calls' && toolCallMap.size > 0) { + if (choice?.finish_reason === 'tool_calls' && toolCallMap.size > 0) { const response = this.toGeminiStreamToolCallsResponse(toolCallMap); toolCallMap.clear(); return { diff --git a/packages/core/src/custom_llm/index.ts b/packages/core/src/custom_llm/index.ts index f36a5186..d214a8d7 100644 --- a/packages/core/src/custom_llm/index.ts +++ b/packages/core/src/custom_llm/index.ts @@ -12,7 +12,7 @@ import { EmbedContentResponse, EmbedContentParameters, } from '@google/genai'; -import OpenAI from 'openai'; +import { OpenAI, 
AzureOpenAI } from 'openai'; import { ContentGenerator } from '../core/contentGenerator.js'; import { CustomLLMContentGeneratorConfig, ToolCallMap } from './types.js'; import { extractToolFunctions } from './util.js'; @@ -23,6 +23,7 @@ export class CustomLLMContentGenerator implements ContentGenerator { private apiKey: string = process.env.CUSTOM_LLM_API_KEY || ''; private baseURL: string = process.env.CUSTOM_LLM_ENDPOINT || ''; private modelName: string = process.env.CUSTOM_LLM_MODEL_NAME || ''; + private apiVersion: string = process.env.OPENAI_API_VERSION || '2024-12-01-preview'; private temperature: number = Number(process.env.CUSTOM_LLM_TEMPERATURE || 0); private maxTokens: number = Number(process.env.CUSTOM_LLM_MAX_TOKENS || 8192); private topP: number = Number(process.env.CUSTOM_LLM_TOP_P || 1); @@ -34,11 +35,20 @@ export class CustomLLMContentGenerator implements ContentGenerator { }; constructor() { - this.model = new OpenAI({ - apiKey: this.apiKey, - baseURL: this.baseURL, - }); - } + const provider = process.env.CUSTOM_LLM_PROVIDER || 'openai'; + if (provider === 'azure') { + this.model = new AzureOpenAI({ + apiKey: this.apiKey, + endpoint: this.baseURL, + apiVersion: this.apiVersion + }); + } else { + this.model = new OpenAI({ + apiKey: this.apiKey, + baseURL: this.baseURL + }); + } + } /** * Asynchronously generates content responses in a streaming fashion.