diff --git a/AGENTS.md b/AGENTS.md index 4b1ec02..41296b6 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,4 +1,4 @@ -# AGENTS.md - Commit Sage Developer Guide +# Commit Sage Developer Guide This file provides guidelines for agents working on the Commit Sage codebase. diff --git a/CHANGELOG.md b/CHANGELOG.md index e82d85f..eab6664 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,50 @@ # Changelog +## [Unreleased] + +### Features + +- **New AI Providers**: Add support for four additional AI providers + - **Moonshot AI (Kimi models)** via `@ai-sdk/moonshotai` (official SDK) + - Set `provider.type = "moonshotai"` and `MOONSHOTAI_API_KEY` + - Default model: `kimi-k2.5` (flagship multimodal + reasoning model) + - Reasoning mode supported via `` block middleware + - Register at + - **Z.AI (GLM models)** via `@ai-sdk/openai` adapter with custom base URL + - Set `provider.type = "zai"` and `ZAI_API_KEY` + - Default model: `glm-4.5-flash` (fast GLM model) + - Uses the international Z.AI platform (`api.z.ai`) — no extra dependency + - Register at + - **MiniMax** via `vercel-minimax-ai-provider` (provider-maintained SDK) + - Set `provider.type = "minimax"` and `MINIMAX_API_KEY` + - Default model: `MiniMax-M2.5` + - Uses OpenAI-compatible adapter + - Register at + - **OpenRouter** via `@openrouter/ai-sdk-provider` (OpenRouter-maintained SDK) + - Set `provider.type = "openrouter"` and `OPENROUTER_API_KEY` + - Requires a dedicated `openrouter` config section with `model` and optional `baseUrl` + - Default model: `openai/gpt-4.1-mini` (model IDs are provider-prefixed, e.g. + `anthropic/claude-opus-4-5`, `meta-llama/llama-3.1-405b-instruct`) + - Routes to hundreds of models from a single API key + - Register at + +- **DeepSeek integration confirmed**: The existing DeepSeek integration + (introduced in v1.6.0) has been audited and verified complete across all + integration touchpoints (types, registry, config migration, validation). + +### Configuration + +- New `openrouter` config section added to `config.json` (alongside the existing + `ollama` section) for OpenRouter-specific settings +- Default values added to `DEFAULT_CONFIG` for all new providers +- Config migration (`migrateConfig`) handles automatic model-to-provider + detection for Kimi (`kimi-*`), GLM (`glm-*`), and MiniMax (`MiniMax-*`) models + +### Validation + +- `OPENROUTER_API_KEY` is validated against the known `sk-or-v1-` prefix format +- `config.schema.json` updated with new provider types and `openrouter` section + ## [1.6.0](https://github.com/AhmedOsman101/commit-sage-cli/compare/v1.5.0...v1.6.0) (2026-02-26) ### Features diff --git a/config.schema.json b/config.schema.json index 9eb5bcc..c19a970 100644 --- a/config.schema.json +++ b/config.schema.json @@ -30,6 +30,21 @@ } } }, + "openrouter": { + "type": "object", + "required": ["model"], + "properties": { + "model": { + "type": "string", + "description": "OpenRouter model ID, e.g. 'openai/gpt-4.1-mini' or 'anthropic/claude-opus-4-5'" + }, + "baseUrl": { + "type": "string", + "format": "uri", + "description": "OpenRouter API base URL (defaults to https://openrouter.ai/api/v1)" + } + } + }, "commit": { "type": "object", "required": ["commitFormat", "onlyStagedChanges", "commitLanguage"], @@ -69,7 +84,11 @@ "deepseek", "mistral", "xai", - "ollama" + "ollama", + "moonshotai", + "zai", + "minimax", + "openrouter" ], "description": "AI provider type" }, diff --git a/deno.json b/deno.json index 8090d8d..39c605e 100644 --- a/deno.json +++ b/deno.json @@ -18,8 +18,10 @@ "@ai-sdk/deepseek": "npm:@ai-sdk/deepseek@^2.0.20", "@ai-sdk/google": "npm:@ai-sdk/google@^3.0.33", "@ai-sdk/mistral": "npm:@ai-sdk/mistral@^3.0.20", + "@ai-sdk/moonshotai": "npm:@ai-sdk/moonshotai@^2.0.10", "@ai-sdk/openai": "npm:@ai-sdk/openai@^3.0.36", "@ai-sdk/xai": "npm:@ai-sdk/xai@^3.0.59", + "@openrouter/ai-sdk-provider": "npm:@openrouter/ai-sdk-provider@^2.2.5", "@arrirpc/schema": "npm:@arrirpc/schema@^0.81.2", "@cliffy/prompt": "jsr:@cliffy/prompt@^1.0.0", "@std/fmt": "jsr:@std/fmt@1.0.9", @@ -28,6 +30,7 @@ "husky": "npm:husky@^9.1.7", "lib-result": "npm:lib-result@^3.2.2", "ollama-ai-provider-v2": "npm:ollama-ai-provider-v2@^3.3.1", + "vercel-minimax-ai-provider": "npm:vercel-minimax-ai-provider@^0.0.2", "@/": "./src/" }, "lint": { diff --git a/deno.lock b/deno.lock index 1f28420..9f4653d 100644 --- a/deno.lock +++ b/deno.lock @@ -17,15 +17,20 @@ "npm:@ai-sdk/deepseek@^2.0.20": "2.0.20_zod@4.1.12", "npm:@ai-sdk/google@^3.0.33": "3.0.33_zod@4.1.12", "npm:@ai-sdk/mistral@^3.0.20": "3.0.20_zod@4.1.12", + "npm:@ai-sdk/moonshotai@^2.0.10": "2.0.10_zod@4.1.12", "npm:@ai-sdk/openai@^3.0.36": "3.0.36_zod@4.1.12", "npm:@ai-sdk/xai@^3.0.59": "3.0.59_zod@4.1.12", "npm:@arrirpc/schema@~0.81.2": "0.81.2", + "npm:@openrouter/ai-sdk-provider@2.2.5": "2.2.5_ai@6.0.103__zod@4.1.12_zod@4.1.12", + "npm:@openrouter/ai-sdk-provider@^2.2.5": "2.2.5_ai@6.0.103__zod@4.1.12_zod@4.1.12", "npm:@types/node@*": "22.15.15", "npm:ai@^6.0.103": "6.0.103_zod@4.1.12", "npm:axios@1.13.5": "1.13.5", "npm:husky@^9.1.7": "9.1.7", "npm:lib-result@^3.2.2": "3.2.2", - "npm:ollama-ai-provider-v2@^3.3.1": "3.3.1_ai@6.0.103__zod@4.1.12_zod@4.1.12" + "npm:ollama-ai-provider-v2@^3.3.1": "3.3.1_ai@6.0.103__zod@4.1.12_zod@4.1.12", + "npm:vercel-minimax-ai-provider@0.0.2": "0.0.2_zod@4.1.12", + "npm:vercel-minimax-ai-provider@^0.0.2": "0.0.2_zod@4.1.12" }, "jsr": { "@cliffy/ansi@1.0.0": { @@ -84,24 +89,32 @@ "@ai-sdk/anthropic@3.0.47_zod@4.1.12": { "integrity": "sha512-E6Z3i/xvxGDxRskMMbuX9+xDK4l5LesrP2O7YQ0CcbAkYP25qTo/kYGf/AsJrLkNIY23HeO/kheUWtG1XZllDA==", "dependencies": [ - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", + "zod" + ] + }, + "@ai-sdk/anthropic@3.0.6_zod@4.1.12": { + "integrity": "sha512-Ns5OOPHXbODzitvqCySnAFZCAm9ldpx+fdbC0c/f9QwX5b4MQtQJIQ0xZyKm+tB/ynBoeV6zhtyWDXjYeVEWIw==", + "dependencies": [ + "@ai-sdk/provider@3.0.1", + "@ai-sdk/provider-utils@4.0.3_zod@4.1.12", "zod" ] }, "@ai-sdk/deepseek@2.0.20_zod@4.1.12": { "integrity": "sha512-MAL04sDTOWUiBjAGWaVgyeE4bYRb9QpKYRlIeCTZFga6I8yQs50XakhWEssrmvVihdpHGkqpDtCHsFqCydsWLA==", "dependencies": [ - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", "zod" ] }, "@ai-sdk/gateway@3.0.57_zod@4.1.12": { "integrity": "sha512-3MugqOlGfCOjlsBGGARJ5Zrioh78X3+rulHCayCMPySYKY+wc8GGFlFCCh4mleWQFShjMyqWT7eeLTuVSj/WSg==", "dependencies": [ - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", "@vercel/oidc", "zod" ] @@ -109,44 +122,100 @@ "@ai-sdk/google@3.0.33_zod@4.1.12": { "integrity": "sha512-ElHkhMGMJ1MY5AlwLljWWE1jj+Bs3cMyq0KbeWUu2H89OsMAORiE4cB3xhfLlSIEnVmVKx/YHjoW3bN+DFI24A==", "dependencies": [ - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", "zod" ] }, "@ai-sdk/mistral@3.0.20_zod@4.1.12": { "integrity": "sha512-oZcx2pE6nJ+Qj/U6HFV5mJ52jXJPBSpvki/NtIocZkI/rKxphKBaecOH1h0Y7yK3HIbBxsMqefB1pb72cAHGVg==", "dependencies": [ - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", + "zod" + ] + }, + "@ai-sdk/moonshotai@2.0.10_zod@4.1.12": { + "integrity": "sha512-XtBqVQHb6069XQQARtjOq1MxbrA56Ox2hTP3tmsnFVUlXMvS+SINCL6mU7Lq3oFQKADXjjEQibq49e7Gee9n1A==", + "dependencies": [ + "@ai-sdk/openai-compatible@2.0.35_zod@4.1.12", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.19_zod@4.1.12", "zod" ] }, "@ai-sdk/openai-compatible@2.0.30_zod@4.1.12": { "integrity": "sha512-iTjumHf1/u4NhjXYFn/aONM2GId3/o7J1Lp5ql8FCbgIMyRwrmanR5xy1S3aaVkfTscuDvLTzWiy1mAbGzK3nQ==", "dependencies": [ - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", + "zod" + ] + }, + "@ai-sdk/openai-compatible@2.0.35_zod@4.1.12": { + "integrity": "sha512-g3wA57IAQFb+3j4YuFndgkUdXyRETZVvbfAWM+UX7bZSxA3xjes0v3XKgIdKdekPtDGsh4ZX2byHD0gJIMPfiA==", + "dependencies": [ + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.19_zod@4.1.12", "zod" ] }, "@ai-sdk/openai@3.0.36_zod@4.1.12": { "integrity": "sha512-foY3onGY8l3q9niMw0Cwe9xrYnm46keIWL57NRw6F3DKzSW9TYTfx0cQJs/j8lXJ8lPzqNxpMO/zXOkqCUt3IQ==", "dependencies": [ - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", "zod" ] }, "@ai-sdk/provider-utils@4.0.15_zod@4.1.12": { "integrity": "sha512-8XiKWbemmCbvNN0CLR9u3PQiet4gtEVIrX4zzLxnCj06AwsEDJwJVBbKrEI4t6qE8XRSIvU2irka0dcpziKW6w==", "dependencies": [ - "@ai-sdk/provider", + "@ai-sdk/provider@3.0.8", + "@standard-schema/spec@1.1.0", + "eventsource-parser", + "zod" + ] + }, + "@ai-sdk/provider-utils@4.0.19_zod@4.1.12": { + "integrity": "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg==", + "dependencies": [ + "@ai-sdk/provider@3.0.8", + "@standard-schema/spec@1.1.0", + "eventsource-parser", + "zod" + ] + }, + "@ai-sdk/provider-utils@4.0.3_zod@4.1.12": { + "integrity": "sha512-Vo2p61dDld8Dy/O66zKQpE4nqHojiEEYEjZcSbICjE7h8Z6QmHzBfd+ss/paIDdyXyS0yHmC1GoRYYKo89cqZQ==", + "dependencies": [ + "@ai-sdk/provider@3.0.1", "@standard-schema/spec@1.1.0", "eventsource-parser", "zod" ] }, + "@ai-sdk/provider-utils@4.0.4_zod@4.1.12": { + "integrity": "sha512-VxhX0B/dWGbpNHxrKCWUAJKXIXV015J4e7qYjdIU9lLWeptk0KMLGcqkB4wFxff5Njqur8dt8wRi1MN9lZtDqg==", + "dependencies": [ + "@ai-sdk/provider@3.0.2", + "@standard-schema/spec@1.1.0", + "eventsource-parser", + "zod" + ] + }, + "@ai-sdk/provider@3.0.1": { + "integrity": "sha512-2lR4w7mr9XrydzxBSjir4N6YMGdXD+Np1Sh0RXABh7tWdNFFwIeRI1Q+SaYZMbfL8Pg8RRLcrxQm51yxTLhokg==", + "dependencies": [ + "json-schema" + ] + }, + "@ai-sdk/provider@3.0.2": { + "integrity": "sha512-HrEmNt/BH/hkQ7zpi2o6N3k1ZR1QTb7z85WYhYygiTxOQuaml4CMtHCWRbric5WPU+RNsYI7r1EpyVQMKO1pYw==", + "dependencies": [ + "json-schema" + ] + }, "@ai-sdk/provider@3.0.8": { "integrity": "sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==", "dependencies": [ @@ -156,9 +225,9 @@ "@ai-sdk/xai@3.0.59_zod@4.1.12": { "integrity": "sha512-lwY3yTZ43rmLsnffGLxEKF7ikuxKhqWLFnZdglwV4mgGiwbExLrIpHkMQUTXis5OmlJb66r+gfu+udoWdRKDrA==", "dependencies": [ - "@ai-sdk/openai-compatible", - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/openai-compatible@2.0.30_zod@4.1.12", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", "zod" ] }, @@ -177,6 +246,13 @@ "scule" ] }, + "@openrouter/ai-sdk-provider@2.2.5_ai@6.0.103__zod@4.1.12_zod@4.1.12": { + "integrity": "sha512-IgM96gPvpxMZYYJQSIuXqvHX0mUXHEvsa/AtIlfb1VK4ek584ydAzc/wf3IuKxNof15o38WZMpCwfsOFHv96Jg==", + "dependencies": [ + "ai", + "zod" + ] + }, "@opentelemetry/api@1.9.0": { "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==" }, @@ -199,8 +275,8 @@ "integrity": "sha512-4eY6Ut4u41zKH+P2S/oLlZrwxeWQh4kIV1FjE34Jhoiwg+v1AyfSYM8FslXk9rTAtIIaOBimrCUqXacC5RBqJw==", "dependencies": [ "@ai-sdk/gateway", - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.15_zod@4.1.12", "@opentelemetry/api", "zod" ] @@ -345,8 +421,8 @@ "ollama-ai-provider-v2@3.3.1_ai@6.0.103__zod@4.1.12_zod@4.1.12": { "integrity": "sha512-j4BBqqQnvf/uDz+aPYcgU4/MQZERw087Fn1DMGtViA/PgahBq36jHKHVoZfx8mxj+w8cxsKd3eYaDgyZPhE6YA==", "dependencies": [ - "@ai-sdk/provider", - "@ai-sdk/provider-utils", + "@ai-sdk/provider@3.0.8", + "@ai-sdk/provider-utils@4.0.19_zod@4.1.12", "ai", "zod" ] @@ -363,6 +439,15 @@ "undici-types@6.21.0": { "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==" }, + "vercel-minimax-ai-provider@0.0.2_zod@4.1.12": { + "integrity": "sha512-h9QzLL7RBmOreqWfr2fcoFVNTJgusENJVagVm8vAi+DBfd+1t+sVJZ/hAhKrtuCKCrm33BlOSWVdJehQFju5jQ==", + "dependencies": [ + "@ai-sdk/anthropic@3.0.6_zod@4.1.12", + "@ai-sdk/provider@3.0.2", + "@ai-sdk/provider-utils@4.0.4_zod@4.1.12", + "zod" + ] + }, "zod@4.1.12": { "integrity": "sha512-JInaHOamG8pt5+Ey8kGmdcAcg3OL9reK8ltczgHTAwNhMys/6ThXHityHxVV2p3fkw/c+MAvBHFVYHFZDmjMCQ==" } @@ -375,14 +460,17 @@ "npm:@ai-sdk/deepseek@^2.0.20", "npm:@ai-sdk/google@^3.0.33", "npm:@ai-sdk/mistral@^3.0.20", + "npm:@ai-sdk/moonshotai@^2.0.10", "npm:@ai-sdk/openai@^3.0.36", "npm:@ai-sdk/xai@^3.0.59", "npm:@arrirpc/schema@~0.81.2", + "npm:@openrouter/ai-sdk-provider@^2.2.5", "npm:ai@^6.0.103", "npm:axios@1.13.5", "npm:husky@^9.1.7", "npm:lib-result@^3.2.2", - "npm:ollama-ai-provider-v2@^3.3.1" + "npm:ollama-ai-provider-v2@^3.3.1", + "npm:vercel-minimax-ai-provider@^0.0.2" ] } } diff --git a/src/lib/configServiceTypes.d.ts b/src/lib/configServiceTypes.d.ts index 8ce5032..f9fdbed 100644 --- a/src/lib/configServiceTypes.d.ts +++ b/src/lib/configServiceTypes.d.ts @@ -7,7 +7,13 @@ type GeneralConfig = { // Configuration for the Ollama provider (self-hosted, requires baseUrl) type OllamaConfig = { model: string; - baseUrl: "http://localhost:11434" | (string & {}); + baseUrl?: "http://localhost:11434" | (string & {}); +}; + +// Configuration for the OpenRouter meta-provider +type OpenRouterConfig = { + model: string; + baseUrl?: "https://openrouter.ai/api/v1" | (string & {}); }; // Configuration for commit-related settings @@ -28,7 +34,11 @@ export type ProviderType = | "deepseek" | "mistral" | "xai" - | "ollama"; + | "ollama" + | "moonshotai" + | "zai" + | "minimax" + | "openrouter"; // Configuration for the provider selection type ProviderConfig = { @@ -43,6 +53,7 @@ export type Config = { readonly $schema: "https://raw.githubusercontent.com/AhmedOsman101/commit-sage-cli/refs/heads/main/config.schema.json"; general: GeneralConfig; ollama: OllamaConfig; + openrouter: OpenRouterConfig; commit: CommitConfig; provider: ProviderConfig; }; @@ -53,7 +64,11 @@ export type ApiService = | "Anthropic" | "DeepSeek" | "Mistral" - | "Xai"; + | "Xai" + | "MoonshotAI" + | "Zai" + | "MiniMax" + | "OpenRouter"; export type ConfigSection = keyof Config; export type ConfigKey = keyof Config[T]; diff --git a/src/lib/constants.ts b/src/lib/constants.ts index 26e8728..82d5b12 100644 --- a/src/lib/constants.ts +++ b/src/lib/constants.ts @@ -41,6 +41,10 @@ export const DEFAULT_CONFIG: Readonly = { model: "llama3.2", baseUrl: "http://localhost:11434/api", }, + openrouter: { + model: "openai/gpt-4.1-mini", + baseUrl: "https://openrouter.ai/api/v1", + }, commit: { autoCommit: false, autoPush: false, diff --git a/src/lib/logger.ts b/src/lib/logger.ts index b18895a..95943be 100644 --- a/src/lib/logger.ts +++ b/src/lib/logger.ts @@ -14,6 +14,9 @@ import FileLogger from "@/services/fileLogger.ts"; const encoder = new TextEncoder(); +// Cached debug flag - checked once at module load +const DEBUG_ENABLED = Deno.env.get("DEBUG") === "1"; + function toCustomString(value: unknown, indentLevel = 0): string { const indent = " ".repeat(indentLevel); // 2 spaces for indentation @@ -104,7 +107,10 @@ export function logSuccess(...data: unknown[]): void { } export function logDebug(...data: unknown[]): void { + if (!DEBUG_ENABLED) return; + + const timestamp = new Date().toISOString().replace("T", "@").substring(0, 22); const message = makeOutput(...data); - console.log(`${magenta("[DEBUG]")} ${message}`); + console.log(`${magenta("[DEBUG]")} [${timestamp}] ${message}`); FileLogger.debug(message); } diff --git a/src/services/aiService.ts b/src/services/aiService.ts index 5b1ae5f..5e48030 100644 --- a/src/services/aiService.ts +++ b/src/services/aiService.ts @@ -2,9 +2,11 @@ import { Err, ErrFromText, ErrFromUnknown, Ok, type Result } from "lib-result"; import type { ProviderType } from "@/lib/configServiceTypes.d.ts"; import { ERROR_MESSAGES } from "@/lib/constants.ts"; import type { CommitMessage } from "@/lib/index.d.ts"; +import { logDebug } from "@/lib/logger.ts"; import ConfigService from "./configService.ts"; import GitBlameAnalyzer from "./gitBlameAnalyzer.ts"; import GitService from "./gitService.ts"; +import OpenRouterService from "./openrouterService.ts"; import PromptService from "./promptService.ts"; import { getProviderService } from "./providerRegistry.ts"; @@ -21,30 +23,63 @@ const AiService = { diff: string, blameAnalysis: string ): Promise> { + logDebug( + `[aiService.generateCommitMessage] ENTRY diff.length=${diff.length}, hasBlame=${!!blameAnalysis}` + ); + if (!diff) return ErrFromText(ERROR_MESSAGES.noChanges); const truncatedDiff = this.truncateDiff(diff); + logDebug( + `[aiService.generateCommitMessage] STEP truncated diff, length=${truncatedDiff.length}` + ); + const prompt = await PromptService.generatePrompt( truncatedDiff, blameAnalysis ); + logDebug( + `[aiService.generateCommitMessage] STEP prompt generated, length=${prompt.length}` + ); const providerResult = await ConfigService.get("provider", "type"); if (providerResult.isError()) return Err(providerResult.error); const providerType = providerResult.ok as ProviderType; + logDebug(`[aiService.generateCommitMessage] STEP provider=${providerType}`); try { + // OpenRouter reads from its own config section (not provider.model) + if (providerType === "openrouter") { + logDebug("[aiService.generateCommitMessage] CALL OpenRouterService"); + const commitMessage = await OpenRouterService.generateCommitMessage( + prompt, + 1 + ); + logDebug( + `[aiService.generateCommitMessage] EXIT message="${commitMessage.message.substring(0, 50)}..."` + ); + return Ok(commitMessage); + } + const Service = getProviderService(providerType); + logDebug(`[aiService.generateCommitMessage] CALL ${Service.name}`); const commitMessage = await Service.generateCommitMessage(prompt, 1); + logDebug( + `[aiService.generateCommitMessage] EXIT message="${commitMessage.message.substring(0, 50)}..."` + ); return Ok(commitMessage); } catch (error) { + logDebug(`[aiService.generateCommitMessage] ERROR ${error}`); return ErrFromUnknown(error); } }, async generateAndApplyMessage(): Promise> { + logDebug("[aiService.generateAndApplyMessage] ENTRY"); + GitService.initialize(); + logDebug("[aiService.generateAndApplyMessage] STEP git initialized"); const onlyStagedResult = await ConfigService.get( "commit", @@ -56,16 +91,25 @@ const AiService = { const hasStagedChanges = GitService.hasChanges("staged"); const useStagedChanges = onlyStagedSetting || hasStagedChanges; + logDebug( + `[aiService.generateAndApplyMessage] STEP useStagedChanges=${useStagedChanges}` + ); const diffResult = await GitService.getDiff(useStagedChanges); if (diffResult.isError()) return Err(diffResult.error); const diff = diffResult.ok; + logDebug( + `[aiService.generateAndApplyMessage] STEP diff length=${diff.length}` + ); const changedFilesResult = GitService.getChangedFiles(useStagedChanges); if (changedFilesResult.isError()) return Err(changedFilesResult.error); const changedFiles = changedFilesResult.ok; + logDebug( + `[aiService.generateAndApplyMessage] STEP changed files=${changedFiles.length}` + ); const analysesPromises = changedFiles.map(file => GitBlameAnalyzer.analyzeChanges(file) @@ -82,7 +126,26 @@ const AiService = { } } - return await this.generateCommitMessage(diff, blameAnalysis.join("\n\n")); + logDebug( + `[aiService.generateAndApplyMessage] STEP blame analyses=${blameAnalysis.length}` + ); + + const result = await this.generateCommitMessage( + diff, + blameAnalysis.join("\n\n") + ); + + if (result.isOk()) { + logDebug( + `[aiService.generateAndApplyMessage] EXIT success message="${result.ok.message.substring(0, 50)}..."` + ); + } else { + logDebug( + `[aiService.generateAndApplyMessage] EXIT error=${result.error.message}` + ); + } + + return result; }, }; diff --git a/src/services/configService.ts b/src/services/configService.ts index 1621561..a4e38d5 100644 --- a/src/services/configService.ts +++ b/src/services/configService.ts @@ -7,6 +7,7 @@ import type { ConfigKey, ConfigSection, ConfigValue, + ProviderType, } from "@/lib/configServiceTypes.d.ts"; import { CONFIG_PATH, DEFAULT_CONFIG, OS } from "@/lib/constants.ts"; import { AiServiceError, ConfigurationError } from "@/lib/errors.ts"; @@ -18,17 +19,17 @@ import KeyValidationService from "./keyValidationService.ts"; class ConfigService { protected static shell = ""; - static migrateConfig(config: Record): Config { + static async migrateConfig( + config: Record + ): Promise> { const provider = config.provider as Record | undefined; - if (!provider) { - return config as Config; - } + if (!provider) return Ok(true); const hasType = "type" in provider; const hasModel = "model" in provider; - const modelMap: Record = { + const modelMap: Record = { gemini: "gemini-2.5-flash-lite", openai: "gpt-5-nano", anthropic: "claude-sonnet-4-5", @@ -36,23 +37,33 @@ class ConfigService { mistral: "mistral-small-latest", xai: "grok-3-mini", ollama: "llama3.2", + moonshotai: "kimi-k2.5", + zai: "glm-4.5-flash", + minimax: "MiniMax-M2.5", + openrouter: "openai/gpt-4.1-mini", }; // Case 1: Has type but no model - add default model if (hasType && !hasModel) { - const oldType = provider.type as string; + const oldType = provider.type as ProviderType; const newModel = modelMap[oldType] || "gemini-2.5-flash-lite"; logInfo("Migrating config: adding provider.model..."); logInfo(` type="${oldType}", model="${newModel}"`); + const updateTypeResult = await ConfigService.set( + "provider", + "type", + oldType as ProviderType + ); + if (updateTypeResult.isError()) return Err(updateTypeResult.error); + const updateModelResult = await ConfigService.set( + "provider", + "model", + newModel + ); + if (updateModelResult.isError()) return Err(updateModelResult.error); - return { - ...config, - provider: { - type: oldType, - model: newModel, - }, - } as Config; + return Ok(true); } // Case 2: Has model but no type - try to detect type from model string @@ -60,7 +71,7 @@ class ConfigService { const model = provider.model as string; // Detect provider from model string (e.g., "google/gemini-2.5-flash-lite" -> "gemini") - let detectedType = "gemini"; + let detectedType: ProviderType = "gemini"; if ( model.startsWith("gpt-") || model.startsWith("o1") || @@ -78,13 +89,19 @@ class ConfigService { detectedType = "mistral"; } else if (model.startsWith("grok-")) { detectedType = "xai"; + } else if (model.startsWith("kimi-")) { + detectedType = "moonshotai"; + } else if (model.startsWith("glm-")) { + detectedType = "zai"; + } else if (model.startsWith("MiniMax-")) { + detectedType = "minimax"; } else if (model.includes("/")) { // Handle "google/gemini-2.5-flash-lite" format - detectedType = model.split("/")[0]; + detectedType = model.split("/")[0] as ProviderType; } // Fix common provider names - if (detectedType === "google") detectedType = "gemini"; + if ((detectedType as string) === "google") detectedType = "gemini"; const newModel = modelMap[detectedType] || model; @@ -93,16 +110,23 @@ class ConfigService { ` model="${model}", detected type="${detectedType}", using model="${newModel}"` ); - return { - ...config, - provider: { - type: detectedType, - model: newModel, - }, - } as Config; + const updateTypeResult = await ConfigService.set( + "provider", + "type", + detectedType as ProviderType + ); + if (updateTypeResult.isError()) return Err(updateTypeResult.error); + const updateModelResult = await ConfigService.set( + "provider", + "model", + newModel + ); + if (updateModelResult.isError()) return Err(updateModelResult.error); + + return Ok(true); } - return config as Config; + return Ok(true); } static async createConfigFile(): Promise> { @@ -142,7 +166,15 @@ class ConfigService { } const parsedConfig = JSON.parse(configContents); - const migratedConfig = await ConfigService.migrateConfig(parsedConfig); + const migrationResult = await ConfigService.migrateConfig(parsedConfig); + if (migrationResult.isError()) { + return Err(migrationResult.error); + } + + // Convert parsed config to Config type for validation + // Note: we use the parsed config directly rather than re-loading, + // to avoid infinite migration loop + const migratedConfig = parsedConfig as unknown as Config; const validation = ConfigValidationService.validate(migratedConfig); if (validation.isError()) logError(validation.error.message); @@ -160,7 +192,11 @@ class ConfigService { const configResult = await ConfigService.load(); if (configResult.isError()) return Err(configResult.error); - const value = configResult.ok[section][key] ?? DEFAULT_CONFIG[section][key]; + const sectionValue = configResult.ok[section]; + const value = + sectionValue && typeof sectionValue === "object" && key in sectionValue + ? sectionValue[key] + : DEFAULT_CONFIG[section]?.[key]; return Ok(value); } @@ -291,7 +327,17 @@ After adding the line, restart your terminal or run 'source ${shellConfigFile}' case "Anthropic": case "DeepSeek": case "Mistral": - case "Xai": { + case "Xai": + case "MoonshotAI": + case "Zai": + case "MiniMax": { + break; + } + case "OpenRouter": { + const { error } = KeyValidationService.validateOpenRouterApiKey(key); + if (error !== undefined) { + throw new AiServiceError(error.message, { cause: error }); + } break; } } diff --git a/src/services/configValidationService.ts b/src/services/configValidationService.ts index 845c7b0..aaecc4e 100644 --- a/src/services/configValidationService.ts +++ b/src/services/configValidationService.ts @@ -15,6 +15,10 @@ const SUPPORTED_PROVIDERS: ProviderType[] = [ "mistral", "xai", "ollama", + "moonshotai", + "zai", + "minimax", + "openrouter", ]; const ConfigSchema = a.object( @@ -28,10 +32,18 @@ const ConfigSchema = a.object( initialRetryDelayMs: a.uint16(), }) ), - ollama: a.object({ - model: a.string(), - baseUrl: a.optional(a.string()), - }), + ollama: a.optional( + a.object({ + model: a.string(), + baseUrl: a.optional(a.string()), + }) + ), + openrouter: a.optional( + a.object({ + model: a.string(), + baseUrl: a.optional(a.string()), + }) + ), commit: a.object({ autoCommit: a.optional(a.boolean()), autoPush: a.optional(a.boolean()), @@ -126,7 +138,10 @@ const ConfigValidationService = { } return Ok(true); }, - validateModelUrl(model: object, name: "ollama"): Result { + validateModelUrl( + model: object, + name: "ollama" | "openrouter" + ): Result { if ("baseUrl" in model) { const baseUrl = this.validateUrl(model.baseUrl); if (baseUrl.isError()) { @@ -198,6 +213,15 @@ const ConfigValidationService = { this.validateModelUrl(configContent.ollama, "ollama"); } } + + if ("openrouter" in configContent) { + if ( + typeof configContent.openrouter === "object" && + configContent.openrouter !== null + ) { + this.validateModelUrl(configContent.openrouter, "openrouter"); + } + } } return Ok(configContent as Config); diff --git a/src/services/gitBlameAnalyzer.ts b/src/services/gitBlameAnalyzer.ts index 3f3e55d..fc555fc 100644 --- a/src/services/gitBlameAnalyzer.ts +++ b/src/services/gitBlameAnalyzer.ts @@ -1,6 +1,7 @@ import * as path from "node:path"; import { Err, ErrFromText, Ok, type Result } from "lib-result"; import { ERROR_MESSAGES, REPO_PATH } from "@/lib/constants.ts"; +import { logDebug } from "@/lib/logger.ts"; import CommandService from "./commandService.ts"; import FileSystemService from "./fileSystemService.ts"; import GitService from "./gitService.ts"; @@ -93,6 +94,7 @@ class GitBlameAnalyzer { static async analyzeChanges( filePath: string ): Promise> { + logDebug(`[gitBlameAnalyzer.analyzeChanges] ENTRY filePath=${filePath}`); const normalizedPath = path.normalize(filePath.replace(/^\/+/, "")); // First check if file is deleted or new, as these don't need blame analysis diff --git a/src/services/gitService.ts b/src/services/gitService.ts index d2537d6..7c79716 100644 --- a/src/services/gitService.ts +++ b/src/services/gitService.ts @@ -7,7 +7,7 @@ import { NoRepositoriesFoundError, } from "@/lib/errors.ts"; import type { CommandOutput } from "@/lib/index.d.ts"; -import { logError } from "@/lib/logger.ts"; +import { logDebug, logError } from "@/lib/logger.ts"; import CommandService from "./commandService.ts"; import FileSystemService from "./fileSystemService.ts"; @@ -33,10 +33,12 @@ class GitService { static repoPath = ""; static initialize(): string { + logDebug("[gitService.initialize] ENTRY"); const repoPath = GitService.getRepoPath(); if (repoPath.isError()) logError(repoPath.error.message); GitService.setRepoPath(repoPath.ok); + logDebug(`[gitService.initialize] EXIT repoPath=${repoPath.ok}`); return repoPath.ok; } static execGit(args: string[]): Result { @@ -99,6 +101,9 @@ class GitService { static async getDiff( onlyStagedChanges: boolean ): Promise> { + logDebug( + `[gitService.getDiff] ENTRY onlyStagedChanges=${onlyStagedChanges}` + ); try { const hasHead = GitService.hasHead(); @@ -300,6 +305,7 @@ class GitService { } } static getChangedFiles(onlyStaged = false): Result { + logDebug(`[gitService.getChangedFiles] ENTRY onlyStaged=${onlyStaged}`); try { const outputResult = GitService.execGit(["status", "--porcelain"]); if (outputResult.isError()) return Err(outputResult.error); diff --git a/src/services/keyValidationService.ts b/src/services/keyValidationService.ts index 6291191..45282d3 100644 --- a/src/services/keyValidationService.ts +++ b/src/services/keyValidationService.ts @@ -15,6 +15,8 @@ const apiValidation = { `Custom API validation failed: ${status}`, invalidOpenaiKey: 'Invalid OpenAI API key format. Key should start with "sk-"', + invalidOpenRouterKey: + 'Invalid OpenRouter API key format. Key should start with "sk-or-v1-"', }, } as const; @@ -49,6 +51,15 @@ const KeyValidationService = { } return Ok(key); }, + validateOpenRouterApiKey(key: string): Result { + if (!key) { + return ErrFromText(apiValidation.errorMessages.emptyKey); + } + if (!key.startsWith("sk-or-v1-")) { + return ErrFromText(apiValidation.errorMessages.invalidOpenRouterKey); + } + return Ok(key); + }, }; export default KeyValidationService; diff --git a/src/services/minimaxService.ts b/src/services/minimaxService.ts new file mode 100644 index 0000000..6d2e4b7 --- /dev/null +++ b/src/services/minimaxService.ts @@ -0,0 +1,59 @@ +import { + extractReasoningMiddleware, + generateText, + wrapLanguageModel, +} from "ai"; +import { createMinimaxOpenAI } from "vercel-minimax-ai-provider"; +import type { CommitMessage } from "@/lib/index.d.ts"; +import { logDebug } from "@/lib/logger.ts"; +import ConfigService from "./configService.ts"; +import { ModelService } from "./modelService.ts"; + +class MinimaxService extends ModelService { + static override async generateCommitMessage( + prompt: string, + attempt = 1 + ): Promise { + logDebug( + `[minimaxService.generateCommitMessage] ENTRY attempt=${attempt}, prompt.length=${prompt.length}` + ); + try { + const apiKey = await ConfigService.getApiKey("MiniMax"); + const model = (await ConfigService.get("provider", "model")).unwrap(); + const maxRetries = await ModelService.getMaxRetries(); + + logDebug( + `[minimaxService.generateCommitMessage] CALL API model=${model}` + ); + + const client = createMinimaxOpenAI({ apiKey }); + + const wrappedModel = wrapLanguageModel({ + model: client(model), + middleware: extractReasoningMiddleware({ tagName: "think" }), + }); + + const { text } = await generateText({ + model: wrappedModel, + prompt, + temperature: 0.7, + maxRetries, + }); + + logDebug( + `[minimaxService.generateCommitMessage] EXIT message="${text.substring(0, 50)}..."` + ); + return { message: text, model }; + } catch (error) { + logDebug(`[minimaxService.generateCommitMessage] ERROR ${error}`); + return await MinimaxService.handleGenerationError( + error, + prompt, + attempt, + MinimaxService.generateCommitMessage.bind(MinimaxService) + ); + } + } +} + +export default MinimaxService; diff --git a/src/services/moonshotService.ts b/src/services/moonshotService.ts new file mode 100644 index 0000000..ab1bad9 --- /dev/null +++ b/src/services/moonshotService.ts @@ -0,0 +1,59 @@ +import { createMoonshotAI } from "@ai-sdk/moonshotai"; +import { + extractReasoningMiddleware, + generateText, + wrapLanguageModel, +} from "ai"; +import type { CommitMessage } from "@/lib/index.d.ts"; +import { logDebug } from "@/lib/logger.ts"; +import ConfigService from "./configService.ts"; +import { ModelService } from "./modelService.ts"; + +class MoonshotService extends ModelService { + static override async generateCommitMessage( + prompt: string, + attempt = 1 + ): Promise { + logDebug( + `[moonshotService.generateCommitMessage] ENTRY attempt=${attempt}, prompt.length=${prompt.length}` + ); + try { + const apiKey = await ConfigService.getApiKey("MoonshotAI"); + const model = (await ConfigService.get("provider", "model")).unwrap(); + const maxRetries = await ModelService.getMaxRetries(); + + logDebug( + `[moonshotService.generateCommitMessage] CALL API model=${model}, maxRetries=${maxRetries}` + ); + + const client = createMoonshotAI({ apiKey }); + + const wrappedModel = wrapLanguageModel({ + model: client(model), + middleware: extractReasoningMiddleware({ tagName: "think" }), + }); + + const { text } = await generateText({ + model: wrappedModel, + prompt, + temperature: 0.7, + maxRetries, + }); + + logDebug( + `[moonshotService.generateCommitMessage] EXIT message="${text.substring(0, 50)}..."` + ); + return { message: text, model }; + } catch (error) { + logDebug(`[moonshotService.generateCommitMessage] ERROR ${error}`); + return await MoonshotService.handleGenerationError( + error, + prompt, + attempt, + MoonshotService.generateCommitMessage.bind(MoonshotService) + ); + } + } +} + +export default MoonshotService; diff --git a/src/services/ollamaService.ts b/src/services/ollamaService.ts index dd757bd..dc51111 100644 --- a/src/services/ollamaService.ts +++ b/src/services/ollamaService.ts @@ -4,7 +4,9 @@ import { wrapLanguageModel, } from "ai"; import { createOllama } from "ollama-ai-provider-v2"; +import { DEFAULT_CONFIG } from "@/lib/constants.ts"; import type { CommitMessage } from "@/lib/index.d.ts"; +import { logDebug } from "@/lib/logger.ts"; import ConfigService from "./configService.ts"; import { ModelService } from "./modelService.ts"; @@ -13,10 +15,30 @@ class OllamaService extends ModelService { prompt: string, attempt = 1 ): Promise { - const baseURL = (await ConfigService.get("ollama", "baseUrl")).unwrap(); - const model = (await ConfigService.get("ollama", "model")).unwrap(); + logDebug( + `[ollamaService.generateCommitMessage] ENTRY attempt=${attempt}, prompt.length=${prompt.length}` + ); + + const baseURLResult = await ConfigService.get("ollama", "baseUrl"); + const baseURL = + baseURLResult.isOk() && baseURLResult.ok + ? baseURLResult.ok + : (DEFAULT_CONFIG.ollama.baseUrl as string); + + const modelResult = await ConfigService.get("provider", "model"); + if (modelResult.isError()) { + throw new Error( + "provider.model is required for Ollama. Please set it in your config." + ); + } + const model = modelResult.ok; + const maxRetries = await ModelService.getMaxRetries(); + logDebug( + `[ollamaService.generateCommitMessage] CALL API model=${model}, baseURL=${baseURL}` + ); + const ollama = createOllama({ baseURL }); try { diff --git a/src/services/openrouterService.ts b/src/services/openrouterService.ts new file mode 100644 index 0000000..40efe63 --- /dev/null +++ b/src/services/openrouterService.ts @@ -0,0 +1,87 @@ +/** + * OpenRouter Service — meta-provider routing to hundreds of AI models. + * Model IDs are provider-prefixed: e.g. "anthropic/claude-opus-4-5" or + * "openai/gpt-4.1-mini". Config lives in the dedicated 'openrouter' section. + * Required headers (HTTP-Referer, X-Title) satisfy OpenRouter usage policy. + * Register at https://openrouter.ai and set OPENROUTER_API_KEY. + */ +import { createOpenRouter } from "@openrouter/ai-sdk-provider"; +import { + extractReasoningMiddleware, + generateText, + wrapLanguageModel, +} from "ai"; +import { DEFAULT_CONFIG } from "@/lib/constants.ts"; +import type { CommitMessage } from "@/lib/index.d.ts"; +import { logDebug } from "@/lib/logger.ts"; +import ConfigService from "./configService.ts"; +import { ModelService } from "./modelService.ts"; + +class OpenRouterService extends ModelService { + static override async generateCommitMessage( + prompt: string, + attempt = 1 + ): Promise { + logDebug( + `[openrouterService.generateCommitMessage] ENTRY attempt=${attempt}, prompt.length=${prompt.length}` + ); + try { + const apiKey = await ConfigService.getApiKey("OpenRouter"); + + const modelResult = await ConfigService.get("provider", "model"); + if (modelResult.isError()) { + throw new Error( + "provider.model is required for OpenRouter. Please set it in your config." + ); + } + const model = modelResult.ok; + + const baseURLResult = await ConfigService.get("openrouter", "baseUrl"); + const baseURL = + baseURLResult.isOk() && baseURLResult.ok + ? baseURLResult.ok + : (DEFAULT_CONFIG.openrouter.baseUrl as string); + const maxRetries = await ModelService.getMaxRetries(); + + logDebug( + `[openrouterService.generateCommitMessage] CALL API model=${model}, baseURL=${baseURL}` + ); + + const client = createOpenRouter({ + apiKey, + baseURL, + headers: { + "HTTP-Referer": "https://github.com/AhmedOsman101/commit-sage-cli", + "X-Title": "Commit Sage", + }, + }); + + const wrappedModel = wrapLanguageModel({ + model: client(model), + middleware: extractReasoningMiddleware({ tagName: "think" }), + }); + + const { text } = await generateText({ + model: wrappedModel, + prompt, + temperature: 0.7, + maxRetries, + }); + + logDebug( + `[openrouterService.generateCommitMessage] EXIT message="${text.substring(0, 50)}..."` + ); + return { message: text, model }; + } catch (error) { + logDebug(`[openrouterService.generateCommitMessage] ERROR ${error}`); + return await OpenRouterService.handleGenerationError( + error, + prompt, + attempt, + OpenRouterService.generateCommitMessage.bind(OpenRouterService) + ); + } + } +} + +export default OpenRouterService; diff --git a/src/services/promptService.ts b/src/services/promptService.ts index 593af38..ef38271 100644 --- a/src/services/promptService.ts +++ b/src/services/promptService.ts @@ -1,9 +1,13 @@ import type { CommitLanguage } from "@/lib/configServiceTypes.d.ts"; +import { logDebug } from "@/lib/logger.ts"; import { getTemplate } from "@/templates/index.ts"; import ConfigService from "./configService.ts"; const PromptService = { async generatePrompt(diff: string, blameAnalysis: string): Promise { + logDebug( + `[promptService.generatePrompt] ENTRY diff.length=${diff.length}, blame.length=${blameAnalysis.length}` + ); const format = await ConfigService.get("commit", "commitFormat").then( result => result.unwrap() ); diff --git a/src/services/providerRegistry.ts b/src/services/providerRegistry.ts index 40be109..7357ab2 100644 --- a/src/services/providerRegistry.ts +++ b/src/services/providerRegistry.ts @@ -2,11 +2,15 @@ import type { ProviderType } from "@/lib/configServiceTypes.d.ts"; import AnthropicService from "./anthropicService.ts"; import DeepseekService from "./deepseekService.ts"; import GeminiService from "./geminiService.ts"; +import MinimaxService from "./minimaxService.ts"; import MistralService from "./mistralService.ts"; import type { ModelService } from "./modelService.ts"; +import MoonshotService from "./moonshotService.ts"; import OllamaService from "./ollamaService.ts"; import OpenAiService from "./openaiService.ts"; +import OpenRouterService from "./openrouterService.ts"; import XaiService from "./xaiService.ts"; +import ZaiService from "./zaiService.ts"; const providers: Record = { gemini: GeminiService, @@ -16,6 +20,10 @@ const providers: Record = { mistral: MistralService, xai: XaiService, ollama: OllamaService, + moonshotai: MoonshotService, + zai: ZaiService, + minimax: MinimaxService, + openrouter: OpenRouterService, }; export function getProviderService(type: ProviderType): typeof ModelService { diff --git a/src/services/zaiService.ts b/src/services/zaiService.ts new file mode 100644 index 0000000..93d7c77 --- /dev/null +++ b/src/services/zaiService.ts @@ -0,0 +1,67 @@ +import { createOpenAI } from "@ai-sdk/openai"; +import { + extractReasoningMiddleware, + generateText, + wrapLanguageModel, +} from "ai"; +import type { CommitMessage } from "@/lib/index.d.ts"; +import { logDebug } from "@/lib/logger.ts"; +import ConfigService from "./configService.ts"; +import { ModelService } from "./modelService.ts"; + +const ZAI_BASE_URL = "https://api.z.ai/api/paas/v4/"; + +/** + * Z.AI Service — GLM models via the international Z.AI platform (Zhipu AI). + * Uses @ai-sdk/openai with a custom baseURL (no new package dependency). + * The trailing slash on ZAI_BASE_URL is required per Z.AI documentation. + * Register at https://z.ai and set ZAI_API_KEY. + */ +class ZaiService extends ModelService { + static override async generateCommitMessage( + prompt: string, + attempt = 1 + ): Promise { + logDebug( + `[zaiService.generateCommitMessage] ENTRY attempt=${attempt}, prompt.length=${prompt.length}` + ); + try { + const apiKey = await ConfigService.getApiKey("Zai"); + const model = (await ConfigService.get("provider", "model")).unwrap(); + const maxRetries = await ModelService.getMaxRetries(); + + logDebug( + `[zaiService.generateCommitMessage] CALL API model=${model}, baseURL=${ZAI_BASE_URL}` + ); + + const client = createOpenAI({ baseURL: ZAI_BASE_URL, apiKey }); + + const wrappedModel = wrapLanguageModel({ + model: client(model), + middleware: extractReasoningMiddleware({ tagName: "think" }), + }); + + const { text } = await generateText({ + model: wrappedModel, + prompt, + temperature: 0.7, + maxRetries, + }); + + logDebug( + `[zaiService.generateCommitMessage] EXIT message="${text.substring(0, 50)}..."` + ); + return { message: text, model }; + } catch (error) { + logDebug(`[zaiService.generateCommitMessage] ERROR ${error}`); + return await ZaiService.handleGenerationError( + error, + prompt, + attempt, + ZaiService.generateCommitMessage.bind(ZaiService) + ); + } + } +} + +export default ZaiService;