From 8df3ee38218a2bc3447f5654aa48a45bdf47dc8a Mon Sep 17 00:00:00 2001
From: floory <67979730+flooryyyy@users.noreply.github.com>
Date: Sat, 4 Apr 2026 13:59:18 +0100
Subject: [PATCH] feat(ai): add server-side provider registry and openai adapter

---
 src/app/lib/ai/index.ts                       | 13 ++++
 src/app/lib/ai/providers/openai-compatible.ts | 38 ++++++++++++
 src/app/lib/ai/registry.test.ts               | 60 +++++++++++++++++++
 src/app/lib/ai/registry.ts                    | 32 ++++++++++
 src/app/lib/ai/types.ts                       | 36 +++++++++++
 5 files changed, 179 insertions(+)
 create mode 100644 src/app/lib/ai/index.ts
 create mode 100644 src/app/lib/ai/providers/openai-compatible.ts
 create mode 100644 src/app/lib/ai/registry.test.ts
 create mode 100644 src/app/lib/ai/registry.ts
 create mode 100644 src/app/lib/ai/types.ts

diff --git a/src/app/lib/ai/index.ts b/src/app/lib/ai/index.ts
new file mode 100644
index 0000000..0ca0f06
--- /dev/null
+++ b/src/app/lib/ai/index.ts
@@ -0,0 +1,13 @@
+import { registerAIProvider, resolveAIProvider } from "./registry";
+import { createOpenAICompatibleProvider } from "./providers/openai-compatible";
+
+registerAIProvider("openai_compatible", createOpenAICompatibleProvider);
+
+export { resolveAIProvider, registerAIProvider };
+export type {
+  AIProviderAdapter,
+  AIProviderChatRequest,
+  AIProviderConfig,
+  AIProviderType,
+  OpenAICompatibleProviderConfig,
+} from "./types";
diff --git a/src/app/lib/ai/providers/openai-compatible.ts b/src/app/lib/ai/providers/openai-compatible.ts
new file mode 100644
index 0000000..8ed717c
--- /dev/null
+++ b/src/app/lib/ai/providers/openai-compatible.ts
@@ -0,0 +1,38 @@
+import type {
+  AIProviderAdapter,
+  AIProviderChatRequest,
+  OpenAICompatibleProviderConfig,
+} from "../types";
+
+function normalizeBaseUrl(baseUrl: string): string {
+  return baseUrl.trim().replace(/\/+$/, "");
+}
+
+export function createOpenAICompatibleProvider(
+  config: OpenAICompatibleProviderConfig,
+): AIProviderAdapter {
+  const baseUrl = normalizeBaseUrl(config.baseUrl);
+
+  return {
+    type: "openai_compatible",
+    model: config.model,
+    async chatCompletions(
+      request: AIProviderChatRequest,
+      init?: Omit<RequestInit, "method" | "headers" | "body">,
+    ): Promise<Response> {
+      return fetch(`${baseUrl}/chat/completions`, {
+        ...init,
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          Authorization: `Bearer ${config.apiKey}`,
+          ...config.headers,
+        },
+        body: JSON.stringify({
+          model: config.model,
+          ...request,
+        }),
+      });
+    },
+  };
+}
diff --git a/src/app/lib/ai/registry.test.ts b/src/app/lib/ai/registry.test.ts
new file mode 100644
index 0000000..47f22fa
--- /dev/null
+++ b/src/app/lib/ai/registry.test.ts
@@ -0,0 +1,60 @@
+import { beforeEach, describe, expect, it, vi } from "vitest";
+import {
+  registerAIProvider,
+  resolveAIProvider,
+  getRegisteredAIProviders,
+} from "./registry";
+import { createOpenAICompatibleProvider } from "./providers/openai-compatible";
+
+describe("AI provider registry", () => {
+  beforeEach(() => {
+    registerAIProvider("openai_compatible", createOpenAICompatibleProvider);
+  });
+
+  it("resolves openai_compatible provider and forwards configured request fields", async () => {
+    const fetchMock = vi.fn().mockResolvedValue(new Response("{}"));
+    vi.stubGlobal("fetch", fetchMock);
+
+    const provider = resolveAIProvider({
+      type: "openai_compatible",
+      baseUrl: "https://api.example.com/v1/",
+      apiKey: "secret",
+      model: "gpt-4.1-mini",
+      headers: {
+        "X-Custom": "yes",
+      },
+    });
+
+    await provider.chatCompletions({
+      messages: [{ role: "user", content: "hello" }],
+      temperature: 0.2,
+    });
+
+    expect(provider.type).toBe("openai_compatible");
+    expect(provider.model).toBe("gpt-4.1-mini");
+    expect(fetchMock).toHaveBeenCalledOnce();
+    expect(fetchMock).toHaveBeenCalledWith(
+      "https://api.example.com/v1/chat/completions",
+      expect.objectContaining({
+        method: "POST",
+        headers: expect.objectContaining({
+          "Content-Type": "application/json",
+          Authorization: "Bearer secret",
+          "X-Custom": "yes",
+        }),
+      }),
+    );
+
+    const fetchCall = fetchMock.mock.calls[0];
+    const body = JSON.parse(fetchCall?.[1]?.body as string);
+    expect(body).toMatchObject({
+      model: "gpt-4.1-mini",
+      messages: [{ role: "user", content: "hello" }],
+      temperature: 0.2,
+    });
+  });
+
+  it("exposes registered provider types", () => {
+    expect(getRegisteredAIProviders()).toContain("openai_compatible");
+  });
+});
diff --git a/src/app/lib/ai/registry.ts b/src/app/lib/ai/registry.ts
new file mode 100644
index 0000000..d317efa
--- /dev/null
+++ b/src/app/lib/ai/registry.ts
@@ -0,0 +1,32 @@
+import type {
+  AIProviderAdapter,
+  AIProviderConfig,
+  AIProviderType,
+} from "./types";
+
+export type AIProviderFactory<T extends AIProviderConfig = AIProviderConfig> = (
+  config: T,
+) => AIProviderAdapter;
+
+const providerRegistry = new Map<AIProviderType, AIProviderFactory>();
+
+export function registerAIProvider(
+  type: AIProviderType,
+  factory: AIProviderFactory,
+): void {
+  providerRegistry.set(type, factory);
+}
+
+export function resolveAIProvider(config: AIProviderConfig): AIProviderAdapter {
+  const factory = providerRegistry.get(config.type);
+
+  if (!factory) {
+    throw new Error(`No AI provider registered for type: ${config.type}`);
+  }
+
+  return factory(config);
+}
+
+export function getRegisteredAIProviders(): AIProviderType[] {
+  return Array.from(providerRegistry.keys());
+}
diff --git a/src/app/lib/ai/types.ts b/src/app/lib/ai/types.ts
new file mode 100644
index 0000000..d59cd26
--- /dev/null
+++ b/src/app/lib/ai/types.ts
@@ -0,0 +1,36 @@
+export type AIProviderType = "openai_compatible";
+
+export interface AIProviderConfigBase {
+  type: AIProviderType;
+}
+
+export interface OpenAICompatibleProviderConfig extends AIProviderConfigBase {
+  type: "openai_compatible";
+  baseUrl: string;
+  apiKey: string;
+  model: string;
+  headers?: Record<string, string>;
+}
+
+export type AIProviderConfig = OpenAICompatibleProviderConfig;
+
+export interface AIChatMessage {
+  role: "system" | "user" | "assistant";
+  content: string;
+}
+
+export interface AIProviderChatRequest {
+  messages: AIChatMessage[];
+  temperature?: number;
+  max_tokens?: number;
+  [key: string]: unknown;
+}
+
+export interface AIProviderAdapter {
+  readonly type: AIProviderType;
+  readonly model: string;
+  chatCompletions(
+    request: AIProviderChatRequest,
+    init?: Omit<RequestInit, "method" | "headers" | "body">,
+  ): Promise<Response>;
+}