diff --git a/src/modules/integrations.types.ts b/src/modules/integrations.types.ts index ca42ef0..58e493d 100644 --- a/src/modules/integrations.types.ts +++ b/src/modules/integrations.types.ts @@ -23,7 +23,7 @@ export type IntegrationEndpointFunction = ( * ```typescript * await base44.integrations.Core.InvokeLLM({ * prompt: 'Explain quantum computing', - * model: 'gpt-4' + * model: 'gpt_5' * }); * ``` * @@ -46,6 +46,11 @@ export type IntegrationPackage = { export interface InvokeLLMParams { /** The prompt text to send to the model */ prompt: string; + /** Optionally specify a model to override the app-level model setting for this specific call. + * + * Options: `"gpt_5_mini"`, `"gemini_3_flash"`, `"gpt_5"`, `"gemini_3_pro"`, `"claude_sonnet_4_6"`, `"claude_opus_4_6"` + */ + model?: 'gpt_5_mini' | 'gemini_3_flash' | 'gpt_5' | 'gemini_3_pro' | 'claude_sonnet_4_6' | 'claude_opus_4_6'; /** If set to `true`, the LLM will use Google Search, Maps, and News to gather real-time context before answering. 
 * @default false */ @@ -393,7 +398,7 @@ export type IntegrationsModule = { * ```typescript * const response = await base44.integrations.Core.InvokeLLM({ * prompt: 'Explain quantum computing', - * model: 'gpt-4' + * model: 'gpt_5' * }); * ``` */ @@ -423,7 +428,7 @@ export type IntegrationsModule = { * ```typescript * const response = await base44.integrations.Core.InvokeLLM({ * prompt: 'Explain quantum computing', - * model: 'gpt-4' + * model: 'gpt_5' * }); * ``` * diff --git a/tests/unit/integrations.test.ts b/tests/unit/integrations.test.ts new file mode 100644 index 0000000..88fca9b --- /dev/null +++ b/tests/unit/integrations.test.ts @@ -0,0 +1,75 @@ +import { describe, test, expect, beforeEach, afterEach } from 'vitest'; +import nock from 'nock'; +import { createClient } from '../../src/index.ts'; + +describe('Core Integrations - InvokeLLM', () => { + let base44: ReturnType<typeof createClient>; + let scope: nock.Scope; + const appId = 'test-app-id'; + const serverUrl = 'https://base44.app'; + + beforeEach(() => { + base44 = createClient({ + serverUrl, + appId, + }); + + scope = nock(serverUrl); + }); + + afterEach(() => { + nock.cleanAll(); + }); + + test('InvokeLLM should pass model parameter to the API', async () => { + const params = { + prompt: 'Explain quantum computing', + model: 'gpt_5' as const, + }; + + scope + .post(`/api/apps/${appId}/integration-endpoints/Core/InvokeLLM`, params) + .reply(200, 'Quantum computing uses qubits...'); + + const result = await base44.integrations.Core.InvokeLLM(params); + expect(result).toBe('Quantum computing uses qubits...'); + expect(scope.isDone()).toBe(true); + }); + + test('InvokeLLM should work without model parameter', async () => { + const params = { + prompt: 'Explain quantum computing', + }; + + scope + .post(`/api/apps/${appId}/integration-endpoints/Core/InvokeLLM`, params) + .reply(200, 'Quantum computing uses 
qubits...'); + expect(scope.isDone()).toBe(true); + }); + + test('InvokeLLM should pass model alongside other optional parameters', async () => { + const params = { + prompt: 'Analyze this text', + model: 'claude_sonnet_4_6' as const, + response_json_schema: { + type: 'object', + properties: { + sentiment: { type: 'string' }, + }, + }, + }; + + const mockResponse = { sentiment: 'positive' }; + + scope + .post(`/api/apps/${appId}/integration-endpoints/Core/InvokeLLM`, params) + .reply(200, mockResponse); + + const result = await base44.integrations.Core.InvokeLLM(params); + expect(result).toEqual(mockResponse); + expect(scope.isDone()).toBe(true); + }); +});