Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 8 additions & 3 deletions src/modules/integrations.types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ export type IntegrationEndpointFunction = (
* ```typescript
* await base44.integrations.Core.InvokeLLM({
* prompt: 'Explain quantum computing',
* model: 'gpt-4'
* model: 'gpt_5'
* });
* ```
*
Expand All @@ -46,6 +46,11 @@ export type IntegrationPackage = {
export interface InvokeLLMParams {
/** The prompt text to send to the model */
prompt: string;
/** Optionally specify a model to override the app-level model setting for this specific call.
*
* Options: `"gpt_5_mini"`, `"gemini_3_flash"`, `"gpt_5"`, `"gemini_3_pro"`, `"claude_sonnet_4_6"`, `"claude_opus_4_6"`
*/
model?: 'gpt_5_mini' | 'gemini_3_flash' | 'gpt_5' | 'gemini_3_pro' | 'claude_sonnet_4_6' | 'claude_opus_4_6';
/** If set to `true`, the LLM will use Google Search, Maps, and News to gather real-time context before answering.
* @default false
*/
Expand Down Expand Up @@ -393,7 +398,7 @@ export type IntegrationsModule = {
* ```typescript
* const response = await base44.integrations.Core.InvokeLLM({
* prompt: 'Explain quantum computing',
* model: 'gpt-4'
* model: 'gpt_5'
* });
* ```
*/
Expand Down Expand Up @@ -423,7 +428,7 @@ export type IntegrationsModule = {
* ```typescript
* const response = await base44.integrations.Core.InvokeLLM({
* prompt: 'Explain quantum computing',
* model: 'gpt-4'
* model: 'gpt_5'
* });
* ```
*
Expand Down
75 changes: 75 additions & 0 deletions tests/unit/integrations.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
import { describe, test, expect, beforeEach, afterEach } from 'vitest';
import nock from 'nock';
import { createClient } from '../../src/index.ts';

/**
 * Unit tests for the Core.InvokeLLM integration endpoint, focused on the
 * optional `model` parameter: it must be forwarded verbatim in the request
 * body when supplied, and omitted entirely when not.
 */
describe('Core Integrations - InvokeLLM', () => {
  const appId = 'test-app-id';
  const serverUrl = 'https://base44.app';
  // Path nock matches against — identical to the path the SDK builds internally.
  const endpointPath = `/api/apps/${appId}/integration-endpoints/Core/InvokeLLM`;

  let base44: ReturnType<typeof createClient>;
  let scope: nock.Scope;

  beforeEach(() => {
    base44 = createClient({ serverUrl, appId });
    scope = nock(serverUrl);
  });

  afterEach(() => {
    // Drop any unconsumed interceptors so one test's mocks cannot leak into the next.
    nock.cleanAll();
  });

  test('InvokeLLM should pass model parameter to the API', async () => {
    const requestBody = {
      prompt: 'Explain quantum computing',
      model: 'gpt_5',
    };

    // nock only replies when the POST body equals `requestBody` exactly, so a
    // dropped or renamed `model` field would leave the interceptor unconsumed.
    scope.post(endpointPath, requestBody).reply(200, 'Quantum computing uses qubits...');

    await expect(base44.integrations.Core.InvokeLLM(requestBody)).resolves.toBe(
      'Quantum computing uses qubits...',
    );
    expect(scope.isDone()).toBe(true);
  });

  test('InvokeLLM should work without model parameter', async () => {
    // No `model` key at all: exact-body matching verifies the SDK does not
    // inject a default model into the request.
    const requestBody = {
      prompt: 'Explain quantum computing',
    };

    scope.post(endpointPath, requestBody).reply(200, 'Quantum computing uses qubits...');

    const result = await base44.integrations.Core.InvokeLLM(requestBody);
    expect(result).toBe('Quantum computing uses qubits...');
    expect(scope.isDone()).toBe(true);
  });

  test('InvokeLLM should pass model alongside other optional parameters', async () => {
    const requestBody = {
      prompt: 'Analyze this text',
      model: 'claude_sonnet_4_6' as const,
      response_json_schema: {
        type: 'object',
        properties: {
          sentiment: { type: 'string' },
        },
      },
    };
    const expectedPayload = { sentiment: 'positive' };

    scope.post(endpointPath, requestBody).reply(200, expectedPayload);

    const result = await base44.integrations.Core.InvokeLLM(requestBody);
    expect(result).toEqual(expectedPayload);
    expect(scope.isDone()).toBe(true);
  });
});