From 3eb9f7a5838072c41e811688755c95050d4a7d73 Mon Sep 17 00:00:00 2001
From: AssemblyAI
Date: Wed, 3 Sep 2025 10:15:15 -0600
Subject: [PATCH] Project import generated by Copybara.

GitOrigin-RevId: c9e29e4462d40b419dc71754610dbda09fe3fa4b
---
 package.json                   |   2 +-
 src/types/openapi.generated.ts |  66 ++++++++++++++++
 tests/unit/lemur.test.ts       | 138 +++++++++++++++++++++++++++++++++
 3 files changed, 205 insertions(+), 1 deletion(-)

diff --git a/package.json b/package.json
index fcf3468..5fd695c 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "assemblyai",
-  "version": "4.15.0",
+  "version": "4.16.0",
   "description": "The AssemblyAI JavaScript SDK provides an easy-to-use interface for interacting with the AssemblyAI API, which supports async and real-time transcription, as well as the latest LeMUR models.",
   "engines": {
     "node": ">=18"
diff --git a/src/types/openapi.generated.ts b/src/types/openapi.generated.ts
index 5225fd5..8f04ca7 100644
--- a/src/types/openapi.generated.ts
+++ b/src/types/openapi.generated.ts
@@ -672,6 +672,71 @@ export type LemurBaseParams = {
  * }
  * ```
  */
+
+export type LemurRequestDetails = {
+  /**
+   * The endpoint used for the LeMUR request
+   */
+  request_endpoint: string;
+  /**
+   * The temperature to use for the model.
+   * Higher values result in answers that are more creative, lower values are more conservative.
+   * Can be any value between 0.0 and 1.0 inclusive.
+   *
+   * @defaultValue 0
+   */
+  temperature: number;
+  /**
+   * The model that was used for the final prompt after compression is performed.
+   *
+   * @defaultValue "default"
+   */
+  final_model: LiteralUnion<LemurModel, string>;
+  /**
+   * Max output size in tokens, up to 4000
+   * @defaultValue 2000
+   */
+  max_output_size: number;
+  /**
+   * The date when the request was created
+   */
+  created_at: Date;
+  /**
+   * A list of completed transcripts with text.
+   * Use either transcript_ids or input_text as input into LeMUR.
+   */
+  transcript_ids?: string[];
+  /**
+   * Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
+   * Use either transcript_ids or input_text as input into LeMUR.
+   */
+  input_text?: string;
+  /**
+   * A list of questions asked in the request.
+   * Each question can have its own context and answer format.
+   */
+  questions?: LemurQuestion[];
+  /**
+   * The prompt used for the model.
+   */
+  prompt?: string;
+  /**
+   * Context provided to the model. This can be a string or a free-form JSON value.
+   */
+  context?: OneOf<
+    [
+      string,
+      {
+        [key: string]: unknown;
+      },
+    ]
+  >;
+  /**
+   * The format to use for the model's answers.
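+   * For example: "bullet points" (an illustrative value taken from this
+   * patch's unit tests; the type is a free-form string, not an enumerated
+   * list of options).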
+   */
+  answer_format?: string;
+};
+
 export type LemurBaseResponse = {
   /**
    * The ID of the LeMUR request
    */
@@ -681,6 +746,7 @@
    * The usage numbers for the LeMUR request
    */
   usage: LemurUsage;
+  /**
+   * The details of the LeMUR request
+   */
+  request?: LemurRequestDetails;
 };
 
 /**
diff --git a/tests/unit/lemur.test.ts b/tests/unit/lemur.test.ts
index 16ad809..4d5753e 100644
--- a/tests/unit/lemur.test.ts
+++ b/tests/unit/lemur.test.ts
@@ -149,6 +149,144 @@ describe("lemur", () => {
     expect(response.response).toBe("some response");
   });
 
+  it("should return response with request details", async () => {
+    const responseWithDetails = {
+      request_id: knownLemurRequestId,
+      response: "detailed response",
+      usage: {
+        input_tokens: 250,
+        output_tokens: 75,
+      },
+      request: {
+        request_endpoint: "/lemur/v3/generate/task",
+        temperature: 0.7,
+        final_model: "anthropic/claude-3-5-sonnet",
+        max_output_size: 1500,
+        created_at: "2024-01-01T10:30:00Z",
+        transcript_ids: knownTranscriptIds,
+        prompt: "Analyze the key themes in this conversation",
+        context: "Focus on business decisions and action items",
+      },
+    };
+
+    fetchMock.doMockOnceIf(
+      requestMatches({
+        method: "GET",
+        url: `/lemur/v3/${knownLemurRequestId}`,
+      }),
+      JSON.stringify(responseWithDetails),
+    );
+
+    const response = await assembly.lemur.getResponse(knownLemurRequestId);
+    expect(response.request_id).toBe(knownLemurRequestId);
+    expect(response.request).toBeDefined();
+    expect(response.request?.request_endpoint).toBe("/lemur/v3/generate/task");
+    expect(response.request?.temperature).toBe(0.7);
+    expect(response.request?.final_model).toBe("anthropic/claude-3-5-sonnet");
+    expect(response.request?.max_output_size).toBe(1500);
+    expect(response.request?.prompt).toBe(
+      "Analyze the key themes in this conversation",
+    );
+    expect(response.usage.input_tokens).toBe(250);
+    expect(response.usage.output_tokens).toBe(75);
+  });
+
+  it("should return response with question-answer request details", async () => {
+    const qaResponseWithDetails = {
+      request_id: knownLemurRequestId,
+      response: [
+        { question: "What was discussed?", answer: "Project updates" },
+      ],
+      usage: {
+        input_tokens: 300,
+        output_tokens: 100,
+      },
+      request: {
+        request_endpoint: "/lemur/v3/generate/question-answer",
+        temperature: 0.3,
+        final_model: "anthropic/claude-3-opus",
+        max_output_size: 2500,
+        created_at: "2024-01-01T14:15:00Z",
+        input_text: "Custom transcript content...",
+        questions: [
+          {
+            question: "What was discussed?",
+            answer_format: "concise summary",
+            context: "Meeting notes",
+          },
+          {
+            question: "Was the date of the next meeting called out?",
+            answer_options: ["Yes", "No", "Not mentioned"],
+          },
+        ],
+      },
+    };
+
+    fetchMock.doMockOnceIf(
+      requestMatches({
+        method: "GET",
+        url: `/lemur/v3/${knownLemurRequestId}`,
+      }),
+      JSON.stringify(qaResponseWithDetails),
+    );
+
+    const response = await assembly.lemur.getResponse(knownLemurRequestId);
+    expect(response.request?.request_endpoint).toBe(
+      "/lemur/v3/generate/question-answer",
+    );
+    expect(response.request?.input_text).toBe("Custom transcript content...");
+    expect(response.request?.questions).toHaveLength(2);
+    expect(response.request?.questions?.[0].question).toBe(
+      "What was discussed?",
+    );
+    expect(response.request?.questions?.[0].context).toBe("Meeting notes");
+    expect(response.request?.questions?.[1].answer_options).toEqual([
+      "Yes",
+      "No",
+      "Not mentioned",
+    ]);
+  });
+
+  it("should return response with context as object in request details", async () => {
+    const responseWithObjectContext = {
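+      // This mock exercises `context` as a free-form JSON object, matching
+      // the OneOf<[string, { [key: string]: unknown }]> shape declared on
+      // LemurRequestDetails.context (the first test passes it as a string).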
+      request_id: knownLemurRequestId,
+      response: "context-aware response",
+      usage: {
+        input_tokens: 180,
+        output_tokens: 60,
+      },
+      request: {
+        request_endpoint: "/lemur/v3/generate/summary",
+        temperature: 0.5,
+        final_model: "default",
+        max_output_size: 2000,
+        created_at: "2024-01-01T16:45:00Z",
+        transcript_ids: knownTranscriptIds,
+        context: {
+          meeting_type: "standup",
+          team: "engineering",
+          date: "2024-01-01",
+        },
+        answer_format: "bullet points",
+      },
+    };
+
+    fetchMock.doMockOnceIf(
+      requestMatches({
+        method: "GET",
+        url: `/lemur/v3/${knownLemurRequestId}`,
+      }),
+      JSON.stringify(responseWithObjectContext),
+    );
+
+    const response = await assembly.lemur.getResponse(knownLemurRequestId);
+    expect(response.request?.context).toEqual({
+      meeting_type: "standup",
+      team: "engineering",
+      date: "2024-01-01",
+    });
+  });
+
   it("should purge request data", async () => {
     fetchMock.doMockOnceIf(
       requestMatches({