From 9ba80f6d9e84653c99cd56385c83a09e996632ac Mon Sep 17 00:00:00 2001
From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 11 Mar 2026 02:08:08 +0000
Subject: [PATCH 1/2] SDK regeneration
---
poetry.lock | 6 +-
pyproject.toml | 2 +-
reference.md | 296 ++----------
src/twelvelabs/__init__.py | 32 +-
src/twelvelabs/base_client.py | 307 +------------
src/twelvelabs/core/client_wrapper.py | 4 +-
src/twelvelabs/embed/client.py | 8 +-
src/twelvelabs/embed/raw_client.py | 8 +-
src/twelvelabs/embed/tasks/client.py | 8 +-
src/twelvelabs/embed/tasks/raw_client.py | 8 +-
src/twelvelabs/embed/v_2/client.py | 19 +-
src/twelvelabs/embed/v_2/raw_client.py | 23 +-
.../create_embeddings_request_input_type.py | 2 +-
.../indexes_create_request_models_item.py | 4 +-
src/twelvelabs/raw_base_client.py | 423 +-----------------
src/twelvelabs/search/client.py | 34 +-
src/twelvelabs/search/raw_client.py | 34 +-
src/twelvelabs/types/__init__.py | 34 +-
src/twelvelabs/types/audio_input_request.py | 16 +-
...audio_input_request_embedding_type_item.py | 5 +
src/twelvelabs/types/chunk_info.py | 2 +-
src/twelvelabs/types/completed_chunk.py | 2 +-
.../types/create_asset_upload_response.py | 2 +-
src/twelvelabs/types/embedding_data.py | 13 +-
.../types/embedding_data_embedding_option.py | 2 +-
.../types/embedding_media_metadata.py | 26 +-
.../types/embedding_multi_input_metadata.py | 21 +
src/twelvelabs/types/gist.py | 44 --
.../types/gist_request_types_item.py | 5 -
src/twelvelabs/types/image_input_request.py | 2 +-
.../types/multi_input_media_source.py | 55 +++
.../multi_input_media_source_media_type.py | 5 +
src/twelvelabs/types/multi_input_request.py | 37 ++
.../types/summarize_chapter_result.py | 6 +
...summarize_chapter_result_summarize_type.py | 5 +
.../types/summarize_highlight_result.py | 6 +
...mmarize_highlight_result_summarize_type.py | 5 +
src/twelvelabs/types/summarize_response.py | 62 ---
.../types/summarize_summary_result.py | 6 +
...summarize_summary_result_summarize_type.py | 5 +
.../types/text_image_input_request.py | 2 +-
src/twelvelabs/types/text_input_request.py | 2 +-
src/twelvelabs/types/video_input_request.py | 16 +-
...video_input_request_embedding_type_item.py | 5 +
44 files changed, 406 insertions(+), 1203 deletions(-)
create mode 100644 src/twelvelabs/types/audio_input_request_embedding_type_item.py
create mode 100644 src/twelvelabs/types/embedding_multi_input_metadata.py
delete mode 100644 src/twelvelabs/types/gist.py
delete mode 100644 src/twelvelabs/types/gist_request_types_item.py
create mode 100644 src/twelvelabs/types/multi_input_media_source.py
create mode 100644 src/twelvelabs/types/multi_input_media_source_media_type.py
create mode 100644 src/twelvelabs/types/multi_input_request.py
create mode 100644 src/twelvelabs/types/summarize_chapter_result_summarize_type.py
create mode 100644 src/twelvelabs/types/summarize_highlight_result_summarize_type.py
delete mode 100644 src/twelvelabs/types/summarize_response.py
create mode 100644 src/twelvelabs/types/summarize_summary_result_summarize_type.py
create mode 100644 src/twelvelabs/types/video_input_request_embedding_type_item.py
diff --git a/poetry.lock b/poetry.lock
index 3cc8b76..2744b38 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -38,13 +38,13 @@ trio = ["trio (>=0.26.1)"]
[[package]]
name = "certifi"
-version = "2026.1.4"
+version = "2026.2.25"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.7"
files = [
- {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"},
- {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"},
+ {file = "certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa"},
+ {file = "certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index d60065f..85994d5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "twelvelabs"
[tool.poetry]
name = "twelvelabs"
-version = "1.2.0"
+version = "1.2.1"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 43487d1..121761c 100644
--- a/reference.md
+++ b/reference.md
@@ -1,238 +1,4 @@
# Reference
-client.summarize(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-
-
- This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
-This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
-
-
-This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from twelvelabs import TwelveLabs
-
-client = TwelveLabs(
- api_key="YOUR_API_KEY",
-)
-client.summarize(
- video_id="6298d673f1090f1100476d4c",
- type="summary",
- prompt="Generate a summary of this video for a social media post, up to two sentences.",
- temperature=0.2,
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**video_id:** `str` — The unique identifier of the video that you want to summarize.
-
-
-
-
-
--
-
-**type:** `str`
-
-Specifies the type of summary. Use one of the following values:
- - `summary`: A brief that encapsulates the key points of a video, presenting the most important information clearly and concisely.
- - `chapter`: A chronological list of all the chapters in a video, providing a granular breakdown of its content. For each chapter, the platform returns its starting and end times, measured in seconds from the beginning of the video clip, a descriptive headline that offers a brief of the events or activities within that part of the video, and an accompanying summary that elaborates on the headline.
- - `highlight`: A chronologically ordered list of the most important events within a video. Unlike chapters, highlights only capture the key moments, providing a snapshot of the video's main topics. For each highlight, the platform returns its starting and end times, measured in seconds from the beginning of the video, a title, and a brief description that captures the essence of this part of the video.
-
-
-
-
-
--
-
-**prompt:** `typing.Optional[str]`
-
-Use this field to provide context for the summarization task, such as the target audience, style, tone of voice, and purpose.
-
-
-- Your prompts can be instructive or descriptive, or you can also phrase them as questions.
-- The maximum length of a prompt is 2,000 tokens.
-
-
-**Example**: Generate a summary of this video for a social media post, up to two sentences.
-
-
-
-
-
--
-
-**temperature:** `typing.Optional[float]`
-
-Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
-**Default:** 0.2
-**Min:** 0
-**Max:** 1
-
-
-
-
-
--
-
-**response_format:** `typing.Optional[ResponseFormat]`
-
-Use this parameter to specify the format of the response.
-This parameter is only valid when the `type` parameter is set to `summary`.
-If you omit this parameter, the platform returns unstructured text.
-
-
-
-
-
--
-
-**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate.
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
-client.gist(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-
- This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
-This method analyzes videos and generates titles, topics, and hashtags.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from twelvelabs import TwelveLabs
-
-client = TwelveLabs(
- api_key="YOUR_API_KEY",
-)
-client.gist(
- video_id="6298d673f1090f1100476d4c",
- types=["title", "topic"],
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**video_id:** `str` — The unique identifier of the video that you want to generate a gist for.
-
-
-
-
-
--
-
-**types:** `typing.Sequence[GistRequestTypesItem]`
-
-Specifies the type of gist. Use one of the following values:
- - `title`: A title succinctly captures a video's main theme, such as "From Consumerism to Minimalism: A Journey Toward Sustainable Living," guiding viewers to its content and themes.
- - `topic`: A topic is the central theme of a video, such as "Shopping Vlog Lifestyle", summarizing its content for efficient categorization and reference.
- - `hashtag`: A hashtag, like "#BlackFriday", represents key themes in a video, enhancing its discoverability and categorization on social media platforms.
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
-
-
-
-
client.analyze_stream(...)
-
@@ -249,7 +15,7 @@ This endpoint analyzes your videos and creates fully customizable text based on
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-- This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+- This endpoint supports streaming responses.
@@ -391,7 +157,7 @@ This endpoint analyzes your videos and creates fully customizable text based on
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-- This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+- This endpoint supports streaming responses.
@@ -2877,8 +2643,8 @@ client.embed.create(
**model_name:** `str`
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
@@ -2998,20 +2764,17 @@ Use this endpoint to search for relevant matches in an index using text, media,
**Media queries**:
- Set the `query_media_type` parameter to the corresponding media type (example: `image`).
-- Specify either one of the following parameters:
+- Provide up to 10 images by specifying the following parameters multiple times:
- `query_media_url`: Publicly accessible URL of your media file.
- `query_media_file`: Local media file.
- If both `query_media_url` and `query_media_file` are specified in the same request, `query_media_url` takes precedence.
+- Marengo 2.7 supports a single image per request.
**Composed text and media queries** (Marengo 3.0 only):
- Use the `query_text` parameter for your text query.
- Set `query_media_type` to `image`.
-- Specify the image using either the `query_media_url` or the `query_media_file` parameter.
-
- Example: Provide an image of a car and include "red color" in your query to find red instances of that car model.
+- Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times.
**Entity search** (Marengo 3.0 only and in beta):
-
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
@@ -3098,7 +2861,15 @@ For detailed guidance and version-specific behavior, see the [Search options](/v
-
-**query_media_url:** `typing.Optional[str]` — The publicly accessible URL of the media file you wish to use. This parameter is required for media queries if `query_media_file` is not provided.
+**query_media_url:** `typing.Optional[str]`
+
+The publicly accessible URL of a media file to use as a query. This parameter is required for media queries if `query_media_file` is not provided.
+
+You can provide up to 10 images by specifying this parameter multiple times (Marengo 3.0 only):
+```
+--form query_media_url=https://example.com/image1.jpg \
+--form query_media_url=https://example.com/image2.jpg
+```
@@ -3591,8 +3362,8 @@ client.embed.tasks.create(
**model_name:** `str`
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
@@ -3889,7 +3660,7 @@ This endpoint synchronously creates embeddings for multimodal content and return
**When to use this endpoint**:
- Create embeddings for text, images, audio, or video content
-- Get immediate results without waiting for background processing
+- Retrieve immediate results without waiting for background processing
- Process audio or video content up to 10 minutes in duration
**Do not use this endpoint for**:
@@ -3967,7 +3738,8 @@ The type of content for the embeddings.
- `video`: Creates embeddings for a video file
- `image`: Creates embeddings for an image file
- `text`: Creates embeddings for text input
-- `text_image`: Creates embeddings for text and an image.
+- `text_image`: Creates embeddings for text and an image
+- `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source.
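
A minimal sketch of a synchronous `multi_input` request. `MultiInputRequest` and `MultiInputMediaSource` are introduced by this patch, but their field names (`text`, `media_sources`, `name`, `media_type`, `url`) and the `input_type` keyword are assumptions inferred from the new type files rather than from shown signatures:
```python
from twelvelabs import MultiInputMediaSource, MultiInputRequest, TwelveLabs

client = TwelveLabs(
    api_key="YOUR_API_KEY",
)
# One embedding from two images, with <@name> placeholders in the text
# referring back to the named media sources.
client.embed.v_2.create(
    input_type="multi_input",
    model_name="marengo3.0",
    multi_input=MultiInputRequest(
        text="A living room pairing <@sofa> with <@lamp>.",  # placeholder format from the docstring
        media_sources=[  # field names are assumptions
            MultiInputMediaSource(
                name="sofa",
                media_type="image",
                url="https://example.com/sofa.jpg",
            ),
            MultiInputMediaSource(
                name="lamp",
                media_type="image",
                url="https://example.com/lamp.jpg",
            ),
        ],
    ),
)
```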
@@ -4023,6 +3795,14 @@ The type of content for the embeddings.
-
+**multi_input:** `typing.Optional[MultiInputRequest]`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4221,13 +4001,7 @@ This endpoint creates embeddings for audio and video content asynchronously.
-
```python
-from twelvelabs import (
- MediaSource,
- TwelveLabs,
- VideoInputRequest,
- VideoSegmentation_Dynamic,
- VideoSegmentationDynamicDynamic,
-)
+from twelvelabs import MediaSource, TwelveLabs, VideoInputRequest
client = TwelveLabs(
api_key="YOUR_API_KEY",
@@ -4239,15 +4013,9 @@ client.embed.v_2.tasks.create(
media_source=MediaSource(
url="https://user-bucket.com/video/long-video.mp4",
),
- start_sec=0.0,
- end_sec=7200.0,
- segmentation=VideoSegmentation_Dynamic(
- dynamic=VideoSegmentationDynamicDynamic(
- min_duration_sec=4,
- ),
- ),
- embedding_option=["visual", "audio", "transcription"],
+ embedding_option=["visual", "audio"],
embedding_scope=["clip", "asset"],
+ embedding_type=["separate_embedding", "fused_embedding"],
),
)
diff --git a/src/twelvelabs/__init__.py b/src/twelvelabs/__init__.py
index dbfb888..cec5af3 100644
--- a/src/twelvelabs/__init__.py
+++ b/src/twelvelabs/__init__.py
@@ -11,6 +11,7 @@
AudioInputRequest,
AudioInputRequestEmbeddingOptionItem,
AudioInputRequestEmbeddingScopeItem,
+ AudioInputRequestEmbeddingTypeItem,
AudioSegment,
AudioSegmentation,
AudioSegmentationFixed,
@@ -37,8 +38,10 @@
EmbeddingMediaMetadata,
EmbeddingMediaMetadata_Audio,
EmbeddingMediaMetadata_Image,
+ EmbeddingMediaMetadata_MultiInput,
EmbeddingMediaMetadata_TextImage,
EmbeddingMediaMetadata_Video,
+ EmbeddingMultiInputMetadata,
EmbeddingResponse,
EmbeddingSuccessResponse,
EmbeddingTaskMediaMetadata,
@@ -60,8 +63,6 @@
FinishReason,
ForbiddenErrorBody,
GetUploadStatusResponse,
- Gist,
- GistRequestTypesItem,
HlsObject,
HlsObjectStatus,
ImageEmbeddingResult,
@@ -86,6 +87,9 @@
MediaEmbeddingTaskAudioEmbedding,
MediaEmbeddingTaskVideoEmbedding,
MediaSource,
+ MultiInputMediaSource,
+ MultiInputMediaSourceMediaType,
+ MultiInputRequest,
MultipartUploadStatusType,
NextPageToken,
NonStreamAnalyzeResponse,
@@ -118,13 +122,12 @@
StreamTextResponse,
SummarizeChapterResult,
SummarizeChapterResultChaptersItem,
+ SummarizeChapterResultSummarizeType,
SummarizeHighlightResult,
SummarizeHighlightResultHighlightsItem,
- SummarizeResponse,
- SummarizeResponse_Chapter,
- SummarizeResponse_Highlight,
- SummarizeResponse_Summary,
+ SummarizeHighlightResultSummarizeType,
SummarizeSummaryResult,
+ SummarizeSummaryResultSummarizeType,
TextEmbeddingResult,
TextImageInputRequest,
TextInputRequest,
@@ -146,6 +149,7 @@
VideoInputRequest,
VideoInputRequestEmbeddingOptionItem,
VideoInputRequestEmbeddingScopeItem,
+ VideoInputRequestEmbeddingTypeItem,
VideoItem,
VideoItemFailed,
VideoSegment,
@@ -199,6 +203,7 @@
"AudioInputRequest",
"AudioInputRequestEmbeddingOptionItem",
"AudioInputRequestEmbeddingScopeItem",
+ "AudioInputRequestEmbeddingTypeItem",
"AudioSegment",
"AudioSegmentation",
"AudioSegmentationFixed",
@@ -227,8 +232,10 @@
"EmbeddingMediaMetadata",
"EmbeddingMediaMetadata_Audio",
"EmbeddingMediaMetadata_Image",
+ "EmbeddingMediaMetadata_MultiInput",
"EmbeddingMediaMetadata_TextImage",
"EmbeddingMediaMetadata_Video",
+ "EmbeddingMultiInputMetadata",
"EmbeddingResponse",
"EmbeddingSuccessResponse",
"EmbeddingTaskMediaMetadata",
@@ -253,8 +260,6 @@
"ForbiddenError",
"ForbiddenErrorBody",
"GetUploadStatusResponse",
- "Gist",
- "GistRequestTypesItem",
"HlsObject",
"HlsObjectStatus",
"ImageEmbeddingResult",
@@ -283,6 +288,9 @@
"MediaEmbeddingTaskAudioEmbedding",
"MediaEmbeddingTaskVideoEmbedding",
"MediaSource",
+ "MultiInputMediaSource",
+ "MultiInputMediaSourceMediaType",
+ "MultiInputRequest",
"MultipartUploadStatusType",
"NextPageToken",
"NonStreamAnalyzeResponse",
@@ -324,13 +332,12 @@
"StreamTextResponse",
"SummarizeChapterResult",
"SummarizeChapterResultChaptersItem",
+ "SummarizeChapterResultSummarizeType",
"SummarizeHighlightResult",
"SummarizeHighlightResultHighlightsItem",
- "SummarizeResponse",
- "SummarizeResponse_Chapter",
- "SummarizeResponse_Highlight",
- "SummarizeResponse_Summary",
+ "SummarizeHighlightResultSummarizeType",
"SummarizeSummaryResult",
+ "SummarizeSummaryResultSummarizeType",
"TasksCreateResponse",
"TasksListRequestStatusItem",
"TasksListResponse",
@@ -360,6 +367,7 @@
"VideoInputRequest",
"VideoInputRequestEmbeddingOptionItem",
"VideoInputRequestEmbeddingScopeItem",
+ "VideoInputRequestEmbeddingTypeItem",
"VideoItem",
"VideoItemFailed",
"VideoSegment",
diff --git a/src/twelvelabs/base_client.py b/src/twelvelabs/base_client.py
index 5bf85e3..8d690a6 100644
--- a/src/twelvelabs/base_client.py
+++ b/src/twelvelabs/base_client.py
@@ -14,12 +14,9 @@
from .raw_base_client import AsyncRawBaseClient, RawBaseClient
from .search.client import AsyncSearchClient, SearchClient
from .tasks.client import AsyncTasksClient, TasksClient
-from .types.gist import Gist
-from .types.gist_request_types_item import GistRequestTypesItem
from .types.non_stream_analyze_response import NonStreamAnalyzeResponse
from .types.response_format import ResponseFormat
from .types.stream_analyze_response import StreamAnalyzeResponse
-from .types.summarize_response import SummarizeResponse
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -110,146 +107,6 @@ def with_raw_response(self) -> RawBaseClient:
"""
return self._raw_client
- def summarize(
- self,
- *,
- video_id: str,
- type: str,
- prompt: typing.Optional[str] = OMIT,
- temperature: typing.Optional[float] = OMIT,
- response_format: typing.Optional[ResponseFormat] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> SummarizeResponse:
- """
-
-
- This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
- This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video that you want to summarize.
-
- type : str
- Specifies the type of summary. Use one of the following values:
- - `summary`: A brief that encapsulates the key points of a video, presenting the most important information clearly and concisely.
- - `chapter`: A chronological list of all the chapters in a video, providing a granular breakdown of its content. For each chapter, the platform returns its starting and end times, measured in seconds from the beginning of the video clip, a descriptive headline that offers a brief of the events or activities within that part of the video, and an accompanying summary that elaborates on the headline.
- - `highlight`: A chronologically ordered list of the most important events within a video. Unlike chapters, highlights only capture the key moments, providing a snapshot of the video's main topics. For each highlight, the platform returns its starting and end times, measured in seconds from the beginning of the video, a title, and a brief description that captures the essence of this part of the video.
-
- prompt : typing.Optional[str]
- Use this field to provide context for the summarization task, such as the target audience, style, tone of voice, and purpose.
-
-
- - Your prompts can be instructive or descriptive, or you can also phrase them as questions.
- - The maximum length of a prompt is 2,000 tokens.
-
-
- **Example**: Generate a summary of this video for a social media post, up to two sentences.
-
- temperature : typing.Optional[float]
- Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
- **Default:** 0.2
- **Min:** 0
- **Max:** 1
-
- response_format : typing.Optional[ResponseFormat]
- Use this parameter to specify the format of the response.
- This parameter is only valid when the `type` parameter is set to `summary`.
- If you omit this parameter, the platform returns unstructured text.
-
- max_tokens : typing.Optional[int]
- The maximum number of tokens to generate.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- SummarizeResponse
- The specified video has successfully been summarized.
-
- Examples
- --------
- from twelvelabs import TwelveLabs
-
- client = TwelveLabs(
- api_key="YOUR_API_KEY",
- )
- client.summarize(
- video_id="6298d673f1090f1100476d4c",
- type="summary",
- prompt="Generate a summary of this video for a social media post, up to two sentences.",
- temperature=0.2,
- )
- """
- _response = self._raw_client.summarize(
- video_id=video_id,
- type=type,
- prompt=prompt,
- temperature=temperature,
- response_format=response_format,
- max_tokens=max_tokens,
- request_options=request_options,
- )
- return _response.data
-
- def gist(
- self,
- *,
- video_id: str,
- types: typing.Sequence[GistRequestTypesItem],
- request_options: typing.Optional[RequestOptions] = None,
- ) -> Gist:
- """
-
- This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
- This method analyzes videos and generates titles, topics, and hashtags.
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video that you want to generate a gist for.
-
- types : typing.Sequence[GistRequestTypesItem]
- Specifies the type of gist. Use one of the following values:
- - `title`: A title succinctly captures a video's main theme, such as "From Consumerism to Minimalism: A Journey Toward Sustainable Living," guiding viewers to its content and themes.
- - `topic`: A topic is the central theme of a video, such as "Shopping Vlog Lifestyle", summarizing its content for efficient categorization and reference.
- - `hashtag`: A hashtag, like "#BlackFriday", represents key themes in a video, enhancing its discoverability and categorization on social media platforms.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- Gist
- The gist of the specified video has successfully been generated.
-
- Examples
- --------
- from twelvelabs import TwelveLabs
-
- client = TwelveLabs(
- api_key="YOUR_API_KEY",
- )
- client.gist(
- video_id="6298d673f1090f1100476d4c",
- types=["title", "topic"],
- )
- """
- _response = self._raw_client.gist(video_id=video_id, types=types, request_options=request_options)
- return _response.data
-
def analyze_stream(
self,
*,
@@ -265,7 +122,7 @@ def analyze_stream(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+ - This endpoint supports streaming responses.
Parameters
@@ -359,7 +216,7 @@ def analyze(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+ - This endpoint supports streaming responses.
Parameters
@@ -522,162 +379,6 @@ def with_raw_response(self) -> AsyncRawBaseClient:
"""
return self._raw_client
- async def summarize(
- self,
- *,
- video_id: str,
- type: str,
- prompt: typing.Optional[str] = OMIT,
- temperature: typing.Optional[float] = OMIT,
- response_format: typing.Optional[ResponseFormat] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> SummarizeResponse:
- """
-
-
- This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
- This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video that you want to summarize.
-
- type : str
- Specifies the type of summary. Use one of the following values:
- - `summary`: A brief that encapsulates the key points of a video, presenting the most important information clearly and concisely.
- - `chapter`: A chronological list of all the chapters in a video, providing a granular breakdown of its content. For each chapter, the platform returns its starting and end times, measured in seconds from the beginning of the video clip, a descriptive headline that offers a brief of the events or activities within that part of the video, and an accompanying summary that elaborates on the headline.
- - `highlight`: A chronologically ordered list of the most important events within a video. Unlike chapters, highlights only capture the key moments, providing a snapshot of the video's main topics. For each highlight, the platform returns its starting and end times, measured in seconds from the beginning of the video, a title, and a brief description that captures the essence of this part of the video.
-
- prompt : typing.Optional[str]
- Use this field to provide context for the summarization task, such as the target audience, style, tone of voice, and purpose.
-
-
- - Your prompts can be instructive or descriptive, or you can also phrase them as questions.
- - The maximum length of a prompt is 2,000 tokens.
-
-
- **Example**: Generate a summary of this video for a social media post, up to two sentences.
-
- temperature : typing.Optional[float]
- Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
- **Default:** 0.2
- **Min:** 0
- **Max:** 1
-
- response_format : typing.Optional[ResponseFormat]
- Use this parameter to specify the format of the response.
- This parameter is only valid when the `type` parameter is set to `summary`.
- If you omit this parameter, the platform returns unstructured text.
-
- max_tokens : typing.Optional[int]
- The maximum number of tokens to generate.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- SummarizeResponse
- The specified video has successfully been summarized.
-
- Examples
- --------
- import asyncio
-
- from twelvelabs import AsyncTwelveLabs
-
- client = AsyncTwelveLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.summarize(
- video_id="6298d673f1090f1100476d4c",
- type="summary",
- prompt="Generate a summary of this video for a social media post, up to two sentences.",
- temperature=0.2,
- )
-
-
- asyncio.run(main())
- """
- _response = await self._raw_client.summarize(
- video_id=video_id,
- type=type,
- prompt=prompt,
- temperature=temperature,
- response_format=response_format,
- max_tokens=max_tokens,
- request_options=request_options,
- )
- return _response.data
-
- async def gist(
- self,
- *,
- video_id: str,
- types: typing.Sequence[GistRequestTypesItem],
- request_options: typing.Optional[RequestOptions] = None,
- ) -> Gist:
- """
-
- This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
- This method analyzes videos and generates titles, topics, and hashtags.
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video that you want to generate a gist for.
-
- types : typing.Sequence[GistRequestTypesItem]
- Specifies the type of gist. Use one of the following values:
- - `title`: A title succinctly captures a video's main theme, such as "From Consumerism to Minimalism: A Journey Toward Sustainable Living," guiding viewers to its content and themes.
- - `topic`: A topic is the central theme of a video, such as "Shopping Vlog Lifestyle", summarizing its content for efficient categorization and reference.
- - `hashtag`: A hashtag, like "#BlackFriday", represents key themes in a video, enhancing its discoverability and categorization on social media platforms.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- Gist
- The gist of the specified video has successfully been generated.
-
- Examples
- --------
- import asyncio
-
- from twelvelabs import AsyncTwelveLabs
-
- client = AsyncTwelveLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.gist(
- video_id="6298d673f1090f1100476d4c",
- types=["title", "topic"],
- )
-
-
- asyncio.run(main())
- """
- _response = await self._raw_client.gist(video_id=video_id, types=types, request_options=request_options)
- return _response.data
-
async def analyze_stream(
self,
*,
@@ -693,7 +394,7 @@ async def analyze_stream(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+ - This endpoint supports streaming responses.
Parameters
@@ -796,7 +497,7 @@ async def analyze(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+ - This endpoint supports streaming responses.
Parameters
diff --git a/src/twelvelabs/core/client_wrapper.py b/src/twelvelabs/core/client_wrapper.py
index edd6c22..0523237 100644
--- a/src/twelvelabs/core/client_wrapper.py
+++ b/src/twelvelabs/core/client_wrapper.py
@@ -22,10 +22,10 @@ def __init__(
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "twelvelabs/1.2.0",
+ "User-Agent": "twelvelabs/1.2.1",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "twelvelabs",
- "X-Fern-SDK-Version": "1.2.0",
+ "X-Fern-SDK-Version": "1.2.1",
**(self.get_custom_headers() or {}),
}
headers["x-api-key"] = self.api_key
diff --git a/src/twelvelabs/embed/client.py b/src/twelvelabs/embed/client.py
index 7aa1bf7..d59d957 100644
--- a/src/twelvelabs/embed/client.py
+++ b/src/twelvelabs/embed/client.py
@@ -81,8 +81,8 @@ def create(
----------
model_name : str
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
text : typing.Optional[str]
The text for which you wish to create an embedding.
@@ -219,8 +219,8 @@ async def create(
----------
model_name : str
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
text : typing.Optional[str]
The text for which you wish to create an embedding.
diff --git a/src/twelvelabs/embed/raw_client.py b/src/twelvelabs/embed/raw_client.py
index b2a4994..77262e0 100644
--- a/src/twelvelabs/embed/raw_client.py
+++ b/src/twelvelabs/embed/raw_client.py
@@ -69,8 +69,8 @@ def create(
----------
model_name : str
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
text : typing.Optional[str]
The text for which you wish to create an embedding.
@@ -214,8 +214,8 @@ async def create(
----------
model_name : str
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
text : typing.Optional[str]
The text for which you wish to create an embedding.
diff --git a/src/twelvelabs/embed/tasks/client.py b/src/twelvelabs/embed/tasks/client.py
index abefc7d..4224c37 100644
--- a/src/twelvelabs/embed/tasks/client.py
+++ b/src/twelvelabs/embed/tasks/client.py
@@ -157,8 +157,8 @@ def create(
----------
model_name : str
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
video_file : typing.Optional[core.File]
See core.File for more documentation
@@ -478,8 +478,8 @@ async def create(
----------
model_name : str
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
video_file : typing.Optional[core.File]
See core.File for more documentation
diff --git a/src/twelvelabs/embed/tasks/raw_client.py b/src/twelvelabs/embed/tasks/raw_client.py
index 3e18801..6788ccd 100644
--- a/src/twelvelabs/embed/tasks/raw_client.py
+++ b/src/twelvelabs/embed/tasks/raw_client.py
@@ -175,8 +175,8 @@ def create(
----------
model_name : str
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
video_file : typing.Optional[core.File]
See core.File for more documentation
@@ -560,8 +560,8 @@ async def create(
----------
model_name : str
The name of the model you want to use. The following models are available:
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `Marengo-retrieval-2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
video_file : typing.Optional[core.File]
See core.File for more documentation
diff --git a/src/twelvelabs/embed/v_2/client.py b/src/twelvelabs/embed/v_2/client.py
index a6d8cb8..b94245a 100644
--- a/src/twelvelabs/embed/v_2/client.py
+++ b/src/twelvelabs/embed/v_2/client.py
@@ -7,6 +7,7 @@
from ...types.audio_input_request import AudioInputRequest
from ...types.embedding_success_response import EmbeddingSuccessResponse
from ...types.image_input_request import ImageInputRequest
+from ...types.multi_input_request import MultiInputRequest
from ...types.text_image_input_request import TextImageInputRequest
from ...types.text_input_request import TextInputRequest
from ...types.video_input_request import VideoInputRequest
@@ -45,6 +46,7 @@ def create(
text_image: typing.Optional[TextImageInputRequest] = OMIT,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
+ multi_input: typing.Optional[MultiInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EmbeddingSuccessResponse:
"""
@@ -56,7 +58,7 @@ def create(
**When to use this endpoint**:
- Create embeddings for text, images, audio, or video content
- - Get immediate results without waiting for background processing
+ - Retrieve immediate results without waiting for background processing
- Process audio or video content up to 10 minutes in duration
**Do not use this endpoint for**:
@@ -95,7 +97,8 @@ def create(
- `video`: Creates embeddings for a video file
- `image`: Creates embeddings for an image file
- `text`: Creates embeddings for text input
- - `text_image`: Creates embeddings for text and an image.
+ - `text_image`: Creates embeddings for text and an image
+ - `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source.
model_name : CreateEmbeddingsRequestModelName
The video understanding model to use. Only "marengo3.0" is supported.
@@ -110,6 +113,8 @@ def create(
video : typing.Optional[VideoInputRequest]
+ multi_input : typing.Optional[MultiInputRequest]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -141,6 +146,7 @@ def create(
text_image=text_image,
audio=audio,
video=video,
+ multi_input=multi_input,
request_options=request_options,
)
return _response.data
@@ -172,6 +178,7 @@ async def create(
text_image: typing.Optional[TextImageInputRequest] = OMIT,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
+ multi_input: typing.Optional[MultiInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EmbeddingSuccessResponse:
"""
@@ -183,7 +190,7 @@ async def create(
**When to use this endpoint**:
- Create embeddings for text, images, audio, or video content
- - Get immediate results without waiting for background processing
+ - Retrieve immediate results without waiting for background processing
- Process audio or video content up to 10 minutes in duration
**Do not use this endpoint for**:
@@ -222,7 +229,8 @@ async def create(
- `video`: Creates embeddings for a video file
- `image`: Creates embeddings for an image file
- `text`: Creates embeddings for text input
- - `text_image`: Creates embeddings for text and an image.
+ - `text_image`: Creates embeddings for text and an image
+ - `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source.
model_name : CreateEmbeddingsRequestModelName
The video understanding model to use. Only "marengo3.0" is supported.
@@ -237,6 +245,8 @@ async def create(
video : typing.Optional[VideoInputRequest]
+ multi_input : typing.Optional[MultiInputRequest]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -276,6 +286,7 @@ async def main() -> None:
text_image=text_image,
audio=audio,
video=video,
+ multi_input=multi_input,
request_options=request_options,
)
return _response.data
diff --git a/src/twelvelabs/embed/v_2/raw_client.py b/src/twelvelabs/embed/v_2/raw_client.py
index 2772749..d19ad6c 100644
--- a/src/twelvelabs/embed/v_2/raw_client.py
+++ b/src/twelvelabs/embed/v_2/raw_client.py
@@ -15,6 +15,7 @@
from ...types.audio_input_request import AudioInputRequest
from ...types.embedding_success_response import EmbeddingSuccessResponse
from ...types.image_input_request import ImageInputRequest
+from ...types.multi_input_request import MultiInputRequest
from ...types.text_image_input_request import TextImageInputRequest
from ...types.text_input_request import TextInputRequest
from ...types.video_input_request import VideoInputRequest
@@ -39,6 +40,7 @@ def create(
text_image: typing.Optional[TextImageInputRequest] = OMIT,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
+ multi_input: typing.Optional[MultiInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[EmbeddingSuccessResponse]:
"""
@@ -50,7 +52,7 @@ def create(
**When to use this endpoint**:
- Create embeddings for text, images, audio, or video content
- - Get immediate results without waiting for background processing
+ - Retrieve immediate results without waiting for background processing
- Process audio or video content up to 10 minutes in duration
**Do not use this endpoint for**:
@@ -89,7 +91,8 @@ def create(
- `video`: Creates embeddings for a video file
- `image`: Creates embeddings for an image file
- `text`: Creates embeddings for text input
- - `text_image`: Creates embeddings for text and an image.
+ - `text_image`: Creates embeddings for text and an image
+ - `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source.
model_name : CreateEmbeddingsRequestModelName
The video understanding model to use. Only "marengo3.0" is supported.
@@ -104,6 +107,8 @@ def create(
video : typing.Optional[VideoInputRequest]
+ multi_input : typing.Optional[MultiInputRequest]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -133,6 +138,9 @@ def create(
"video": convert_and_respect_annotation_metadata(
object_=video, annotation=VideoInputRequest, direction="write"
),
+ "multi_input": convert_and_respect_annotation_metadata(
+ object_=multi_input, annotation=MultiInputRequest, direction="write"
+ ),
},
headers={
"content-type": "application/json",
@@ -203,6 +211,7 @@ async def create(
text_image: typing.Optional[TextImageInputRequest] = OMIT,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
+ multi_input: typing.Optional[MultiInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[EmbeddingSuccessResponse]:
"""
@@ -214,7 +223,7 @@ async def create(
**When to use this endpoint**:
- Create embeddings for text, images, audio, or video content
- - Get immediate results without waiting for background processing
+ - Retrieve immediate results without waiting for background processing
- Process audio or video content up to 10 minutes in duration
**Do not use this endpoint for**:
@@ -253,7 +262,8 @@ async def create(
- `video`: Creates embeddings for a video file
- `image`: Creates embeddings for an image file
- `text`: Creates embeddings for text input
- - `text_image`: Creates embeddings for text and an image.
+ - `text_image`: Creates embeddings for text and an image
+ - `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source.
model_name : CreateEmbeddingsRequestModelName
The video understanding model to use. Only "marengo3.0" is supported.
@@ -268,6 +278,8 @@ async def create(
video : typing.Optional[VideoInputRequest]
+ multi_input : typing.Optional[MultiInputRequest]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -297,6 +309,9 @@ async def create(
"video": convert_and_respect_annotation_metadata(
object_=video, annotation=VideoInputRequest, direction="write"
),
+ "multi_input": convert_and_respect_annotation_metadata(
+ object_=multi_input, annotation=MultiInputRequest, direction="write"
+ ),
},
headers={
"content-type": "application/json",
diff --git a/src/twelvelabs/embed/v_2/types/create_embeddings_request_input_type.py b/src/twelvelabs/embed/v_2/types/create_embeddings_request_input_type.py
index 8bc8f95..69f7fe8 100644
--- a/src/twelvelabs/embed/v_2/types/create_embeddings_request_input_type.py
+++ b/src/twelvelabs/embed/v_2/types/create_embeddings_request_input_type.py
@@ -3,5 +3,5 @@
import typing
CreateEmbeddingsRequestInputType = typing.Union[
- typing.Literal["text", "image", "text_image", "audio", "video"], typing.Any
+ typing.Literal["text", "image", "text_image", "audio", "video", "multi_input"], typing.Any
]
diff --git a/src/twelvelabs/indexes/types/indexes_create_request_models_item.py b/src/twelvelabs/indexes/types/indexes_create_request_models_item.py
index ebb3f3b..3e828c9 100644
--- a/src/twelvelabs/indexes/types/indexes_create_request_models_item.py
+++ b/src/twelvelabs/indexes/types/indexes_create_request_models_item.py
@@ -13,8 +13,8 @@ class IndexesCreateRequestModelsItem(UniversalBaseModel):
- **Embedding**: These models are proficient at performing tasks such as search and classification, enabling enhanced video understanding.
- - `marengo3.0`: Enhanced model with sports intelligence and extended content support. For a list of the new features, see the [New in Marengo 3.0](/v1.3/docs/concepts/models/marengo#new-in-marengo-30) section.
- - `marengo2.7`: Video embedding model for multimodal search.
+ - `marengo3.0`: Enhanced model with sports intelligence and extended content support.
+ - `marengo2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details.
- **Generative**: These models generate text based on your videos.
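
A minimal sketch of creating an index on the non-deprecated model; the `indexes.create` method, the `model_options` field, and the import path are assumptions following the SDK's conventions, not shown in this patch:
```python
from twelvelabs import TwelveLabs
from twelvelabs.indexes import IndexesCreateRequestModelsItem  # import path is an assumption

client = TwelveLabs(
    api_key="YOUR_API_KEY",
)
# Index on marengo3.0 rather than the soon-to-be-deprecated marengo2.7.
client.indexes.create(
    index_name="my-index",
    models=[
        IndexesCreateRequestModelsItem(
            model_name="marengo3.0",
            model_options=["visual", "audio"],  # field name is an assumption
        )
    ],
)
```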
diff --git a/src/twelvelabs/raw_base_client.py b/src/twelvelabs/raw_base_client.py
index fd55260..46fce27 100644
--- a/src/twelvelabs/raw_base_client.py
+++ b/src/twelvelabs/raw_base_client.py
@@ -13,12 +13,9 @@
from .core.serialization import convert_and_respect_annotation_metadata
from .errors.bad_request_error import BadRequestError
from .errors.too_many_requests_error import TooManyRequestsError
-from .types.gist import Gist
-from .types.gist_request_types_item import GistRequestTypesItem
from .types.non_stream_analyze_response import NonStreamAnalyzeResponse
from .types.response_format import ResponseFormat
from .types.stream_analyze_response import StreamAnalyzeResponse
-from .types.summarize_response import SummarizeResponse
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -28,212 +25,6 @@ class RawBaseClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def summarize(
- self,
- *,
- video_id: str,
- type: str,
- prompt: typing.Optional[str] = OMIT,
- temperature: typing.Optional[float] = OMIT,
- response_format: typing.Optional[ResponseFormat] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[SummarizeResponse]:
- """
-
-
- This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
- This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video that you want to summarize.
-
- type : str
- Specifies the type of summary. Use one of the following values:
- - `summary`: A brief that encapsulates the key points of a video, presenting the most important information clearly and concisely.
- - `chapter`: A chronological list of all the chapters in a video, providing a granular breakdown of its content. For each chapter, the platform returns its starting and end times, measured in seconds from the beginning of the video clip, a descriptive headline that offers a brief of the events or activities within that part of the video, and an accompanying summary that elaborates on the headline.
- - `highlight`: A chronologically ordered list of the most important events within a video. Unlike chapters, highlights only capture the key moments, providing a snapshot of the video's main topics. For each highlight, the platform returns its starting and end times, measured in seconds from the beginning of the video, a title, and a brief description that captures the essence of this part of the video.
-
- prompt : typing.Optional[str]
- Use this field to provide context for the summarization task, such as the target audience, style, tone of voice, and purpose.
-
-
- - Your prompts can be instructive or descriptive, or you can also phrase them as questions.
- - The maximum length of a prompt is 2,000 tokens.
-
-
- **Example**: Generate a summary of this video for a social media post, up to two sentences.
-
- temperature : typing.Optional[float]
- Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
- **Default:** 0.2
- **Min:** 0
- **Max:** 1
-
- response_format : typing.Optional[ResponseFormat]
- Use this parameter to specify the format of the response.
- This parameter is only valid when the `type` parameter is set to `summary`.
- If you omit this parameter, the platform returns unstructured text.
-
- max_tokens : typing.Optional[int]
- The maximum number of tokens to generate.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- HttpResponse[SummarizeResponse]
- The specified video has successfully been summarized.
- """
- _response = self._client_wrapper.httpx_client.request(
- "summarize",
- method="POST",
- json={
- "video_id": video_id,
- "type": type,
- "prompt": prompt,
- "temperature": temperature,
- "response_format": convert_and_respect_annotation_metadata(
- object_=response_format, annotation=ResponseFormat, direction="write"
- ),
- "max_tokens": max_tokens,
- },
- headers={
- "content-type": "application/json",
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- _data = typing.cast(
- SummarizeResponse,
- parse_obj_as(
- type_=SummarizeResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- return HttpResponse(response=_response, data=_data)
- if _response.status_code == 400:
- raise BadRequestError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
- def gist(
- self,
- *,
- video_id: str,
- types: typing.Sequence[GistRequestTypesItem],
- request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[Gist]:
- """
-
- This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
- This method analyzes videos and generates titles, topics, and hashtags.
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video that you want to generate a gist for.
-
- types : typing.Sequence[GistRequestTypesItem]
- Specifies the type of gist. Use one of the following values:
- - `title`: A title succinctly captures a video's main theme, such as "From Consumerism to Minimalism: A Journey Toward Sustainable Living," guiding viewers to its content and themes.
- - `topic`: A topic is the central theme of a video, such as "Shopping Vlog Lifestyle", summarizing its content for efficient categorization and reference.
- - `hashtag`: A hashtag, like "#BlackFriday", represents key themes in a video, enhancing its discoverability and categorization on social media platforms.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- HttpResponse[Gist]
- The gist of the specified video has successfully been generated.
- """
- _response = self._client_wrapper.httpx_client.request(
- "gist",
- method="POST",
- json={
- "video_id": video_id,
- "types": types,
- },
- headers={
- "content-type": "application/json",
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- _data = typing.cast(
- Gist,
- parse_obj_as(
- type_=Gist, # type: ignore
- object_=_response.json(),
- ),
- )
- return HttpResponse(response=_response, data=_data)
- if _response.status_code == 400:
- raise BadRequestError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
@contextlib.contextmanager
def analyze_stream(
self,
@@ -250,7 +41,7 @@ def analyze_stream(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+ - This endpoint supports streaming responses.
Parameters
@@ -380,7 +171,7 @@ def analyze(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+ - This endpoint supports streaming responses.
Parameters
@@ -483,212 +274,6 @@ class AsyncRawBaseClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def summarize(
- self,
- *,
- video_id: str,
- type: str,
- prompt: typing.Optional[str] = OMIT,
- temperature: typing.Optional[float] = OMIT,
- response_format: typing.Optional[ResponseFormat] = OMIT,
- max_tokens: typing.Optional[int] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[SummarizeResponse]:
- """
-
-
- This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
- This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video that you want to summarize.
-
- type : str
- Specifies the type of summary. Use one of the following values:
- - `summary`: A brief that encapsulates the key points of a video, presenting the most important information clearly and concisely.
- - `chapter`: A chronological list of all the chapters in a video, providing a granular breakdown of its content. For each chapter, the platform returns its starting and end times, measured in seconds from the beginning of the video clip, a descriptive headline that offers a brief of the events or activities within that part of the video, and an accompanying summary that elaborates on the headline.
- - `highlight`: A chronologically ordered list of the most important events within a video. Unlike chapters, highlights only capture the key moments, providing a snapshot of the video's main topics. For each highlight, the platform returns its starting and end times, measured in seconds from the beginning of the video, a title, and a brief description that captures the essence of this part of the video.
-
- prompt : typing.Optional[str]
- Use this field to provide context for the summarization task, such as the target audience, style, tone of voice, and purpose.
-
-
- - Your prompts can be instructive or descriptive, or you can also phrase them as questions.
- - The maximum length of a prompt is 2,000 tokens.
-
-
- **Example**: Generate a summary of this video for a social media post, up to two sentences.
-
- temperature : typing.Optional[float]
- Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
- **Default:** 0.2
- **Min:** 0
- **Max:** 1
-
- response_format : typing.Optional[ResponseFormat]
- Use this parameter to specify the format of the response.
- This parameter is only valid when the `type` parameter is set to `summary`.
- If you omit this parameter, the platform returns unstructured text.
-
- max_tokens : typing.Optional[int]
- The maximum number of tokens to generate.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- AsyncHttpResponse[SummarizeResponse]
- The specified video has successfully been summarized.
- """
- _response = await self._client_wrapper.httpx_client.request(
- "summarize",
- method="POST",
- json={
- "video_id": video_id,
- "type": type,
- "prompt": prompt,
- "temperature": temperature,
- "response_format": convert_and_respect_annotation_metadata(
- object_=response_format, annotation=ResponseFormat, direction="write"
- ),
- "max_tokens": max_tokens,
- },
- headers={
- "content-type": "application/json",
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- _data = typing.cast(
- SummarizeResponse,
- parse_obj_as(
- type_=SummarizeResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- return AsyncHttpResponse(response=_response, data=_data)
- if _response.status_code == 400:
- raise BadRequestError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
- async def gist(
- self,
- *,
- video_id: str,
- types: typing.Sequence[GistRequestTypesItem],
- request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[Gist]:
- """
-
- This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
-
-
- This method analyzes videos and generates titles, topics, and hashtags.
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video that you want to generate a gist for.
-
- types : typing.Sequence[GistRequestTypesItem]
- Specifies the type of gist. Use one of the following values:
- - `title`: A title succinctly captures a video's main theme, such as "From Consumerism to Minimalism: A Journey Toward Sustainable Living," guiding viewers to its content and themes.
- - `topic`: A topic is the central theme of a video, such as "Shopping Vlog Lifestyle", summarizing its content for efficient categorization and reference.
- - `hashtag`: A hashtag, like "#BlackFriday", represents key themes in a video, enhancing its discoverability and categorization on social media platforms.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- AsyncHttpResponse[Gist]
- The gist of the specified video has successfully been generated.
- """
- _response = await self._client_wrapper.httpx_client.request(
- "gist",
- method="POST",
- json={
- "video_id": video_id,
- "types": types,
- },
- headers={
- "content-type": "application/json",
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- _data = typing.cast(
- Gist,
- parse_obj_as(
- type_=Gist, # type: ignore
- object_=_response.json(),
- ),
- )
- return AsyncHttpResponse(response=_response, data=_data)
- if _response.status_code == 400:
- raise BadRequestError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
@contextlib.asynccontextmanager
async def analyze_stream(
self,
@@ -705,7 +290,7 @@ async def analyze_stream(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+ - This endpoint supports streaming responses.
Parameters
@@ -835,7 +420,7 @@ async def analyze(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
+ - This endpoint supports streaming responses.
Parameters
diff --git a/src/twelvelabs/search/client.py b/src/twelvelabs/search/client.py
index 270a4a3..5d444a3 100644
--- a/src/twelvelabs/search/client.py
+++ b/src/twelvelabs/search/client.py
@@ -63,20 +63,17 @@ def create(
**Media queries**:
- Set the `query_media_type` parameter to the corresponding media type (example: `image`).
- - Specify either one of the following parameters:
+ - Provide up to 10 images by specifying the following parameters multiple times:
- `query_media_url`: Publicly accessible URL of your media file.
- `query_media_file`: Local media file.
- If both `query_media_url` and `query_media_file` are specified in the same request, `query_media_url` takes precedence.
+ - Marengo 2.7 supports a single image per request.
**Composed text and media queries** (Marengo 3.0 only):
- Use the `query_text` parameter for your text query.
- Set `query_media_type` to `image`.
- - Specify the image using either the `query_media_url` or the `query_media_file` parameter.
-
- Example: Provide an image of a car and include "red color" in your query to find red instances of that car model.
+ - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times.
**Entity search** (Marengo 3.0 only and in beta):
-
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
@@ -112,7 +109,13 @@ def create(
The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
query_media_url : typing.Optional[str]
- The publicly accessible URL of the media file you wish to use. This parameter is required for media queries if `query_media_file` is not provided.
+ The publicly accessible URL of a media file to use as a query. This parameter is required for media queries if `query_media_file` is not provided.
+
+ You can provide up to 10 images by specifying this parameter multiple times (Marengo 3.0 only):
+ ```
+ --form query_media_url=https://example.com/image1.jpg \
+ --form query_media_url=https://example.com/image2.jpg
+ ```
query_media_file : typing.Optional[core.File]
See core.File for more documentation
@@ -352,20 +355,17 @@ async def create(
**Media queries**:
- Set the `query_media_type` parameter to the corresponding media type (example: `image`).
- - Specify either one of the following parameters:
+ - Provide up to 10 images by specifying the following parameters multiple times:
- `query_media_url`: Publicly accessible URL of your media file.
- `query_media_file`: Local media file.
- If both `query_media_url` and `query_media_file` are specified in the same request, `query_media_url` takes precedence.
+ - Marengo 2.7 supports a single image per request.
**Composed text and media queries** (Marengo 3.0 only):
- Use the `query_text` parameter for your text query.
- Set `query_media_type` to `image`.
- - Specify the image using either the `query_media_url` or the `query_media_file` parameter.
-
- Example: Provide an image of a car and include "red color" in your query to find red instances of that car model.
+ - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times.
**Entity search** (Marengo 3.0 only and in beta):
-
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
@@ -401,7 +401,13 @@ async def create(
The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
query_media_url : typing.Optional[str]
- The publicly accessible URL of the media file you wish to use. This parameter is required for media queries if `query_media_file` is not provided.
+ The publicly accessible URL of a media file to use as a query. This parameter is required for media queries if `query_media_file` is not provided.
+
+ You can provide up to 10 images by specifying this parameter multiple times (Marengo 3.0 only):
+ ```
+ --form query_media_url=https://example.com/image1.jpg \
+ --form query_media_url=https://example.com/image2.jpg
+ ```
query_media_file : typing.Optional[core.File]
See core.File for more documentation
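For illustration, a minimal Python sketch of the multi-image search described above — assuming the package exports a `TwelveLabs` client whose `search.query` wrapper accepts the `query_media_urls` list introduced in the second patch, and that the returned `SearchResults` page exposes a `data` list:

```
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")  # placeholder credentials

# Up to 10 image URLs per request (Marengo 3.0 only).
page = client.search.query(
    index_id="YOUR_INDEX_ID",  # placeholder index identifier
    search_options=["visual"],
    query_media_type="image",
    query_media_urls=[
        "https://example.com/image1.jpg",
        "https://example.com/image2.jpg",
    ],
)
for clip in page.data:  # assumed response shape
    print(clip.video_id, clip.score)
```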
diff --git a/src/twelvelabs/search/raw_client.py b/src/twelvelabs/search/raw_client.py
index 85870ff..4eda3d5 100644
--- a/src/twelvelabs/search/raw_client.py
+++ b/src/twelvelabs/search/raw_client.py
@@ -58,20 +58,17 @@ def create(
**Media queries**:
- Set the `query_media_type` parameter to the corresponding media type (example: `image`).
- - Specify either one of the following parameters:
+ - Provide up to 10 images by specifying the following parameters multiple times:
- `query_media_url`: Publicly accessible URL of your media file.
- `query_media_file`: Local media file.
- If both `query_media_url` and `query_media_file` are specified in the same request, `query_media_url` takes precedence.
+ - Marengo 2.7 supports a single image per request.
**Composed text and media queries** (Marengo 3.0 only):
- Use the `query_text` parameter for your text query.
- Set `query_media_type` to `image`.
- - Specify the image using either the `query_media_url` or the `query_media_file` parameter.
-
- Example: Provide an image of a car and include "red color" in your query to find red instances of that car model.
+ - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times.
**Entity search** (Marengo 3.0 only and in beta):
-
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
@@ -107,7 +104,13 @@ def create(
The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
query_media_url : typing.Optional[str]
- The publicly accessible URL of the media file you wish to use. This parameter is required for media queries if `query_media_file` is not provided.
+ The publicly accessible URL of a media file to use as a query. This parameter is required for media queries if `query_media_file` is not provided.
+
+ You can provide up to 10 images by specifying this parameter multiple times (Marengo 3.0 only):
+ ```
+ --form query_media_url=https://example.com/image1.jpg \
+ --form query_media_url=https://example.com/image2.jpg
+ ```
query_media_file : typing.Optional[core.File]
See core.File for more documentation
@@ -384,20 +387,17 @@ async def create(
**Media queries**:
- Set the `query_media_type` parameter to the corresponding media type (example: `image`).
- - Specify either one of the following parameters:
+ - Provide up to 10 images by specifying the following parameters multiple times:
- `query_media_url`: Publicly accessible URL of your media file.
- `query_media_file`: Local media file.
- If both `query_media_url` and `query_media_file` are specified in the same request, `query_media_url` takes precedence.
+ - Marengo 2.7 supports a single image per request.
**Composed text and media queries** (Marengo 3.0 only):
- Use the `query_text` parameter for your text query.
- Set `query_media_type` to `image`.
- - Specify the image using either the `query_media_url` or the `query_media_file` parameter.
-
- Example: Provide an image of a car and include "red color" in your query to find red instances of that car model.
+ - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times.
**Entity search** (Marengo 3.0 only and in beta):
-
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
@@ -433,7 +433,13 @@ async def create(
The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
query_media_url : typing.Optional[str]
- The publicly accessible URL of the media file you wish to use. This parameter is required for media queries if `query_media_file` is not provided.
+ The publicly accessible URL of a media file to use as a query. This parameter is required for media queries if `query_media_file` is not provided.
+
+ You can provide up to 10 images by specifying this parameter multiple times (Marengo 3.0 only):
+ ```
+ --form query_media_url=https://example.com/image1.jpg \
+ --form query_media_url=https://example.com/image2.jpg
+ ```
query_media_file : typing.Optional[core.File]
See core.File for more documentation
diff --git a/src/twelvelabs/types/__init__.py b/src/twelvelabs/types/__init__.py
index 1880b3e..5fedbba 100644
--- a/src/twelvelabs/types/__init__.py
+++ b/src/twelvelabs/types/__init__.py
@@ -10,6 +10,7 @@
from .audio_input_request import AudioInputRequest
from .audio_input_request_embedding_option_item import AudioInputRequestEmbeddingOptionItem
from .audio_input_request_embedding_scope_item import AudioInputRequestEmbeddingScopeItem
+from .audio_input_request_embedding_type_item import AudioInputRequestEmbeddingTypeItem
from .audio_segment import AudioSegment
from .audio_segmentation import AudioSegmentation
from .audio_segmentation_fixed import AudioSegmentationFixed
@@ -37,9 +38,11 @@
EmbeddingMediaMetadata,
EmbeddingMediaMetadata_Audio,
EmbeddingMediaMetadata_Image,
+ EmbeddingMediaMetadata_MultiInput,
EmbeddingMediaMetadata_TextImage,
EmbeddingMediaMetadata_Video,
)
+from .embedding_multi_input_metadata import EmbeddingMultiInputMetadata
from .embedding_response import EmbeddingResponse
from .embedding_success_response import EmbeddingSuccessResponse
from .embedding_task_media_metadata import (
@@ -63,8 +66,6 @@
from .finish_reason import FinishReason
from .forbidden_error_body import ForbiddenErrorBody
from .get_upload_status_response import GetUploadStatusResponse
-from .gist import Gist
-from .gist_request_types_item import GistRequestTypesItem
from .hls_object import HlsObject
from .hls_object_status import HlsObjectStatus
from .image_embedding_result import ImageEmbeddingResult
@@ -89,6 +90,9 @@
from .media_embedding_task_audio_embedding import MediaEmbeddingTaskAudioEmbedding
from .media_embedding_task_video_embedding import MediaEmbeddingTaskVideoEmbedding
from .media_source import MediaSource
+from .multi_input_media_source import MultiInputMediaSource
+from .multi_input_media_source_media_type import MultiInputMediaSourceMediaType
+from .multi_input_request import MultiInputRequest
from .multipart_upload_status_type import MultipartUploadStatusType
from .next_page_token import NextPageToken
from .non_stream_analyze_response import NonStreamAnalyzeResponse
@@ -123,15 +127,12 @@
from .stream_text_response import StreamTextResponse
from .summarize_chapter_result import SummarizeChapterResult
from .summarize_chapter_result_chapters_item import SummarizeChapterResultChaptersItem
+from .summarize_chapter_result_summarize_type import SummarizeChapterResultSummarizeType
from .summarize_highlight_result import SummarizeHighlightResult
from .summarize_highlight_result_highlights_item import SummarizeHighlightResultHighlightsItem
-from .summarize_response import (
- SummarizeResponse,
- SummarizeResponse_Chapter,
- SummarizeResponse_Highlight,
- SummarizeResponse_Summary,
-)
+from .summarize_highlight_result_summarize_type import SummarizeHighlightResultSummarizeType
from .summarize_summary_result import SummarizeSummaryResult
+from .summarize_summary_result_summarize_type import SummarizeSummaryResultSummarizeType
from .text_embedding_result import TextEmbeddingResult
from .text_image_input_request import TextImageInputRequest
from .text_input_request import TextInputRequest
@@ -153,6 +154,7 @@
from .video_input_request import VideoInputRequest
from .video_input_request_embedding_option_item import VideoInputRequestEmbeddingOptionItem
from .video_input_request_embedding_scope_item import VideoInputRequestEmbeddingScopeItem
+from .video_input_request_embedding_type_item import VideoInputRequestEmbeddingTypeItem
from .video_item import VideoItem
from .video_item_failed import VideoItemFailed
from .video_segment import VideoSegment
@@ -173,6 +175,7 @@
"AudioInputRequest",
"AudioInputRequestEmbeddingOptionItem",
"AudioInputRequestEmbeddingScopeItem",
+ "AudioInputRequestEmbeddingTypeItem",
"AudioSegment",
"AudioSegmentation",
"AudioSegmentationFixed",
@@ -199,8 +202,10 @@
"EmbeddingMediaMetadata",
"EmbeddingMediaMetadata_Audio",
"EmbeddingMediaMetadata_Image",
+ "EmbeddingMediaMetadata_MultiInput",
"EmbeddingMediaMetadata_TextImage",
"EmbeddingMediaMetadata_Video",
+ "EmbeddingMultiInputMetadata",
"EmbeddingResponse",
"EmbeddingSuccessResponse",
"EmbeddingTaskMediaMetadata",
@@ -222,8 +227,6 @@
"FinishReason",
"ForbiddenErrorBody",
"GetUploadStatusResponse",
- "Gist",
- "GistRequestTypesItem",
"HlsObject",
"HlsObjectStatus",
"ImageEmbeddingResult",
@@ -248,6 +251,9 @@
"MediaEmbeddingTaskAudioEmbedding",
"MediaEmbeddingTaskVideoEmbedding",
"MediaSource",
+ "MultiInputMediaSource",
+ "MultiInputMediaSourceMediaType",
+ "MultiInputRequest",
"MultipartUploadStatusType",
"NextPageToken",
"NonStreamAnalyzeResponse",
@@ -280,13 +286,12 @@
"StreamTextResponse",
"SummarizeChapterResult",
"SummarizeChapterResultChaptersItem",
+ "SummarizeChapterResultSummarizeType",
"SummarizeHighlightResult",
"SummarizeHighlightResultHighlightsItem",
- "SummarizeResponse",
- "SummarizeResponse_Chapter",
- "SummarizeResponse_Highlight",
- "SummarizeResponse_Summary",
+ "SummarizeHighlightResultSummarizeType",
"SummarizeSummaryResult",
+ "SummarizeSummaryResultSummarizeType",
"TextEmbeddingResult",
"TextImageInputRequest",
"TextInputRequest",
@@ -308,6 +313,7 @@
"VideoInputRequest",
"VideoInputRequestEmbeddingOptionItem",
"VideoInputRequestEmbeddingScopeItem",
+ "VideoInputRequestEmbeddingTypeItem",
"VideoItem",
"VideoItemFailed",
"VideoSegment",
diff --git a/src/twelvelabs/types/audio_input_request.py b/src/twelvelabs/types/audio_input_request.py
index e61a44e..4110be3 100644
--- a/src/twelvelabs/types/audio_input_request.py
+++ b/src/twelvelabs/types/audio_input_request.py
@@ -6,13 +6,14 @@
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
from .audio_input_request_embedding_option_item import AudioInputRequestEmbeddingOptionItem
from .audio_input_request_embedding_scope_item import AudioInputRequestEmbeddingScopeItem
+from .audio_input_request_embedding_type_item import AudioInputRequestEmbeddingTypeItem
from .audio_segmentation import AudioSegmentation
from .media_source import MediaSource
class AudioInputRequest(UniversalBaseModel):
"""
- This field is required if `input_type` is `audio`.
+ This field is required if the `input_type` parameter is `audio`.
"""
media_source: MediaSource
@@ -57,6 +58,19 @@ class AudioInputRequest(UniversalBaseModel):
You can specify multiple scopes to generate embeddings at different levels.
"""
+ embedding_type: typing.Optional[typing.List[AudioInputRequestEmbeddingTypeItem]] = pydantic.Field(default=None)
+ """
+ Specifies how to structure the embedding. Include this parameter only when the `embedding_option` parameter contains at least two values.
+
+ **Values**:
+ - `separate_embedding`: Returns separate embeddings for each modality specified in the `embedding_option` parameter.
+ - `fused_embedding`: Returns a single combined embedding that integrates all modalities into one vector.
+
+ Specify both values to receive separate and fused embeddings in the same response.
+
+ **Default**: `separate_embedding`.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
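A minimal sketch of requesting both separate and fused audio embeddings via the new `embedding_type` field. The `MediaSource` shape (a `url` field) and the modality values are assumptions based on the surrounding definitions:

```
from twelvelabs.types import AudioInputRequest, MediaSource

audio_input = AudioInputRequest(
    media_source=MediaSource(url="https://example.com/podcast.mp3"),  # assumed `url` field
    embedding_option=["audio", "transcription"],  # at least two modalities required
    embedding_type=["separate_embedding", "fused_embedding"],
)
```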
diff --git a/src/twelvelabs/types/audio_input_request_embedding_type_item.py b/src/twelvelabs/types/audio_input_request_embedding_type_item.py
new file mode 100644
index 0000000..efa1908
--- /dev/null
+++ b/src/twelvelabs/types/audio_input_request_embedding_type_item.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AudioInputRequestEmbeddingTypeItem = typing.Union[typing.Literal["separate_embedding", "fused_embedding"], typing.Any]
diff --git a/src/twelvelabs/types/chunk_info.py b/src/twelvelabs/types/chunk_info.py
index 7f04f8a..ff7ab64 100644
--- a/src/twelvelabs/types/chunk_info.py
+++ b/src/twelvelabs/types/chunk_info.py
@@ -12,7 +12,7 @@
class ChunkInfo(UniversalBaseModel):
index: typing.Optional[int] = pydantic.Field(default=None)
"""
- The index of the chunk. The platform uses 1-based indexing, and this value matches the value of the [`chunk_index`](/v1.3/api-reference/multipart-uploads/create#response.body.upload_urls.chunk_index) field in the list of upload URLs.
+ The index of the chunk. The platform uses 1-based indexing, and this value matches the value of the [`chunk_index`](/v1.3/api-reference/upload-content/multipart-uploads/create#response.body.upload_urls.chunk_index) field in the list of upload URLs.
"""
status: typing.Optional[ChunkInfoStatus] = pydantic.Field(default=None)
diff --git a/src/twelvelabs/types/completed_chunk.py b/src/twelvelabs/types/completed_chunk.py
index 2a00459..db34c06 100644
--- a/src/twelvelabs/types/completed_chunk.py
+++ b/src/twelvelabs/types/completed_chunk.py
@@ -25,7 +25,7 @@ class CompletedChunk(UniversalBaseModel):
chunk_size: int = pydantic.Field()
"""
- The number of bytes uploaded for this chunk. For all chunks except the last, this value equals [`chunk_size`](/v1.3/api-reference/multipart-uploads/create#response.body.chunk_size). For the last chunk, it may be smaller.
+ The number of bytes uploaded for this chunk. For all chunks except the last, this value equals [`chunk_size`](/v1.3/api-reference/upload-content/multipart-uploads/create#response.body.chunk_size). For the last chunk, it may be smaller.
"""
if IS_PYDANTIC_V2:
diff --git a/src/twelvelabs/types/create_asset_upload_response.py b/src/twelvelabs/types/create_asset_upload_response.py
index 43bbc80..8b57679 100644
--- a/src/twelvelabs/types/create_asset_upload_response.py
+++ b/src/twelvelabs/types/create_asset_upload_response.py
@@ -30,7 +30,7 @@ class CreateAssetUploadResponse(UniversalBaseModel):
Note the following about the presigned URLs:
- URLs expire after one hour.
- - Depending on the size of the file, the initial set may not include URLs for all chunks. If you need more URLs, you can request additional ones using the [`POST`](/v1.3/api-reference/multipart-uploads/get-additional-presigned-urls) method of the `/assets/multipart-uploads/{upload_id}/presigned-urls` endpoint.
+ - Depending on the size of the file, the initial set may not include URLs for all chunks. If you need more URLs, you can request additional ones using the [`POST`](/v1.3/api-reference/upload-content/multipart-uploads/get-additional-presigned-urls) method of the `/assets/multipart-uploads/{upload_id}/presigned-urls` endpoint.
"""
upload_headers: typing.Optional[typing.Dict[str, str]] = None
diff --git a/src/twelvelabs/types/embedding_data.py b/src/twelvelabs/types/embedding_data.py
index 67df0b7..6de355c 100644
--- a/src/twelvelabs/types/embedding_data.py
+++ b/src/twelvelabs/types/embedding_data.py
@@ -20,13 +20,14 @@ class EmbeddingData(UniversalBaseModel):
embedding_option: typing.Optional[EmbeddingDataEmbeddingOption] = pydantic.Field(default=None)
"""
- The type of embedding generated.
+ The modality used to generate this embedding.
- **Values**:
- - `visual`: Embedding based on visual content (video only)
- - `audio`: Embedding based on audio content
- - `transcription`: Embedding based on transcribed speech
- - `null`: For text and image embeddings
+ **Values**:
+ - `visual`: Embedding based on visual content (video only)
+ - `audio`: Embedding based on audio content
+ - `transcription`: Embedding based on transcribed speech
+ - `fused`: Embedding based on a combination of the modalities specified in the request. The platform returns this embedding only for video and audio content, and only when the `embedding_type` parameter in the request includes `fused_embedding`.
+ - `null`: For text and image embeddings
"""
embedding_scope: typing.Optional[EmbeddingDataEmbeddingScope] = pydantic.Field(default=None)
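Given the new `fused` value, a caller may want to separate fused vectors from per-modality vectors. A minimal sketch, assuming the embeddings response yields a flat list of `EmbeddingData` items:

```
import typing

from twelvelabs.types import EmbeddingData


def split_fused(items: typing.List[EmbeddingData]):
    """Separate fused vectors from per-modality vectors."""
    fused = [d for d in items if d.embedding_option == "fused"]
    separate = [d for d in items if d.embedding_option in ("visual", "audio", "transcription")]
    return fused, separate
```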
diff --git a/src/twelvelabs/types/embedding_data_embedding_option.py b/src/twelvelabs/types/embedding_data_embedding_option.py
index 9be2c05..6df2344 100644
--- a/src/twelvelabs/types/embedding_data_embedding_option.py
+++ b/src/twelvelabs/types/embedding_data_embedding_option.py
@@ -2,4 +2,4 @@
import typing
-EmbeddingDataEmbeddingOption = typing.Union[typing.Literal["visual", "audio", "transcription"], typing.Any]
+EmbeddingDataEmbeddingOption = typing.Union[typing.Literal["visual", "audio", "transcription", "fused"], typing.Any]
diff --git a/src/twelvelabs/types/embedding_media_metadata.py b/src/twelvelabs/types/embedding_media_metadata.py
index 1fe9fb0..e947eb0 100644
--- a/src/twelvelabs/types/embedding_media_metadata.py
+++ b/src/twelvelabs/types/embedding_media_metadata.py
@@ -12,7 +12,7 @@
class EmbeddingMediaMetadata_Image(UniversalBaseModel):
"""
- Metadata for the media input. Available for image, text_image, audio, and video inputs.
+ Metadata for the media input. Available for image, text_image, audio, video, and multi_input inputs.
"""
input_type: typing.Literal["image"] = "image"
@@ -31,7 +31,7 @@ class Config:
class EmbeddingMediaMetadata_TextImage(UniversalBaseModel):
"""
- Metadata for the media input. Available for image, text_image, audio, and video inputs.
+ Metadata for the media input. Available for image, text_image, audio, video, and multi_input inputs.
"""
input_type: typing.Literal["text_image"] = "text_image"
@@ -50,7 +50,7 @@ class Config:
class EmbeddingMediaMetadata_Audio(UniversalBaseModel):
"""
- Metadata for the media input. Available for image, text_image, audio, and video inputs.
+ Metadata for the media input. Available for image, text_image, audio, video, and multi_input inputs.
"""
input_type: typing.Literal["audio"] = "audio"
@@ -74,7 +74,7 @@ class Config:
class EmbeddingMediaMetadata_Video(UniversalBaseModel):
"""
- Metadata for the media input. Available for image, text_image, audio, and video inputs.
+ Metadata for the media input. Available for image, text_image, audio, video, and multi_input inputs.
"""
input_type: typing.Literal["video"] = "video"
@@ -97,9 +97,27 @@ class Config:
extra = pydantic.Extra.allow
+class EmbeddingMediaMetadata_MultiInput(UniversalBaseModel):
+ """
+ Metadata for the media input. Available for image, text_image, audio, video, and multi_input inputs.
+ """
+
+ input_type: typing.Literal["multi_input"] = "multi_input"
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
EmbeddingMediaMetadata = typing.Union[
EmbeddingMediaMetadata_Image,
EmbeddingMediaMetadata_TextImage,
EmbeddingMediaMetadata_Audio,
EmbeddingMediaMetadata_Video,
+ EmbeddingMediaMetadata_MultiInput,
]
diff --git a/src/twelvelabs/types/embedding_multi_input_metadata.py b/src/twelvelabs/types/embedding_multi_input_metadata.py
new file mode 100644
index 0000000..e5e1612
--- /dev/null
+++ b/src/twelvelabs/types/embedding_multi_input_metadata.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class EmbeddingMultiInputMetadata(UniversalBaseModel):
+ """
+    Metadata for multi-input embeddings.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/twelvelabs/types/gist.py b/src/twelvelabs/types/gist.py
deleted file mode 100644
index 37821e6..0000000
--- a/src/twelvelabs/types/gist.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .token_usage import TokenUsage
-
-
-class Gist(UniversalBaseModel):
- """
- Generated title, topics, and hashtags for the specified video.
- """
-
- id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Unique identifier of the response.
- """
-
- title: typing.Optional[str] = pydantic.Field(default=None)
- """
- Suggested title for the video.
- """
-
- topics: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- An array of topics that are relevant to the video.
- """
-
- hashtags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- An array of hashtags that are relevant to the video.
- """
-
- usage: typing.Optional[TokenUsage] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/twelvelabs/types/gist_request_types_item.py b/src/twelvelabs/types/gist_request_types_item.py
deleted file mode 100644
index cfeba84..0000000
--- a/src/twelvelabs/types/gist_request_types_item.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-GistRequestTypesItem = typing.Union[typing.Literal["title", "topic", "hashtag"], typing.Any]
diff --git a/src/twelvelabs/types/image_input_request.py b/src/twelvelabs/types/image_input_request.py
index adc2b09..8a90e32 100644
--- a/src/twelvelabs/types/image_input_request.py
+++ b/src/twelvelabs/types/image_input_request.py
@@ -9,7 +9,7 @@
class ImageInputRequest(UniversalBaseModel):
"""
- This field is required if `input_type` is `image`.
+ This field is required if the `input_type` parameter is `image`.
"""
media_source: MediaSource
diff --git a/src/twelvelabs/types/multi_input_media_source.py b/src/twelvelabs/types/multi_input_media_source.py
new file mode 100644
index 0000000..69432ad
--- /dev/null
+++ b/src/twelvelabs/types/multi_input_media_source.py
@@ -0,0 +1,55 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from ..core.serialization import FieldMetadata
+from .multi_input_media_source_media_type import MultiInputMediaSourceMediaType
+
+
+class MultiInputMediaSource(UniversalBaseModel):
+ """
+ An object specifying an image source for multi-input embeddings. You must provide exactly one of `url`, `base64_string`, or `asset_id`.
+ """
+
+ name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The unique identifier for this media source.
+
+    This field is required when `input_text` references this image.
+ """
+
+ media_type: MultiInputMediaSourceMediaType = pydantic.Field()
+ """
+ The type of media.
+
+ **Value**: `image`
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    The publicly accessible URL of the image file. Use direct links to raw image files. Image hosting platforms and cloud storage sharing links are not supported.
+ """
+
+ base_64_string: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="base64_string")] = (
+ pydantic.Field(default=None)
+ )
+ """
+ The base64-encoded image data.
+ """
+
+ asset_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The unique identifier of an asset from a [direct](/v1.3/api-reference/upload-content) or [multipart](/v1.3/api-reference/upload-content/multipart-uploads) upload.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/twelvelabs/types/multi_input_media_source_media_type.py b/src/twelvelabs/types/multi_input_media_source_media_type.py
new file mode 100644
index 0000000..1b35f4b
--- /dev/null
+++ b/src/twelvelabs/types/multi_input_media_source_media_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+MultiInputMediaSourceMediaType = typing.Union[typing.Literal["image"], typing.Any]
diff --git a/src/twelvelabs/types/multi_input_request.py b/src/twelvelabs/types/multi_input_request.py
new file mode 100644
index 0000000..5943275
--- /dev/null
+++ b/src/twelvelabs/types/multi_input_request.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .multi_input_media_source import MultiInputMediaSource
+
+
+class MultiInputRequest(UniversalBaseModel):
+ """
+ This field is required if the `input_type` parameter is `multi_input`.
+ """
+
+ input_text: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Text to combine with the images when generating the embedding.
+
+ **Usage options**:
+ - Omit this field to create an embedding from images only.
+ - Provide plain text to add context. Example: "A person cooking."
+ - Use image references to describe relationships between specific images. The format is `<@name>`, where `name` matches the `name` field of a media source. Example: "A person wearing <@outfit> and holding <@accessory>."
+ """
+
+ media_sources: typing.List[MultiInputMediaSource] = pydantic.Field()
+ """
+    An array of up to 10 images to include in the embedding. The platform processes images in the order they appear in the array. If you use image references in the [`input_text`](/v1.3/api-reference/create-embeddings-v2/create-embeddings#request.body.multi_input.input_text) parameter, each reference must have a corresponding image with a matching `name` field. If an image reference has no match, the request fails.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
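A minimal sketch of a multi-input request that combines two named images with text referencing them via the `<@name>` syntax documented above:

```
from twelvelabs.types import MultiInputMediaSource, MultiInputRequest

multi_input = MultiInputRequest(
    input_text="A person wearing <@outfit> and holding <@accessory>.",
    media_sources=[
        # Exactly one of `url`, `base64_string`, or `asset_id` per source.
        MultiInputMediaSource(name="outfit", media_type="image", url="https://example.com/outfit.jpg"),
        MultiInputMediaSource(name="accessory", media_type="image", url="https://example.com/accessory.jpg"),
    ],
)
```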
diff --git a/src/twelvelabs/types/summarize_chapter_result.py b/src/twelvelabs/types/summarize_chapter_result.py
index 3443ac2..ff8a440 100644
--- a/src/twelvelabs/types/summarize_chapter_result.py
+++ b/src/twelvelabs/types/summarize_chapter_result.py
@@ -5,6 +5,7 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
from .summarize_chapter_result_chapters_item import SummarizeChapterResultChaptersItem
+from .summarize_chapter_result_summarize_type import SummarizeChapterResultSummarizeType
from .token_usage import TokenUsage
@@ -13,6 +14,11 @@ class SummarizeChapterResult(UniversalBaseModel):
An object that represents the response from a summarize chapter operation.
"""
+ summarize_type: SummarizeChapterResultSummarizeType = pydantic.Field()
+ """
+ The type of summarize response.
+ """
+
id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier of the response.
diff --git a/src/twelvelabs/types/summarize_chapter_result_summarize_type.py b/src/twelvelabs/types/summarize_chapter_result_summarize_type.py
new file mode 100644
index 0000000..f9be97a
--- /dev/null
+++ b/src/twelvelabs/types/summarize_chapter_result_summarize_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SummarizeChapterResultSummarizeType = typing.Union[typing.Literal["chapter"], typing.Any]
diff --git a/src/twelvelabs/types/summarize_highlight_result.py b/src/twelvelabs/types/summarize_highlight_result.py
index 6dfe28b..088d319 100644
--- a/src/twelvelabs/types/summarize_highlight_result.py
+++ b/src/twelvelabs/types/summarize_highlight_result.py
@@ -5,6 +5,7 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
from .summarize_highlight_result_highlights_item import SummarizeHighlightResultHighlightsItem
+from .summarize_highlight_result_summarize_type import SummarizeHighlightResultSummarizeType
from .token_usage import TokenUsage
@@ -13,6 +14,11 @@ class SummarizeHighlightResult(UniversalBaseModel):
An object that represents the response from a summarize highlight operation.
"""
+ summarize_type: SummarizeHighlightResultSummarizeType = pydantic.Field()
+ """
+ The type of summarize response.
+ """
+
id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier of the response.
diff --git a/src/twelvelabs/types/summarize_highlight_result_summarize_type.py b/src/twelvelabs/types/summarize_highlight_result_summarize_type.py
new file mode 100644
index 0000000..4e58bbf
--- /dev/null
+++ b/src/twelvelabs/types/summarize_highlight_result_summarize_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SummarizeHighlightResultSummarizeType = typing.Union[typing.Literal["highlight"], typing.Any]
diff --git a/src/twelvelabs/types/summarize_response.py b/src/twelvelabs/types/summarize_response.py
deleted file mode 100644
index fe7288c..0000000
--- a/src/twelvelabs/types/summarize_response.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from .summarize_chapter_result_chapters_item import SummarizeChapterResultChaptersItem
-from .summarize_highlight_result_highlights_item import SummarizeHighlightResultHighlightsItem
-from .token_usage import TokenUsage
-
-
-class SummarizeResponse_Summary(UniversalBaseModel):
- summarize_type: typing.Literal["summary"] = "summary"
- id: typing.Optional[str] = None
- summary: typing.Optional[str] = None
- usage: typing.Optional[TokenUsage] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class SummarizeResponse_Chapter(UniversalBaseModel):
- summarize_type: typing.Literal["chapter"] = "chapter"
- id: typing.Optional[str] = None
- chapters: typing.Optional[typing.List[SummarizeChapterResultChaptersItem]] = None
- usage: typing.Optional[TokenUsage] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class SummarizeResponse_Highlight(UniversalBaseModel):
- summarize_type: typing.Literal["highlight"] = "highlight"
- id: typing.Optional[str] = None
- highlights: typing.Optional[typing.List[SummarizeHighlightResultHighlightsItem]] = None
- usage: typing.Optional[TokenUsage] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-SummarizeResponse = typing.Union[SummarizeResponse_Summary, SummarizeResponse_Chapter, SummarizeResponse_Highlight]
diff --git a/src/twelvelabs/types/summarize_summary_result.py b/src/twelvelabs/types/summarize_summary_result.py
index a7b3a18..814714d 100644
--- a/src/twelvelabs/types/summarize_summary_result.py
+++ b/src/twelvelabs/types/summarize_summary_result.py
@@ -4,6 +4,7 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .summarize_summary_result_summarize_type import SummarizeSummaryResultSummarizeType
from .token_usage import TokenUsage
@@ -12,6 +13,11 @@ class SummarizeSummaryResult(UniversalBaseModel):
An object that represents the response from a summarize summary operation.
"""
+ summarize_type: SummarizeSummaryResultSummarizeType = pydantic.Field()
+ """
+ The type of summarize response.
+ """
+
id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier of the response.
diff --git a/src/twelvelabs/types/summarize_summary_result_summarize_type.py b/src/twelvelabs/types/summarize_summary_result_summarize_type.py
new file mode 100644
index 0000000..85fb94f
--- /dev/null
+++ b/src/twelvelabs/types/summarize_summary_result_summarize_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SummarizeSummaryResultSummarizeType = typing.Union[typing.Literal["summary"], typing.Any]
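With the `SummarizeResponse` union removed, callers can discriminate on the new `summarize_type` fields instead. A minimal sketch; the `summary`, `chapters`, and `highlights` field names are assumed to carry over from the deleted union variants:

```
import typing

from twelvelabs.types import (
    SummarizeChapterResult,
    SummarizeHighlightResult,
    SummarizeSummaryResult,
)

SummarizeResult = typing.Union[SummarizeSummaryResult, SummarizeChapterResult, SummarizeHighlightResult]


def describe(result: SummarizeResult) -> str:
    if isinstance(result, SummarizeSummaryResult):
        return f"Summary: {result.summary}"  # assumed field
    if isinstance(result, SummarizeChapterResult):
        return f"{len(result.chapters or [])} chapters"  # assumed field
    return f"{len(result.highlights or [])} highlights"  # assumed field
```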
diff --git a/src/twelvelabs/types/text_image_input_request.py b/src/twelvelabs/types/text_image_input_request.py
index 48e55e8..80fd9eb 100644
--- a/src/twelvelabs/types/text_image_input_request.py
+++ b/src/twelvelabs/types/text_image_input_request.py
@@ -9,7 +9,7 @@
class TextImageInputRequest(UniversalBaseModel):
"""
- This field is required if `input_type` is `text_image`.
+ This field is required if the `input_type` parameter is `text_image`.
"""
media_source: MediaSource
diff --git a/src/twelvelabs/types/text_input_request.py b/src/twelvelabs/types/text_input_request.py
index a1f3e4b..01ee6d1 100644
--- a/src/twelvelabs/types/text_input_request.py
+++ b/src/twelvelabs/types/text_input_request.py
@@ -8,7 +8,7 @@
class TextInputRequest(UniversalBaseModel):
"""
- This field is required if `input_type` is `text`.
+ This field is required if the `input_type` parameter is `text`.
"""
input_text: str = pydantic.Field()
diff --git a/src/twelvelabs/types/video_input_request.py b/src/twelvelabs/types/video_input_request.py
index 95d8ee6..75156fa 100644
--- a/src/twelvelabs/types/video_input_request.py
+++ b/src/twelvelabs/types/video_input_request.py
@@ -7,12 +7,13 @@
from .media_source import MediaSource
from .video_input_request_embedding_option_item import VideoInputRequestEmbeddingOptionItem
from .video_input_request_embedding_scope_item import VideoInputRequestEmbeddingScopeItem
+from .video_input_request_embedding_type_item import VideoInputRequestEmbeddingTypeItem
from .video_segmentation import VideoSegmentation
class VideoInputRequest(UniversalBaseModel):
"""
- Required if the `input_type` parameter is `video`.
+ This field is required if the `input_type` parameter is `video`.
"""
media_source: MediaSource
@@ -61,6 +62,19 @@ class VideoInputRequest(UniversalBaseModel):
**Default**: `["clip", "asset"]`
"""
+ embedding_type: typing.Optional[typing.List[VideoInputRequestEmbeddingTypeItem]] = pydantic.Field(default=None)
+ """
+    Specifies how to structure the embedding. Include this parameter only when the `embedding_option` parameter contains at least two values.
+
+    **Values**:
+    - `separate_embedding`: Returns separate embeddings for each modality specified in the `embedding_option` parameter.
+    - `fused_embedding`: Returns a single combined embedding that integrates all modalities into one vector.
+
+ Specify both values to receive separate and fused embeddings in the same response.
+
+ **Default**: `separate_embedding`.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
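The video request mirrors the audio one; a minimal sketch asking for only the combined vector, again assuming `MediaSource` accepts a `url` field:

```
from twelvelabs.types import MediaSource, VideoInputRequest

video_input = VideoInputRequest(
    media_source=MediaSource(url="https://example.com/clip.mp4"),  # assumed `url` field
    embedding_option=["visual", "audio"],  # fusing requires at least two modalities
    embedding_type=["fused_embedding"],  # return only the combined vector
)
```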
diff --git a/src/twelvelabs/types/video_input_request_embedding_type_item.py b/src/twelvelabs/types/video_input_request_embedding_type_item.py
new file mode 100644
index 0000000..1b6addd
--- /dev/null
+++ b/src/twelvelabs/types/video_input_request_embedding_type_item.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+VideoInputRequestEmbeddingTypeItem = typing.Union[typing.Literal["separate_embedding", "fused_embedding"], typing.Any]
From 4e42e02b9fed3c9972151040e7d381608c046449 Mon Sep 17 00:00:00 2001
From: tl-genie
Date: Wed, 11 Mar 2026 11:21:24 +0900
Subject: [PATCH 2/2] Enhance SearchClientWrapper and AsyncSearchClientWrapper
to support multiple media URLs and files. Updated search request handling to
accommodate new parameters, improving flexibility in media queries.
---
.../wrapper/search_client_wrapper.py | 150 +++++++++++++-----
1 file changed, 114 insertions(+), 36 deletions(-)
diff --git a/src/twelvelabs/wrapper/search_client_wrapper.py b/src/twelvelabs/wrapper/search_client_wrapper.py
index dc00c8f..8a8cdae 100644
--- a/src/twelvelabs/wrapper/search_client_wrapper.py
+++ b/src/twelvelabs/wrapper/search_client_wrapper.py
@@ -59,6 +59,8 @@ def query(
query_media_type: typing.Optional[typing.Literal["image"]] = OMIT,
query_media_url: typing.Optional[str] = OMIT,
query_media_file: typing.Optional[core.File] = OMIT,
+ query_media_urls: typing.Optional[typing.List[str]] = None,
+ query_media_files: typing.Optional[typing.List[core.File]] = None,
query_text: typing.Optional[str] = OMIT,
adjust_confidence_level: typing.Optional[float] = OMIT,
group_by: typing.Optional[SearchCreateRequestGroupBy] = OMIT,
@@ -255,24 +257,61 @@ def query(
)
"""
- _response = self.create(
- index_id=index_id,
- search_options=search_options,
- query_media_type=query_media_type,
- query_media_url=query_media_url,
- query_media_file=query_media_file,
- query_text=query_text,
- adjust_confidence_level=adjust_confidence_level,
- group_by=group_by,
- threshold=threshold,
- sort_option=sort_option,
- operator=operator,
- page_limit=page_limit,
- filter=filter,
- include_user_metadata=include_user_metadata,
- transcription_options=transcription_options,
- request_options=request_options,
- )
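+        # Plural media parameters cannot be forwarded to create(), which
+        # accepts a single URL/file, so build the multipart request directly.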
+ _has_plural = query_media_urls is not None or query_media_files is not None
+ if _has_plural:
+ _data: typing.Dict[str, typing.Any] = {
+ "index_id": index_id,
+ "search_options": search_options,
+ "query_media_type": query_media_type,
+ "query_media_url": query_media_urls if query_media_urls is not None else query_media_url,
+ "query_text": query_text,
+ "adjust_confidence_level": adjust_confidence_level,
+ "group_by": group_by,
+ "threshold": threshold,
+ "sort_option": sort_option,
+ "operator": operator,
+ "page_limit": page_limit,
+ "filter": filter,
+ "include_user_metadata": include_user_metadata,
+ "transcription_options": transcription_options,
+ }
+ _files: typing.Dict[str, typing.Any] = {}
+ if query_media_files is not None:
+ _files["query_media_file"] = query_media_files
+ elif query_media_file is not OMIT:
+ _files["query_media_file"] = query_media_file
+ _http_response = self._raw_client._client_wrapper.httpx_client.request(
+ "search",
+ method="POST",
+ data=_data,
+ files=_files,
+ request_options=request_options,
+ omit=OMIT,
+ force_multipart=True,
+ )
+ _response = typing.cast(
+ SearchResults,
+ parse_obj_as(type_=SearchResults, object_=_http_response.json()),
+ )
+ else:
+ _response = self.create(
+ index_id=index_id,
+ search_options=search_options,
+ query_media_type=query_media_type,
+ query_media_url=query_media_url,
+ query_media_file=query_media_file,
+ query_text=query_text,
+ adjust_confidence_level=adjust_confidence_level,
+ group_by=group_by,
+ threshold=threshold,
+ sort_option=sort_option,
+ operator=operator,
+ page_limit=page_limit,
+ filter=filter,
+ include_user_metadata=include_user_metadata,
+ transcription_options=transcription_options,
+ request_options=request_options,
+ )
_has_next = (
_response.page_info is not None
@@ -324,6 +363,8 @@ async def query(
query_media_type: typing.Optional[typing.Literal["image"]] = OMIT,
query_media_url: typing.Optional[str] = OMIT,
query_media_file: typing.Optional[core.File] = OMIT,
+ query_media_urls: typing.Optional[typing.List[str]] = None,
+ query_media_files: typing.Optional[typing.List[core.File]] = None,
query_text: typing.Optional[str] = OMIT,
adjust_confidence_level: typing.Optional[float] = OMIT,
group_by: typing.Optional[SearchCreateRequestGroupBy] = OMIT,
@@ -528,24 +569,61 @@ async def main() -> None:
asyncio.run(main())
"""
- _response = await self.create(
- index_id=index_id,
- search_options=search_options,
- query_media_type=query_media_type,
- query_media_url=query_media_url,
- query_media_file=query_media_file,
- query_text=query_text,
- adjust_confidence_level=adjust_confidence_level,
- group_by=group_by,
- threshold=threshold,
- sort_option=sort_option,
- operator=operator,
- page_limit=page_limit,
- filter=filter,
- include_user_metadata=include_user_metadata,
- transcription_options=transcription_options,
- request_options=request_options,
- )
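+        # Same dispatch as the synchronous wrapper: plural media parameters
+        # are posted directly as a multipart request instead of via create().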
+ _has_plural = query_media_urls is not None or query_media_files is not None
+ if _has_plural:
+ _data: typing.Dict[str, typing.Any] = {
+ "index_id": index_id,
+ "search_options": search_options,
+ "query_media_type": query_media_type,
+ "query_media_url": query_media_urls if query_media_urls is not None else query_media_url,
+ "query_text": query_text,
+ "adjust_confidence_level": adjust_confidence_level,
+ "group_by": group_by,
+ "threshold": threshold,
+ "sort_option": sort_option,
+ "operator": operator,
+ "page_limit": page_limit,
+ "filter": filter,
+ "include_user_metadata": include_user_metadata,
+ "transcription_options": transcription_options,
+ }
+ _files: typing.Dict[str, typing.Any] = {}
+ if query_media_files is not None:
+ _files["query_media_file"] = query_media_files
+ elif query_media_file is not OMIT:
+ _files["query_media_file"] = query_media_file
+ _http_response = await self._raw_client._client_wrapper.httpx_client.request(
+ "search",
+ method="POST",
+ data=_data,
+ files=_files,
+ request_options=request_options,
+ omit=OMIT,
+ force_multipart=True,
+ )
+ _response = typing.cast(
+ SearchResults,
+ parse_obj_as(type_=SearchResults, object_=_http_response.json()),
+ )
+ else:
+ _response = await self.create(
+ index_id=index_id,
+ search_options=search_options,
+ query_media_type=query_media_type,
+ query_media_url=query_media_url,
+ query_media_file=query_media_file,
+ query_text=query_text,
+ adjust_confidence_level=adjust_confidence_level,
+ group_by=group_by,
+ threshold=threshold,
+ sort_option=sort_option,
+ operator=operator,
+ page_limit=page_limit,
+ filter=filter,
+ include_user_metadata=include_user_metadata,
+ transcription_options=transcription_options,
+ request_options=request_options,
+ )
_has_next = (
_response.page_info is not None
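
Taken together, callers can now pass several reference images in a single query. A minimal usage sketch, assuming the wrapper is exposed as `client.search` and that `query()` still returns an iterable pager:

    from twelvelabs import TwelveLabs

    client = TwelveLabs(api_key="<your-api-key>")

    # Plural parameters trigger the direct multipart path added above.
    pager = client.search.query(
        index_id="<index_id>",
        search_options=["visual"],
        query_media_type="image",
        query_media_urls=[
            "https://example.com/frame-1.png",
            "https://example.com/frame-2.png",
        ],
    )
    for item in pager:
        # Field names assumed from the SearchResults item model.
        print(item.video_id, item.score)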