diff --git a/poetry.lock b/poetry.lock
index 794340b..a19d7e2 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -60,13 +60,13 @@ files = [
[[package]]
name = "exceptiongroup"
-version = "1.3.0"
+version = "1.3.1"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
- {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
- {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
+ {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"},
+ {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index e7c505d..d99e091 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "twelvelabs"
[tool.poetry]
name = "twelvelabs"
-version = "1.1.0"
+version = "1.1.1"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index e023b9e..1547bf0 100644
--- a/reference.md
+++ b/reference.md
@@ -400,6 +400,7 @@ response = client.analyze_stream(
prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
temperature=0.2,
response_format=ResponseFormat(
+ type="json_schema",
json_schema={
"type": "object",
"properties": {
@@ -541,6 +542,7 @@ client.analyze(
prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
temperature=0.2,
response_format=ResponseFormat(
+ type="json_schema",
json_schema={
"type": "object",
"properties": {
@@ -1632,10 +1634,7 @@ client.indexes.delete(
This method returns a list of assets in your account.
-
-- The platform returns your assets sorted by creation date, with the newest at the top of the list.
-- The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+The platform returns your assets sorted by creation date, with the newest at the top of the list.
@@ -1749,7 +1748,7 @@ The number of items to return on each page.
-
-This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -1823,9 +1822,7 @@ typing.Optional[core.File]` — See core.File for more documentation
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+URL uploads have a maximum size of 4GB.
@@ -2130,6 +2127,7 @@ client = TwelveLabs(
)
client.multipart_upload.create(
filename="my-video.mp4",
+ type="video",
total_size=104857600,
)
@@ -2147,7 +2145,15 @@ client.multipart_upload.create(
-
-**filename:** `str` — Original filename of the asset
+**filename:** `str` — The original file name of the asset.
+
+
+
+
+
+-
+
+**type:** `CreateAssetUploadRequestType` — The type of asset you want to upload.
@@ -2194,14 +2200,14 @@ The total size of the file in bytes. The platform uses this value to:
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
-Use this endpoint to:
+Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
@@ -2303,11 +2309,10 @@ The number of items to return on each page.
-
-This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
@@ -2333,6 +2338,7 @@ client.multipart_upload.report_chunk_batch(
CompletedChunk(
chunk_index=1,
proof="d41d8cd98f00b204e9800998ecf8427e",
+ proof_type="etag",
chunk_size=5242880,
)
],
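Taken together, the multipart upload hunks above add a required `type` on session creation and a `proof_type` on chunk reports. A hedged sketch of the flow, in which the session identifier and the keyword for the chunk list are assumptions not shown in this diff:

```python
from twelvelabs import TwelveLabs, CompletedChunk

client = TwelveLabs(api_key="YOUR_API_KEY")

# 1. Create the upload session; `type` is now required.
session = client.multipart_upload.create(
    filename="my-video.mp4",
    type="video",
    total_size=104857600,
)

# 2. Upload the chunks out of band, then report them with their ETag proofs.
client.multipart_upload.report_chunk_batch(
    upload_id=session.upload_id,  # attribute and parameter names assumed
    chunks=[                      # keyword assumed
        CompletedChunk(
            chunk_index=1,
            proof="d41d8cd98f00b204e9800998ecf8427e",
            proof_type="etag",
            chunk_size=5242880,
        )
    ],
)
```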
@@ -3339,7 +3345,7 @@ For detailed guidance and version-specific behavior, see the [Search options](/v
-
-**query_media_type:** `typing.Optional[typing.Literal["image"]]` — The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
+**query_media_type:** `typing.Optional[SearchCreateRequestQueryMediaType]` — The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
@@ -3913,7 +3919,7 @@ The desired duration in seconds for each clip for which the platform generates a
Defines the scope of video embedding generation. Valid values are the following:
- `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`.
-- `clip` and `video`: Creates embeddings for video segments and the entire video.
+- `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance.
To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below:
@@ -4154,7 +4160,7 @@ This endpoint synchronously creates embeddings for multimodal content and return
- Maximum file size for base64 encoded strings: 36 MB
- Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless)
- Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Video resolution: 360x360 to 3840x2160 pixels
+ - Video resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
@@ -4198,7 +4204,17 @@ client.embed.v_2.create(
-
-**input_type:** `CreateEmbeddingsRequestInputType` — The type of content for which you wish to create embeddings.
+**input_type:** `CreateEmbeddingsRequestInputType`
+
+The type of content for the embeddings.
+
+
+**Values**:
+- `audio`: Creates embeddings for an audio file
+- `video`: Creates embeddings for a video file
+- `image`: Creates embeddings for an image file
+- `text`: Creates embeddings for text input
+- `text_image`: Creates embeddings for text and an image.
@@ -4206,7 +4222,7 @@ client.embed.v_2.create(
-
-**model_name:** `str` — The video understanding model you wish to use.
+**model_name:** `CreateEmbeddingsRequestModelName` — The video understanding model to use. Only `"marengo3.0"` is supported.
@@ -4419,7 +4435,7 @@ This endpoint creates embeddings for audio and video content asynchronously.
- Maximum duration: 4 hours
- Maximum file size: 4 GB
- Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Resolution: 360x360 to 3840x2160 pixels
+ - Resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
**Audio**:
@@ -4494,7 +4510,7 @@ client.embed.v_2.tasks.create(
**input_type:** `CreateAsyncEmbeddingRequestInputType`
-The type of content for which you wish to create embeddings.
+The type of content for the embeddings.
**Values**:
- `audio`: Audio files
@@ -4506,7 +4522,7 @@ The type of content for which you wish to create embeddings.
-
-**model_name:** `str` — The model you wish to use.
+**model_name:** `CreateAsyncEmbeddingRequestModelName` — The model you wish to use. Only `"marengo3.0"` is supported.
@@ -5624,7 +5640,7 @@ status=ready&status=validating
-
-**created_at:** `typing.Optional[str]` — Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+**created_at:** `typing.Optional[str]` — Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
@@ -5779,20 +5795,20 @@ client.indexes.indexed_assets.create(
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
-**Common use cases**:
+Use this method to:
-- Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+- Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
-- Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+- Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
-- Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+- Retrieve the embeddings:
+ - Include the `embeddingOption` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -5874,7 +5890,7 @@ To retrieve embeddings for a video, it must be indexed using the Marengo video u
-
-**transcription:** `typing.Optional[bool]` — The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+**transcription:** `typing.Optional[bool]` — Specifies whether to retrieve a transcription of the spoken words.
@@ -5985,7 +6001,7 @@ client.indexes.indexed_assets.delete(
-
-Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
@@ -6499,7 +6515,7 @@ client.indexes.videos.delete(
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
-Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
diff --git a/src/twelvelabs/__init__.py b/src/twelvelabs/__init__.py
index f9426c8..5df6c68 100644
--- a/src/twelvelabs/__init__.py
+++ b/src/twelvelabs/__init__.py
@@ -14,6 +14,7 @@
AudioSegment,
AudioSegmentation,
AudioSegmentationFixed,
+ AudioSegmentationStrategy,
BadRequestErrorBody,
BaseEmbeddingMetadata,
BaseSegment,
@@ -23,6 +24,7 @@
ChunkInfo,
ChunkInfoStatus,
CompletedChunk,
+ CompletedChunkProofType,
Confidence,
CreateAssetUploadResponse,
CreatedAt,
@@ -97,6 +99,7 @@
ReportChunkBatchResponse,
RequestAdditionalPresignedUrLsResponse,
ResponseFormat,
+ ResponseFormatType,
ScoreSearchTerms,
SearchItem,
SearchItemClipsItem,
@@ -165,9 +168,11 @@
from .environment import TwelveLabsEnvironment
from .indexes import IndexesCreateRequestModelsItem, IndexesCreateResponse, IndexesListResponse
from .manage_entities import ListAllEntitiesRequestSortBy, ListAllEntitiesRequestStatus, ListAllEntitiesResponse
+from .multipart_upload import CreateAssetUploadRequestType
from .search import (
SearchCreateRequestGroupBy,
SearchCreateRequestOperator,
+ SearchCreateRequestQueryMediaType,
SearchCreateRequestSearchOptionsItem,
SearchCreateRequestSortOption,
SearchCreateRequestTranscriptionOptionsItem,
@@ -199,6 +204,7 @@
"AudioSegment",
"AudioSegmentation",
"AudioSegmentationFixed",
+ "AudioSegmentationStrategy",
"BadRequestError",
"BadRequestErrorBody",
"BaseEmbeddingMetadata",
@@ -209,7 +215,9 @@
"ChunkInfo",
"ChunkInfoStatus",
"CompletedChunk",
+ "CompletedChunkProofType",
"Confidence",
+ "CreateAssetUploadRequestType",
"CreateAssetUploadResponse",
"CreatedAt",
"EmbeddingAudioMetadata",
@@ -294,9 +302,11 @@
"ReportChunkBatchResponse",
"RequestAdditionalPresignedUrLsResponse",
"ResponseFormat",
+ "ResponseFormatType",
"ScoreSearchTerms",
"SearchCreateRequestGroupBy",
"SearchCreateRequestOperator",
+ "SearchCreateRequestQueryMediaType",
"SearchCreateRequestSearchOptionsItem",
"SearchCreateRequestSortOption",
"SearchCreateRequestTranscriptionOptionsItem",
diff --git a/src/twelvelabs/assets/client.py b/src/twelvelabs/assets/client.py
index c98547d..405c15a 100644
--- a/src/twelvelabs/assets/client.py
+++ b/src/twelvelabs/assets/client.py
@@ -44,10 +44,7 @@ def list(
"""
This method returns a list of assets in your account.
-
- - The platform returns your assets sorted by creation date, with the newest at the top of the list.
- - The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+ The platform returns your assets sorted by creation date, with the newest at the top of the list.
Parameters
----------
@@ -111,7 +108,7 @@ def create(
request_options: typing.Optional[RequestOptions] = None,
) -> Asset:
"""
- This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+ This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -138,9 +135,7 @@ def create(
url : typing.Optional[str]
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+ URL uploads have a maximum size of 4GB.
filename : typing.Optional[str]
The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL.
@@ -260,10 +255,7 @@ async def list(
"""
This method returns a list of assets in your account.
-
- - The platform returns your assets sorted by creation date, with the newest at the top of the list.
- - The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+ The platform returns your assets sorted by creation date, with the newest at the top of the list.
Parameters
----------
@@ -336,7 +328,7 @@ async def create(
request_options: typing.Optional[RequestOptions] = None,
) -> Asset:
"""
- This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+ This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -363,9 +355,7 @@ async def create(
url : typing.Optional[str]
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+ URL uploads have a maximum size of 4GB.
filename : typing.Optional[str]
The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL.
diff --git a/src/twelvelabs/assets/raw_client.py b/src/twelvelabs/assets/raw_client.py
index fca924d..2f78edc 100644
--- a/src/twelvelabs/assets/raw_client.py
+++ b/src/twelvelabs/assets/raw_client.py
@@ -39,10 +39,7 @@ def list(
"""
This method returns a list of assets in your account.
-
- - The platform returns your assets sorted by creation date, with the newest at the top of the list.
- - The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+ The platform returns your assets sorted by creation date, with the newest at the top of the list.
Parameters
----------
@@ -131,7 +128,7 @@ def create(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[Asset]:
"""
- This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+ This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -158,9 +155,7 @@ def create(
url : typing.Optional[str]
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+ URL uploads have a maximum size of 4GB.
filename : typing.Optional[str]
The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL.
@@ -323,10 +318,7 @@ async def list(
"""
This method returns a list of assets in your account.
-
- - The platform returns your assets sorted by creation date, with the newest at the top of the list.
- - The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+ The platform returns your assets sorted by creation date, with the newest at the top of the list.
Parameters
----------
@@ -418,7 +410,7 @@ async def create(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[Asset]:
"""
- This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+ This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -445,9 +437,7 @@ async def create(
url : typing.Optional[str]
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+ URL uploads have a maximum size of 4GB.
filename : typing.Optional[str]
The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL.
diff --git a/src/twelvelabs/base_client.py b/src/twelvelabs/base_client.py
index 54a32c8..60396a3 100644
--- a/src/twelvelabs/base_client.py
+++ b/src/twelvelabs/base_client.py
@@ -400,6 +400,7 @@ def analyze_stream(
prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
temperature=0.2,
response_format=ResponseFormat(
+ type="json_schema",
json_schema={
"type": "object",
"properties": {
@@ -493,6 +494,7 @@ def analyze(
prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
temperature=0.2,
response_format=ResponseFormat(
+ type="json_schema",
json_schema={
"type": "object",
"properties": {
@@ -918,6 +920,7 @@ async def main() -> None:
prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
temperature=0.2,
response_format=ResponseFormat(
+ type="json_schema",
json_schema={
"type": "object",
"properties": {
@@ -1020,6 +1023,7 @@ async def main() -> None:
prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
temperature=0.2,
response_format=ResponseFormat(
+ type="json_schema",
json_schema={
"type": "object",
"properties": {
diff --git a/src/twelvelabs/core/client_wrapper.py b/src/twelvelabs/core/client_wrapper.py
index 434f508..16234c4 100644
--- a/src/twelvelabs/core/client_wrapper.py
+++ b/src/twelvelabs/core/client_wrapper.py
@@ -22,10 +22,10 @@ def __init__(
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "twelvelabs/1.1.0",
+ "User-Agent": "twelvelabs/1.1.1",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "twelvelabs",
- "X-Fern-SDK-Version": "1.1.0",
+ "X-Fern-SDK-Version": "1.1.1",
**(self.get_custom_headers() or {}),
}
headers["x-api-key"] = self.api_key
diff --git a/src/twelvelabs/embed/__init__.py b/src/twelvelabs/embed/__init__.py
index 812e0ae..0a04482 100644
--- a/src/twelvelabs/embed/__init__.py
+++ b/src/twelvelabs/embed/__init__.py
@@ -14,10 +14,11 @@
TasksStatusResponse,
TasksStatusResponseVideoEmbedding,
)
-from .v_2 import CreateEmbeddingsRequestInputType
+from .v_2 import CreateEmbeddingsRequestInputType, CreateEmbeddingsRequestModelName
__all__ = [
"CreateEmbeddingsRequestInputType",
+ "CreateEmbeddingsRequestModelName",
"TasksCreateRequestVideoEmbeddingScopeItem",
"TasksCreateResponse",
"TasksListResponse",
diff --git a/src/twelvelabs/embed/tasks/client.py b/src/twelvelabs/embed/tasks/client.py
index 458a52a..58bc869 100644
--- a/src/twelvelabs/embed/tasks/client.py
+++ b/src/twelvelabs/embed/tasks/client.py
@@ -190,7 +190,7 @@ def create(
video_embedding_scope : typing.Optional[typing.List[TasksCreateRequestVideoEmbeddingScopeItem]]
Defines the scope of video embedding generation. Valid values are the following:
- `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`.
- - `clip` and `video`: Creates embeddings for video segments and the entire video.
+ - `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance.
To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below:
@@ -507,7 +507,7 @@ async def create(
video_embedding_scope : typing.Optional[typing.List[TasksCreateRequestVideoEmbeddingScopeItem]]
Defines the scope of video embedding generation. Valid values are the following:
- `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`.
- - `clip` and `video`: Creates embeddings for video segments and the entire video.
+ - `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance.
To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below:
diff --git a/src/twelvelabs/embed/tasks/raw_client.py b/src/twelvelabs/embed/tasks/raw_client.py
index 7b7ee34..6df36da 100644
--- a/src/twelvelabs/embed/tasks/raw_client.py
+++ b/src/twelvelabs/embed/tasks/raw_client.py
@@ -208,7 +208,7 @@ def create(
video_embedding_scope : typing.Optional[typing.List[TasksCreateRequestVideoEmbeddingScopeItem]]
Defines the scope of video embedding generation. Valid values are the following:
- `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`.
- - `clip` and `video`: Creates embeddings for video segments and the entire video.
+ - `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance.
To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below:
@@ -589,7 +589,7 @@ async def create(
video_embedding_scope : typing.Optional[typing.List[TasksCreateRequestVideoEmbeddingScopeItem]]
Defines the scope of video embedding generation. Valid values are the following:
- `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`.
- - `clip` and `video`: Creates embeddings for video segments and the entire video.
+ - `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance.
To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below:
diff --git a/src/twelvelabs/embed/v_2/__init__.py b/src/twelvelabs/embed/v_2/__init__.py
index 6f7ed1b..00ee08e 100644
--- a/src/twelvelabs/embed/v_2/__init__.py
+++ b/src/twelvelabs/embed/v_2/__init__.py
@@ -2,19 +2,24 @@
# isort: skip_file
-from .types import CreateEmbeddingsRequestInputType
+from .types import CreateEmbeddingsRequestInputType, CreateEmbeddingsRequestModelName
from . import tasks
from .tasks import (
CreateAsyncEmbeddingRequestInputType,
+ CreateAsyncEmbeddingRequestModelName,
TasksCreateResponse,
+ TasksCreateResponseStatus,
TasksListResponse,
TasksListResponsePageInfo,
)
__all__ = [
"CreateAsyncEmbeddingRequestInputType",
+ "CreateAsyncEmbeddingRequestModelName",
"CreateEmbeddingsRequestInputType",
+ "CreateEmbeddingsRequestModelName",
"TasksCreateResponse",
+ "TasksCreateResponseStatus",
"TasksListResponse",
"TasksListResponsePageInfo",
"tasks",
diff --git a/src/twelvelabs/embed/v_2/client.py b/src/twelvelabs/embed/v_2/client.py
index 286e0dd..3867073 100644
--- a/src/twelvelabs/embed/v_2/client.py
+++ b/src/twelvelabs/embed/v_2/client.py
@@ -13,6 +13,7 @@
from .raw_client import AsyncRawV2Client, RawV2Client
from .tasks.client import AsyncTasksClient, TasksClient
from .types.create_embeddings_request_input_type import CreateEmbeddingsRequestInputType
+from .types.create_embeddings_request_model_name import CreateEmbeddingsRequestModelName
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -38,7 +39,7 @@ def create(
self,
*,
input_type: CreateEmbeddingsRequestInputType,
- model_name: str,
+ model_name: CreateEmbeddingsRequestModelName,
text: typing.Optional[TextInputRequest] = OMIT,
image: typing.Optional[ImageInputRequest] = OMIT,
text_image: typing.Optional[TextImageInputRequest] = OMIT,
@@ -75,17 +76,25 @@ def create(
- Maximum file size for base64 encoded strings: 36 MB
- Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless)
- Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Video resolution: 360x360 to 3840x2160 pixels
+ - Video resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
- model_name : str
- The video understanding model you wish to use.
+
+ **Values**:
+ - `audio`: Creates embeddings for an audio file
+ - `video`: Creates embeddings for a video file
+ - `image`: Creates embeddings for an image file
+ - `text`: Creates embeddings for text input
+ - `text_image`: Creates embeddings for text and an image.
+
+ model_name : CreateEmbeddingsRequestModelName
+ The video understanding model to use. Only "marengo3.0" is supported.
text : typing.Optional[TextInputRequest]
@@ -153,7 +162,7 @@ async def create(
self,
*,
input_type: CreateEmbeddingsRequestInputType,
- model_name: str,
+ model_name: CreateEmbeddingsRequestModelName,
text: typing.Optional[TextInputRequest] = OMIT,
image: typing.Optional[ImageInputRequest] = OMIT,
text_image: typing.Optional[TextImageInputRequest] = OMIT,
@@ -190,17 +199,25 @@ async def create(
- Maximum file size for base64 encoded strings: 36 MB
- Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless)
- Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Video resolution: 360x360 to 3840x2160 pixels
+ - Video resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
+
+
+ **Values**:
+ - `audio`: Creates embeddings for an audio file
+ - `video`: Creates embeddings for a video file
+ - `image`: Creates embeddings for an image file
+ - `text`: Creates embeddings for text input
+ - `text_image`: Creates embeddings for text and an image.
- model_name : str
- The video understanding model you wish to use.
+ model_name : CreateEmbeddingsRequestModelName
+ The video understanding model to use. Only "marengo3.0" is supported.
text : typing.Optional[TextInputRequest]
diff --git a/src/twelvelabs/embed/v_2/raw_client.py b/src/twelvelabs/embed/v_2/raw_client.py
index 4cd1b47..1cf647a 100644
--- a/src/twelvelabs/embed/v_2/raw_client.py
+++ b/src/twelvelabs/embed/v_2/raw_client.py
@@ -19,6 +19,7 @@
from ...types.text_input_request import TextInputRequest
from ...types.video_input_request import VideoInputRequest
from .types.create_embeddings_request_input_type import CreateEmbeddingsRequestInputType
+from .types.create_embeddings_request_model_name import CreateEmbeddingsRequestModelName
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -32,7 +33,7 @@ def create(
self,
*,
input_type: CreateEmbeddingsRequestInputType,
- model_name: str,
+ model_name: CreateEmbeddingsRequestModelName,
text: typing.Optional[TextInputRequest] = OMIT,
image: typing.Optional[ImageInputRequest] = OMIT,
text_image: typing.Optional[TextImageInputRequest] = OMIT,
@@ -69,17 +70,25 @@ def create(
- Maximum file size for base64 encoded strings: 36 MB
- Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless)
- Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Video resolution: 360x360 to 3840x2160 pixels
+ - Video resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
- model_name : str
- The video understanding model you wish to use.
+
+ **Values**:
+ - `audio`: Creates embeddings for an audio file
+ - `video`: Creates embeddings for a video file
+ - `image`: Creates embeddings for an image file
+ - `text`: Creates embeddings for text input
+ - `text_image`: Creates embeddings for text and an image.
+
+ model_name : CreateEmbeddingsRequestModelName
+ The video understanding model to use. Only "marengo3.0" is supported.
text : typing.Optional[TextInputRequest]
@@ -184,7 +193,7 @@ async def create(
self,
*,
input_type: CreateEmbeddingsRequestInputType,
- model_name: str,
+ model_name: CreateEmbeddingsRequestModelName,
text: typing.Optional[TextInputRequest] = OMIT,
image: typing.Optional[ImageInputRequest] = OMIT,
text_image: typing.Optional[TextImageInputRequest] = OMIT,
@@ -221,17 +230,25 @@ async def create(
- Maximum file size for base64 encoded strings: 36 MB
- Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless)
- Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Video resolution: 360x360 to 3840x2160 pixels
+ - Video resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
+
+
+ **Values**:
+ - `audio`: Creates embeddings for an audio file
+ - `video`: Creates embeddings for a video file
+ - `image`: Creates embeddings for an image file
+ - `text`: Creates embeddings for text input
+ - `text_image`: Creates embeddings for text and an image.
- model_name : str
- The video understanding model you wish to use.
+ model_name : CreateEmbeddingsRequestModelName
+ The video understanding model to use. Only "marengo3.0" is supported.
text : typing.Optional[TextInputRequest]
diff --git a/src/twelvelabs/embed/v_2/tasks/__init__.py b/src/twelvelabs/embed/v_2/tasks/__init__.py
index 29cfdb9..a5e489f 100644
--- a/src/twelvelabs/embed/v_2/tasks/__init__.py
+++ b/src/twelvelabs/embed/v_2/tasks/__init__.py
@@ -4,14 +4,18 @@
from .types import (
CreateAsyncEmbeddingRequestInputType,
+ CreateAsyncEmbeddingRequestModelName,
TasksCreateResponse,
+ TasksCreateResponseStatus,
TasksListResponse,
TasksListResponsePageInfo,
)
__all__ = [
"CreateAsyncEmbeddingRequestInputType",
+ "CreateAsyncEmbeddingRequestModelName",
"TasksCreateResponse",
+ "TasksCreateResponseStatus",
"TasksListResponse",
"TasksListResponsePageInfo",
]
diff --git a/src/twelvelabs/embed/v_2/tasks/client.py b/src/twelvelabs/embed/v_2/tasks/client.py
index e8e681a..ac1a2c3 100644
--- a/src/twelvelabs/embed/v_2/tasks/client.py
+++ b/src/twelvelabs/embed/v_2/tasks/client.py
@@ -11,6 +11,7 @@
from ....types.video_input_request import VideoInputRequest
from .raw_client import AsyncRawTasksClient, RawTasksClient
from .types.create_async_embedding_request_input_type import CreateAsyncEmbeddingRequestInputType
+from .types.create_async_embedding_request_model_name import CreateAsyncEmbeddingRequestModelName
from .types.tasks_create_response import TasksCreateResponse
# this is used as the default value for optional parameters
@@ -110,7 +111,7 @@ def create(
self,
*,
input_type: CreateAsyncEmbeddingRequestInputType,
- model_name: str,
+ model_name: CreateAsyncEmbeddingRequestModelName,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
@@ -132,7 +133,7 @@ def create(
- Maximum duration: 4 hours
- Maximum file size: 4 GB
- Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Resolution: 360x360 to 3840x2160 pixels
+ - Resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
**Audio**:
@@ -151,14 +152,14 @@ def create(
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
**Values**:
- `audio`: Audio files
- `video`: Video content
- model_name : str
- The model you wish to use.
+ model_name : CreateAsyncEmbeddingRequestModelName
+ The model you wish to use. Only `"marengo3.0"` is supported.
audio : typing.Optional[AudioInputRequest]
@@ -195,6 +196,7 @@ def create(
start_sec=0.0,
end_sec=3600.0,
segmentation=AudioSegmentation(
+ strategy="fixed",
fixed=AudioSegmentationFixed(
duration_sec=6,
),
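The hunk above adds an explicit `strategy` field to the audio segmentation example. A small, self-contained sketch of building that configuration with the types exported from the package root:

```python
from twelvelabs import AudioSegmentation, AudioSegmentationFixed

# "fixed" names the strategy that pairs with the fixed-duration settings below.
segmentation = AudioSegmentation(
    strategy="fixed",
    fixed=AudioSegmentationFixed(duration_sec=6),
)
```

The resulting object is then passed as the `segmentation` field of the audio input, exactly as in the example hunk above.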
@@ -352,7 +354,7 @@ async def create(
self,
*,
input_type: CreateAsyncEmbeddingRequestInputType,
- model_name: str,
+ model_name: CreateAsyncEmbeddingRequestModelName,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
@@ -374,7 +376,7 @@ async def create(
- Maximum duration: 4 hours
- Maximum file size: 4 GB
- Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Resolution: 360x360 to 3840x2160 pixels
+ - Resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
**Audio**:
@@ -393,14 +395,14 @@ async def create(
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
**Values**:
- `audio`: Audio files
- `video`: Video content
- model_name : str
- The model you wish to use.
+ model_name : CreateAsyncEmbeddingRequestModelName
+ The model you wish to use. Only `"marengo3.0"` is supported.
audio : typing.Optional[AudioInputRequest]
@@ -442,6 +444,7 @@ async def main() -> None:
start_sec=0.0,
end_sec=3600.0,
segmentation=AudioSegmentation(
+ strategy="fixed",
fixed=AudioSegmentationFixed(
duration_sec=6,
),
diff --git a/src/twelvelabs/embed/v_2/tasks/raw_client.py b/src/twelvelabs/embed/v_2/tasks/raw_client.py
index c559a01..881e04a 100644
--- a/src/twelvelabs/embed/v_2/tasks/raw_client.py
+++ b/src/twelvelabs/embed/v_2/tasks/raw_client.py
@@ -19,6 +19,7 @@
from ....types.media_embedding_task import MediaEmbeddingTask
from ....types.video_input_request import VideoInputRequest
from .types.create_async_embedding_request_input_type import CreateAsyncEmbeddingRequestInputType
+from .types.create_async_embedding_request_model_name import CreateAsyncEmbeddingRequestModelName
from .types.tasks_create_response import TasksCreateResponse
from .types.tasks_list_response import TasksListResponse
@@ -131,7 +132,7 @@ def create(
self,
*,
input_type: CreateAsyncEmbeddingRequestInputType,
- model_name: str,
+ model_name: CreateAsyncEmbeddingRequestModelName,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
@@ -153,7 +154,7 @@ def create(
- Maximum duration: 4 hours
- Maximum file size: 4 GB
- Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Resolution: 360x360 to 3840x2160 pixels
+ - Resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
**Audio**:
@@ -172,14 +173,14 @@ def create(
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
**Values**:
- `audio`: Audio files
- `video`: Video content
- model_name : str
- The model you wish to use.
+ model_name : CreateAsyncEmbeddingRequestModelName
+ The model you wish to use. Only `"marengo3.0"` is supported.
audio : typing.Optional[AudioInputRequest]
@@ -415,7 +416,7 @@ async def create(
self,
*,
input_type: CreateAsyncEmbeddingRequestInputType,
- model_name: str,
+ model_name: CreateAsyncEmbeddingRequestModelName,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
@@ -437,7 +438,7 @@ async def create(
- Maximum duration: 4 hours
- Maximum file size: 4 GB
- Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html)
- - Resolution: 360x360 to 3840x2160 pixels
+ - Resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
**Audio**:
@@ -456,14 +457,14 @@ async def create(
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
**Values**:
- `audio`: Audio files
- `video`: Video content
- model_name : str
- The model you wish to use.
+ model_name : CreateAsyncEmbeddingRequestModelName
+ The model you wish to use. Only `"marengo3.0"` is supported.
audio : typing.Optional[AudioInputRequest]
diff --git a/src/twelvelabs/embed/v_2/tasks/types/__init__.py b/src/twelvelabs/embed/v_2/tasks/types/__init__.py
index 66721fe..06e4df1 100644
--- a/src/twelvelabs/embed/v_2/tasks/types/__init__.py
+++ b/src/twelvelabs/embed/v_2/tasks/types/__init__.py
@@ -3,13 +3,17 @@
# isort: skip_file
from .create_async_embedding_request_input_type import CreateAsyncEmbeddingRequestInputType
+from .create_async_embedding_request_model_name import CreateAsyncEmbeddingRequestModelName
from .tasks_create_response import TasksCreateResponse
+from .tasks_create_response_status import TasksCreateResponseStatus
from .tasks_list_response import TasksListResponse
from .tasks_list_response_page_info import TasksListResponsePageInfo
__all__ = [
"CreateAsyncEmbeddingRequestInputType",
+ "CreateAsyncEmbeddingRequestModelName",
"TasksCreateResponse",
+ "TasksCreateResponseStatus",
"TasksListResponse",
"TasksListResponsePageInfo",
]
diff --git a/src/twelvelabs/embed/v_2/tasks/types/create_async_embedding_request_model_name.py b/src/twelvelabs/embed/v_2/tasks/types/create_async_embedding_request_model_name.py
new file mode 100644
index 0000000..7c19456
--- /dev/null
+++ b/src/twelvelabs/embed/v_2/tasks/types/create_async_embedding_request_model_name.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CreateAsyncEmbeddingRequestModelName = typing.Union[typing.Literal["marengo3.0"], typing.Any]
diff --git a/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py
index 064a40b..95e8cb9 100644
--- a/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py
+++ b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py
@@ -7,6 +7,7 @@
from .....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
from .....core.serialization import FieldMetadata
from .....types.embedding_data import EmbeddingData
+from .tasks_create_response_status import TasksCreateResponseStatus
class TasksCreateResponse(UniversalBaseModel):
@@ -15,14 +16,14 @@ class TasksCreateResponse(UniversalBaseModel):
The unique identifier of the embedding task
"""
- status: typing.Literal["processing"] = pydantic.Field(default="processing")
+ status: TasksCreateResponseStatus = pydantic.Field()
"""
The initial status of the embedding task.
"""
data: typing.Optional[typing.List[EmbeddingData]] = pydantic.Field(default=None)
"""
- Array of embedding results (only when status is ready)
+ An array of embedding results when `status` is `ready`, or `null` when `status` is `processing` or `failed`.
"""
if IS_PYDANTIC_V2:
diff --git a/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response_status.py b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response_status.py
new file mode 100644
index 0000000..8b1b643
--- /dev/null
+++ b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response_status.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TasksCreateResponseStatus = typing.Union[typing.Literal["processing"], typing.Any]
diff --git a/src/twelvelabs/embed/v_2/types/__init__.py b/src/twelvelabs/embed/v_2/types/__init__.py
index f369e66..9efd359 100644
--- a/src/twelvelabs/embed/v_2/types/__init__.py
+++ b/src/twelvelabs/embed/v_2/types/__init__.py
@@ -3,5 +3,6 @@
# isort: skip_file
from .create_embeddings_request_input_type import CreateEmbeddingsRequestInputType
+from .create_embeddings_request_model_name import CreateEmbeddingsRequestModelName
-__all__ = ["CreateEmbeddingsRequestInputType"]
+__all__ = ["CreateEmbeddingsRequestInputType", "CreateEmbeddingsRequestModelName"]
diff --git a/src/twelvelabs/embed/v_2/types/create_embeddings_request_model_name.py b/src/twelvelabs/embed/v_2/types/create_embeddings_request_model_name.py
new file mode 100644
index 0000000..523db4c
--- /dev/null
+++ b/src/twelvelabs/embed/v_2/types/create_embeddings_request_model_name.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CreateEmbeddingsRequestModelName = typing.Union[typing.Literal["marengo3.0"], typing.Any]
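The new alias above, like `CreateAsyncEmbeddingRequestModelName` and `TasksCreateResponseStatus` earlier in this diff, follows the same `Union[Literal[...], Any]` shape. A minimal illustration of the pattern, assuming standard type-checker semantics:

```python
import typing

# The literal documents the known value, while the Any branch keeps existing
# code type-checking if the API starts accepting or returning new names.
ModelName = typing.Union[typing.Literal["marengo3.0"], typing.Any]

def create(model_name: ModelName) -> None:
    ...

create("marengo3.0")    # the documented value
create("future-model")  # still accepted via the Any branch (forward compatibility)
```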
diff --git a/src/twelvelabs/indexes/indexed_assets/client.py b/src/twelvelabs/indexes/indexed_assets/client.py
index 34e00c8..2c846cd 100644
--- a/src/twelvelabs/indexes/indexed_assets/client.py
+++ b/src/twelvelabs/indexes/indexed_assets/client.py
@@ -122,7 +122,7 @@ def list(
Filter by size. Expressed in bytes.
created_at : typing.Optional[str]
- Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+ Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
updated_at : typing.Optional[str]
This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time.
@@ -263,20 +263,20 @@ def retrieve(
"""
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
- **Common use cases**:
+ Use this method to:
- - Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+ - Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
- - Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+ - Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
- - Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+ - Retrieve the embeddings:
+ - Include the `embeddingOption` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -301,7 +301,7 @@ def retrieve(
transcription : typing.Optional[bool]
- The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+ Specifies whether to retrieve a transcription of the spoken words.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -378,7 +378,7 @@ def update(
request_options: typing.Optional[RequestOptions] = None,
) -> None:
"""
- Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
Parameters
----------
@@ -525,7 +525,7 @@ async def list(
Filter by size. Expressed in bytes.
created_at : typing.Optional[str]
- Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+ Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
updated_at : typing.Optional[str]
This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time.
@@ -683,20 +683,20 @@ async def retrieve(
"""
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
- **Common use cases**:
+ Use this method to:
- - Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+ - Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
- - Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+ - Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
- - Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+ - Retrieve the embeddings:
+ - Include the `embeddingOption` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -721,7 +721,7 @@ async def retrieve(
transcription : typing.Optional[bool]
- The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+ Specifies whether to retrieve a transcription of the spoken words.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -814,7 +814,7 @@ async def update(
request_options: typing.Optional[RequestOptions] = None,
) -> None:
"""
- Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
Parameters
----------
diff --git a/src/twelvelabs/indexes/indexed_assets/raw_client.py b/src/twelvelabs/indexes/indexed_assets/raw_client.py
index f82e1b6..d656030 100644
--- a/src/twelvelabs/indexes/indexed_assets/raw_client.py
+++ b/src/twelvelabs/indexes/indexed_assets/raw_client.py
@@ -120,7 +120,7 @@ def list(
Filter by size. Expressed in bytes.
created_at : typing.Optional[str]
- Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+ Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
updated_at : typing.Optional[str]
This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time.
@@ -333,20 +333,20 @@ def retrieve(
"""
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
- **Common use cases**:
+ Use this method to:
- - Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+ - Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
- - Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+ - Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
- - Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+ - Retrieve the embeddings:
+ - Include the `embeddingOption` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -371,7 +371,7 @@ def retrieve(
transcription : typing.Optional[bool]
- The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+ Specifies whether to retrieve a transcription of the spoken words.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -481,7 +481,7 @@ def update(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[None]:
"""
- Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
Parameters
----------
@@ -625,7 +625,7 @@ async def list(
Filter by size. Expressed in bytes.
created_at : typing.Optional[str]
- Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+ Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
updated_at : typing.Optional[str]
This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time.
@@ -841,20 +841,20 @@ async def retrieve(
"""
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
- **Common use cases**:
+ Use this method to:
- - Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+ - Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
- - Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+ - Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
- - Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+ - Retrieve the embeddings:
+ - Include the `embeddingOption` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -879,7 +879,7 @@ async def retrieve(
transcription : typing.Optional[bool]
- The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+ Specifies whether to retrieve a transcription of the spoken words.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -989,7 +989,7 @@ async def update(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[None]:
"""
- Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
Parameters
----------
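The retrieve and update behavior described in the docstrings above can be sketched roughly as follows. This is illustrative only: the accessor path and parameter names are assumptions, since this diff shows only the docstring changes.

```python
# Hypothetical usage sketch -- the accessor path (client.indexes.indexed_assets)
# and parameter names are assumptions; only the docstrings appear in this diff.
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")

# Poll the indexed asset until indexing completes, then request the transcription.
indexed_asset = client.indexes.indexed_assets.retrieve(
    index_id="YOUR_INDEX_ID",
    indexed_asset_id="YOUR_INDEXED_ASSET_ID",
    transcription=True,
)
if indexed_asset.status == "ready":
    print(indexed_asset.transcription)

# Update user-defined metadata; setting a field to None deletes it.
client.indexes.indexed_assets.update(
    index_id="YOUR_INDEX_ID",
    indexed_asset_id="YOUR_INDEXED_ASSET_ID",
    user_metadata={"category": "demo", "obsolete_field": None},
)
```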
diff --git a/src/twelvelabs/indexes/videos/client.py b/src/twelvelabs/indexes/videos/client.py
index c2273e2..3ef476e 100644
--- a/src/twelvelabs/indexes/videos/client.py
+++ b/src/twelvelabs/indexes/videos/client.py
@@ -289,7 +289,7 @@ def update(
"""
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
- Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
Parameters
----------
@@ -632,7 +632,7 @@ async def update(
"""
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
- Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
Parameters
----------
diff --git a/src/twelvelabs/indexes/videos/raw_client.py b/src/twelvelabs/indexes/videos/raw_client.py
index 455986f..6f8c38b 100644
--- a/src/twelvelabs/indexes/videos/raw_client.py
+++ b/src/twelvelabs/indexes/videos/raw_client.py
@@ -348,7 +348,7 @@ def update(
"""
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
- Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
Parameters
----------
@@ -726,7 +726,7 @@ async def update(
"""
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
- Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
Parameters
----------
diff --git a/src/twelvelabs/multipart_upload/__init__.py b/src/twelvelabs/multipart_upload/__init__.py
index 5cde020..ddeaf92 100644
--- a/src/twelvelabs/multipart_upload/__init__.py
+++ b/src/twelvelabs/multipart_upload/__init__.py
@@ -2,3 +2,6 @@
# isort: skip_file
+from .types import CreateAssetUploadRequestType
+
+__all__ = ["CreateAssetUploadRequestType"]
diff --git a/src/twelvelabs/multipart_upload/client.py b/src/twelvelabs/multipart_upload/client.py
index 569e97d..2119003 100644
--- a/src/twelvelabs/multipart_upload/client.py
+++ b/src/twelvelabs/multipart_upload/client.py
@@ -12,6 +12,7 @@
from ..types.report_chunk_batch_response import ReportChunkBatchResponse
from ..types.request_additional_presigned_ur_ls_response import RequestAdditionalPresignedUrLsResponse
from .raw_client import AsyncRawMultipartUploadClient, RawMultipartUploadClient
+from .types.create_asset_upload_request_type import CreateAssetUploadRequestType
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -85,7 +86,12 @@ def list_incomplete_uploads(
)
def create(
- self, *, filename: str, total_size: int, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ filename: str,
+ type: CreateAssetUploadRequestType,
+ total_size: int,
+ request_options: typing.Optional[RequestOptions] = None,
) -> CreateAssetUploadResponse:
"""
This method creates a multipart upload session.
@@ -102,7 +108,10 @@ def create(
Parameters
----------
filename : str
- Original filename of the asset
+ The original file name of the asset.
+
+ type : CreateAssetUploadRequestType
+ The type of asset you want to upload.
total_size : int
The total size of the file in bytes. The platform uses this value to:
@@ -127,10 +136,13 @@ def create(
)
client.multipart_upload.create(
filename="my-video.mp4",
+ type="video",
total_size=104857600,
)
"""
- _response = self._raw_client.create(filename=filename, total_size=total_size, request_options=request_options)
+ _response = self._raw_client.create(
+ filename=filename, type=type, total_size=total_size, request_options=request_options
+ )
return _response.data
def get_status(
@@ -144,14 +156,14 @@ def get_status(
"""
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
- Use this endpoint to:
+ Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+ You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
Parameters
----------
@@ -205,11 +217,10 @@ def report_chunk_batch(
request_options: typing.Optional[RequestOptions] = None,
) -> ReportChunkBatchResponse:
"""
- This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+ This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
Parameters
----------
@@ -240,6 +251,7 @@ def report_chunk_batch(
CompletedChunk(
chunk_index=1,
proof="d41d8cd98f00b204e9800998ecf8427e",
+ proof_type="etag",
chunk_size=5242880,
)
],
@@ -376,7 +388,12 @@ async def main() -> None:
)
async def create(
- self, *, filename: str, total_size: int, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ filename: str,
+ type: CreateAssetUploadRequestType,
+ total_size: int,
+ request_options: typing.Optional[RequestOptions] = None,
) -> CreateAssetUploadResponse:
"""
This method creates a multipart upload session.
@@ -393,7 +410,10 @@ async def create(
Parameters
----------
filename : str
- Original filename of the asset
+ The original file name of the asset.
+
+ type : CreateAssetUploadRequestType
+ The type of asset you want to upload.
total_size : int
The total size of the file in bytes. The platform uses this value to:
@@ -423,6 +443,7 @@ async def create(
async def main() -> None:
await client.multipart_upload.create(
filename="my-video.mp4",
+ type="video",
total_size=104857600,
)
@@ -430,7 +451,7 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._raw_client.create(
- filename=filename, total_size=total_size, request_options=request_options
+ filename=filename, type=type, total_size=total_size, request_options=request_options
)
return _response.data
@@ -445,14 +466,14 @@ async def get_status(
"""
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
- Use this endpoint to:
+ Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+ You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
Parameters
----------
@@ -517,11 +538,10 @@ async def report_chunk_batch(
request_options: typing.Optional[RequestOptions] = None,
) -> ReportChunkBatchResponse:
"""
- This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+ This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
Parameters
----------
@@ -557,6 +577,7 @@ async def main() -> None:
CompletedChunk(
chunk_index=1,
proof="d41d8cd98f00b204e9800998ecf8427e",
+ proof_type="etag",
chunk_size=5242880,
)
],
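A rough end-to-end sketch of the updated multipart flow follows. The `create()` arguments and the `CompletedChunk` fields match this diff; the upload-session identifier and the keywords passed to `report_chunk_batch()` and `get_status()` are assumptions.

```python
from twelvelabs import TwelveLabs
from twelvelabs.types import CompletedChunk

client = TwelveLabs(api_key="YOUR_API_KEY")

# `type` is now an explicit, required argument rather than a hard-coded "video".
session = client.multipart_upload.create(
    filename="my-video.mp4",
    type="video",
    total_size=104857600,
)

# Each entry in session.upload_urls is a presigned URL for one chunk; upload the
# chunk bytes with any HTTP client and keep the ETag returned for each chunk.

# `proof_type` no longer defaults to "etag", so pass it explicitly.
client.multipart_upload.report_chunk_batch(
    upload_id=session.upload_id,  # assumed identifier and keyword names
    chunks=[
        CompletedChunk(
            chunk_index=1,
            proof="d41d8cd98f00b204e9800998ecf8427e",
            proof_type="etag",
            chunk_size=5242880,
        )
    ],
)

# Confirm the session reaches `completed` before using the asset.
status = client.multipart_upload.get_status(upload_id=session.upload_id)  # assumed keyword
print(status.status, status.uploaded_size, status.total_size)
```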
diff --git a/src/twelvelabs/multipart_upload/raw_client.py b/src/twelvelabs/multipart_upload/raw_client.py
index 35e4a10..ef93e45 100644
--- a/src/twelvelabs/multipart_upload/raw_client.py
+++ b/src/twelvelabs/multipart_upload/raw_client.py
@@ -23,6 +23,7 @@
from ..types.list_incomplete_uploads_response import ListIncompleteUploadsResponse
from ..types.report_chunk_batch_response import ReportChunkBatchResponse
from ..types.request_additional_presigned_ur_ls_response import RequestAdditionalPresignedUrLsResponse
+from .types.create_asset_upload_request_type import CreateAssetUploadRequestType
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -132,7 +133,12 @@ def list_incomplete_uploads(
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def create(
- self, *, filename: str, total_size: int, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ filename: str,
+ type: CreateAssetUploadRequestType,
+ total_size: int,
+ request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[CreateAssetUploadResponse]:
"""
This method creates a multipart upload session.
@@ -149,7 +155,10 @@ def create(
Parameters
----------
filename : str
- Original filename of the asset
+ The original file name of the asset.
+
+ type : CreateAssetUploadRequestType
+ The type of asset you want to upload.
total_size : int
The total size of the file in bytes. The platform uses this value to:
@@ -170,8 +179,8 @@ def create(
method="POST",
json={
"filename": filename,
+ "type": type,
"total_size": total_size,
- "type": "video",
},
headers={
"content-type": "application/json",
@@ -238,14 +247,14 @@ def get_status(
"""
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
- Use this endpoint to:
+ Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+ You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
Parameters
----------
@@ -359,11 +368,10 @@ def report_chunk_batch(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[ReportChunkBatchResponse]:
"""
- This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+ This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
Parameters
----------
@@ -653,7 +661,12 @@ async def _get_next():
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def create(
- self, *, filename: str, total_size: int, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ filename: str,
+ type: CreateAssetUploadRequestType,
+ total_size: int,
+ request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[CreateAssetUploadResponse]:
"""
This method creates a multipart upload session.
@@ -670,7 +683,10 @@ async def create(
Parameters
----------
filename : str
- Original filename of the asset
+ The original file name of the asset.
+
+ type : CreateAssetUploadRequestType
+ The type of asset you want to upload.
total_size : int
The total size of the file in bytes. The platform uses this value to:
@@ -691,8 +707,8 @@ async def create(
method="POST",
json={
"filename": filename,
+ "type": type,
"total_size": total_size,
- "type": "video",
},
headers={
"content-type": "application/json",
@@ -759,14 +775,14 @@ async def get_status(
"""
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
- Use this endpoint to:
+ Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+ You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
Parameters
----------
@@ -883,11 +899,10 @@ async def report_chunk_batch(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[ReportChunkBatchResponse]:
"""
- This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+ This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
Parameters
----------
diff --git a/src/twelvelabs/multipart_upload/types/__init__.py b/src/twelvelabs/multipart_upload/types/__init__.py
new file mode 100644
index 0000000..125735d
--- /dev/null
+++ b/src/twelvelabs/multipart_upload/types/__init__.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+from .create_asset_upload_request_type import CreateAssetUploadRequestType
+
+__all__ = ["CreateAssetUploadRequestType"]
diff --git a/src/twelvelabs/multipart_upload/types/create_asset_upload_request_type.py b/src/twelvelabs/multipart_upload/types/create_asset_upload_request_type.py
new file mode 100644
index 0000000..6c4da36
--- /dev/null
+++ b/src/twelvelabs/multipart_upload/types/create_asset_upload_request_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CreateAssetUploadRequestType = typing.Union[typing.Literal["video"], typing.Any]
diff --git a/src/twelvelabs/search/__init__.py b/src/twelvelabs/search/__init__.py
index 6c68973..6da738b 100644
--- a/src/twelvelabs/search/__init__.py
+++ b/src/twelvelabs/search/__init__.py
@@ -5,6 +5,7 @@
from .types import (
SearchCreateRequestGroupBy,
SearchCreateRequestOperator,
+ SearchCreateRequestQueryMediaType,
SearchCreateRequestSearchOptionsItem,
SearchCreateRequestSortOption,
SearchCreateRequestTranscriptionOptionsItem,
@@ -15,6 +16,7 @@
__all__ = [
"SearchCreateRequestGroupBy",
"SearchCreateRequestOperator",
+ "SearchCreateRequestQueryMediaType",
"SearchCreateRequestSearchOptionsItem",
"SearchCreateRequestSortOption",
"SearchCreateRequestTranscriptionOptionsItem",
diff --git a/src/twelvelabs/search/client.py b/src/twelvelabs/search/client.py
index 1870d39..fe109fd 100644
--- a/src/twelvelabs/search/client.py
+++ b/src/twelvelabs/search/client.py
@@ -10,6 +10,7 @@
from .raw_client import AsyncRawSearchClient, RawSearchClient
from .types.search_create_request_group_by import SearchCreateRequestGroupBy
from .types.search_create_request_operator import SearchCreateRequestOperator
+from .types.search_create_request_query_media_type import SearchCreateRequestQueryMediaType
from .types.search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem
from .types.search_create_request_sort_option import SearchCreateRequestSortOption
from .types.search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem
@@ -39,7 +40,7 @@ def create(
*,
index_id: str,
search_options: typing.List[SearchCreateRequestSearchOptionsItem],
- query_media_type: typing.Optional[typing.Literal["image"]] = OMIT,
+ query_media_type: typing.Optional[SearchCreateRequestQueryMediaType] = OMIT,
query_media_url: typing.Optional[str] = OMIT,
query_media_file: typing.Optional[core.File] = OMIT,
query_text: typing.Optional[str] = OMIT,
@@ -110,7 +111,7 @@ def create(
For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section.
- query_media_type : typing.Optional[typing.Literal["image"]]
+ query_media_type : typing.Optional[SearchCreateRequestQueryMediaType]
The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
query_media_url : typing.Optional[str]
@@ -331,7 +332,7 @@ async def create(
*,
index_id: str,
search_options: typing.List[SearchCreateRequestSearchOptionsItem],
- query_media_type: typing.Optional[typing.Literal["image"]] = OMIT,
+ query_media_type: typing.Optional[SearchCreateRequestQueryMediaType] = OMIT,
query_media_url: typing.Optional[str] = OMIT,
query_media_file: typing.Optional[core.File] = OMIT,
query_text: typing.Optional[str] = OMIT,
@@ -402,7 +403,7 @@ async def create(
For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section.
- query_media_type : typing.Optional[typing.Literal["image"]]
+ query_media_type : typing.Optional[SearchCreateRequestQueryMediaType]
The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
query_media_url : typing.Optional[str]
diff --git a/src/twelvelabs/search/raw_client.py b/src/twelvelabs/search/raw_client.py
index 2267cab..15ab327 100644
--- a/src/twelvelabs/search/raw_client.py
+++ b/src/twelvelabs/search/raw_client.py
@@ -16,6 +16,7 @@
from ..types.threshold_search import ThresholdSearch
from .types.search_create_request_group_by import SearchCreateRequestGroupBy
from .types.search_create_request_operator import SearchCreateRequestOperator
+from .types.search_create_request_query_media_type import SearchCreateRequestQueryMediaType
from .types.search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem
from .types.search_create_request_sort_option import SearchCreateRequestSortOption
from .types.search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem
@@ -34,7 +35,7 @@ def create(
*,
index_id: str,
search_options: typing.List[SearchCreateRequestSearchOptionsItem],
- query_media_type: typing.Optional[typing.Literal["image"]] = OMIT,
+ query_media_type: typing.Optional[SearchCreateRequestQueryMediaType] = OMIT,
query_media_url: typing.Optional[str] = OMIT,
query_media_file: typing.Optional[core.File] = OMIT,
query_text: typing.Optional[str] = OMIT,
@@ -105,7 +106,7 @@ def create(
For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section.
- query_media_type : typing.Optional[typing.Literal["image"]]
+ query_media_type : typing.Optional[SearchCreateRequestQueryMediaType]
The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
query_media_url : typing.Optional[str]
@@ -363,7 +364,7 @@ async def create(
*,
index_id: str,
search_options: typing.List[SearchCreateRequestSearchOptionsItem],
- query_media_type: typing.Optional[typing.Literal["image"]] = OMIT,
+ query_media_type: typing.Optional[SearchCreateRequestQueryMediaType] = OMIT,
query_media_url: typing.Optional[str] = OMIT,
query_media_file: typing.Optional[core.File] = OMIT,
query_text: typing.Optional[str] = OMIT,
@@ -434,7 +435,7 @@ async def create(
For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section.
- query_media_type : typing.Optional[typing.Literal["image"]]
+ query_media_type : typing.Optional[SearchCreateRequestQueryMediaType]
The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
query_media_url : typing.Optional[str]
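With the new alias in place, an image query can be sketched as below. Values such as `search_options=["visual"]` and the URLs are placeholders and assumptions, not taken from this diff.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")

# query_media_type is now typed as SearchCreateRequestQueryMediaType, which still
# resolves to the "image" literal (plus Any for forward compatibility).
results = client.search.create(
    index_id="YOUR_INDEX_ID",
    search_options=["visual"],  # assumed value
    query_media_type="image",
    query_media_url="https://example.com/query-image.jpg",
    query_text="the same scene at night",  # optional composed image+text search
)
```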
diff --git a/src/twelvelabs/search/types/__init__.py b/src/twelvelabs/search/types/__init__.py
index 272ecd4..6d84d9a 100644
--- a/src/twelvelabs/search/types/__init__.py
+++ b/src/twelvelabs/search/types/__init__.py
@@ -4,6 +4,7 @@
from .search_create_request_group_by import SearchCreateRequestGroupBy
from .search_create_request_operator import SearchCreateRequestOperator
+from .search_create_request_query_media_type import SearchCreateRequestQueryMediaType
from .search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem
from .search_create_request_sort_option import SearchCreateRequestSortOption
from .search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem
@@ -13,6 +14,7 @@
__all__ = [
"SearchCreateRequestGroupBy",
"SearchCreateRequestOperator",
+ "SearchCreateRequestQueryMediaType",
"SearchCreateRequestSearchOptionsItem",
"SearchCreateRequestSortOption",
"SearchCreateRequestTranscriptionOptionsItem",
diff --git a/src/twelvelabs/search/types/search_create_request_query_media_type.py b/src/twelvelabs/search/types/search_create_request_query_media_type.py
new file mode 100644
index 0000000..7ea0eae
--- /dev/null
+++ b/src/twelvelabs/search/types/search_create_request_query_media_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SearchCreateRequestQueryMediaType = typing.Union[typing.Literal["image"], typing.Any]
diff --git a/src/twelvelabs/types/__init__.py b/src/twelvelabs/types/__init__.py
index 6f92f2c..64dd12d 100644
--- a/src/twelvelabs/types/__init__.py
+++ b/src/twelvelabs/types/__init__.py
@@ -13,6 +13,7 @@
from .audio_segment import AudioSegment
from .audio_segmentation import AudioSegmentation
from .audio_segmentation_fixed import AudioSegmentationFixed
+from .audio_segmentation_strategy import AudioSegmentationStrategy
from .bad_request_error_body import BadRequestErrorBody
from .base_embedding_metadata import BaseEmbeddingMetadata
from .base_segment import BaseSegment
@@ -22,6 +23,7 @@
from .chunk_info import ChunkInfo
from .chunk_info_status import ChunkInfoStatus
from .completed_chunk import CompletedChunk
+from .completed_chunk_proof_type import CompletedChunkProofType
from .confidence import Confidence
from .create_asset_upload_response import CreateAssetUploadResponse
from .created_at import CreatedAt
@@ -100,6 +102,7 @@
from .report_chunk_batch_response import ReportChunkBatchResponse
from .request_additional_presigned_ur_ls_response import RequestAdditionalPresignedUrLsResponse
from .response_format import ResponseFormat
+from .response_format_type import ResponseFormatType
from .score_search_terms import ScoreSearchTerms
from .search_item import SearchItem
from .search_item_clips_item import SearchItemClipsItem
@@ -174,6 +177,7 @@
"AudioSegment",
"AudioSegmentation",
"AudioSegmentationFixed",
+ "AudioSegmentationStrategy",
"BadRequestErrorBody",
"BaseEmbeddingMetadata",
"BaseSegment",
@@ -183,6 +187,7 @@
"ChunkInfo",
"ChunkInfoStatus",
"CompletedChunk",
+ "CompletedChunkProofType",
"Confidence",
"CreateAssetUploadResponse",
"CreatedAt",
@@ -257,6 +262,7 @@
"ReportChunkBatchResponse",
"RequestAdditionalPresignedUrLsResponse",
"ResponseFormat",
+ "ResponseFormatType",
"ScoreSearchTerms",
"SearchItem",
"SearchItemClipsItem",
diff --git a/src/twelvelabs/types/asset.py b/src/twelvelabs/types/asset.py
index 97e6e26..816784c 100644
--- a/src/twelvelabs/types/asset.py
+++ b/src/twelvelabs/types/asset.py
@@ -32,10 +32,10 @@ class Asset(UniversalBaseModel):
status: typing.Optional[AssetStatus] = pydantic.Field(default=None)
"""
- Indicates the current state of the asset.
+ Indicates the current status of the asset.
**Values**:
- - `waiting`: The platform is preparing to process the upload
+ - `failed`: The platform failed to process the upload
- `processing`: The platform is processing the uploaded file
- `ready`: The asset is ready to use
"""
@@ -50,20 +50,6 @@ class Asset(UniversalBaseModel):
The MIME type of the asset file.
"""
- url: typing.Optional[str] = pydantic.Field(default=None)
- """
- The URL where you can access the asset file. Use this URL to preview or download the asset.
-
-
- This URL expires after the time specified in the `url_expires_at` field. After expiration, you must retrieve the asset again to obtain a new URL.
-
- """
-
- url_expires_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- The date and time, in RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"), when the URL expires. After this time, the URL in the `url` field becomes invalid. Retrieve the asset again to obtain a new URL.
- """
-
created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
"""
The date and time, in RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"), when the asset was created.
diff --git a/src/twelvelabs/types/asset_status.py b/src/twelvelabs/types/asset_status.py
index afbbc51..80a1487 100644
--- a/src/twelvelabs/types/asset_status.py
+++ b/src/twelvelabs/types/asset_status.py
@@ -2,4 +2,4 @@
import typing
-AssetStatus = typing.Union[typing.Literal["waiting", "processing", "ready"], typing.Any]
+AssetStatus = typing.Union[typing.Literal["failed", "processing", "ready"], typing.Any]
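A minimal sketch of handling the revised status values; how the `Asset` instance is obtained is not shown in this diff and is left out here.

```python
from twelvelabs.types import Asset

def describe(asset: Asset) -> str:
    # "waiting" has been replaced by "failed" in AssetStatus.
    if asset.status == "failed":
        return "The platform failed to process the upload."
    if asset.status == "processing":
        return "The platform is processing the uploaded file."
    return "The asset is ready to use."
```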
diff --git a/src/twelvelabs/types/audio_segmentation.py b/src/twelvelabs/types/audio_segmentation.py
index b73beea..12d23df 100644
--- a/src/twelvelabs/types/audio_segmentation.py
+++ b/src/twelvelabs/types/audio_segmentation.py
@@ -5,6 +5,7 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
from .audio_segmentation_fixed import AudioSegmentationFixed
+from .audio_segmentation_strategy import AudioSegmentationStrategy
class AudioSegmentation(UniversalBaseModel):
@@ -12,7 +13,7 @@ class AudioSegmentation(UniversalBaseModel):
Specifies how the platform divides the audio into segments.
"""
- strategy: typing.Literal["fixed"] = "fixed"
+ strategy: AudioSegmentationStrategy
fixed: AudioSegmentationFixed = pydantic.Field()
"""
Configuration for fixed segmentation.
diff --git a/src/twelvelabs/types/audio_segmentation_strategy.py b/src/twelvelabs/types/audio_segmentation_strategy.py
new file mode 100644
index 0000000..b77049a
--- /dev/null
+++ b/src/twelvelabs/types/audio_segmentation_strategy.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AudioSegmentationStrategy = typing.Union[typing.Literal["fixed"], typing.Any]
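Because `strategy` no longer has a default, callers now construct `AudioSegmentation` with it set explicitly, roughly as below; the `AudioSegmentationFixed` field name is an assumption, since that model is not shown in this diff.

```python
from twelvelabs.types import AudioSegmentation, AudioSegmentationFixed

segmentation = AudioSegmentation(
    strategy="fixed",
    fixed=AudioSegmentationFixed(duration_sec=10),  # assumed field name and value
)
```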
diff --git a/src/twelvelabs/types/completed_chunk.py b/src/twelvelabs/types/completed_chunk.py
index aa6ff29..2a00459 100644
--- a/src/twelvelabs/types/completed_chunk.py
+++ b/src/twelvelabs/types/completed_chunk.py
@@ -4,6 +4,7 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .completed_chunk_proof_type import CompletedChunkProofType
class CompletedChunk(UniversalBaseModel):
@@ -17,7 +18,7 @@ class CompletedChunk(UniversalBaseModel):
The ETag value you received after uploading the chunk. When you upload a chunk to a presigned URL, the response includes an ETag. Submit this value as proof of a successful upload.
"""
- proof_type: typing.Literal["etag"] = pydantic.Field(default="etag")
+ proof_type: CompletedChunkProofType = pydantic.Field()
"""
The verification method. Supported value: `etag`.
"""
diff --git a/src/twelvelabs/types/completed_chunk_proof_type.py b/src/twelvelabs/types/completed_chunk_proof_type.py
new file mode 100644
index 0000000..7ce79d0
--- /dev/null
+++ b/src/twelvelabs/types/completed_chunk_proof_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CompletedChunkProofType = typing.Union[typing.Literal["etag"], typing.Any]
diff --git a/src/twelvelabs/types/create_asset_upload_response.py b/src/twelvelabs/types/create_asset_upload_response.py
index ba9364c..43bbc80 100644
--- a/src/twelvelabs/types/create_asset_upload_response.py
+++ b/src/twelvelabs/types/create_asset_upload_response.py
@@ -26,12 +26,11 @@ class CreateAssetUploadResponse(UniversalBaseModel):
upload_urls: typing.Optional[typing.List[PresignedUrlChunk]] = pydantic.Field(default=None)
"""
- The initial set of presigned URLs for uploading chunks. Each URL corresponds to a specific chunk.
+ An array containing the initial set of presigned URLs for uploading chunks. Each URL corresponds to a specific chunk.
-