From a3398cc4a1c4ecd844c7a06e48033cf1616b2662 Mon Sep 17 00:00:00 2001 From: Jaemin Seo Date: Fri, 12 Dec 2025 21:31:35 +0900 Subject: [PATCH 1/2] feat: update --- poetry.lock | 6 +- pyproject.toml | 2 +- reference.md | 86 ++++++++----------- src/twelvelabs/assets/client.py | 22 ++--- src/twelvelabs/assets/raw_client.py | 22 ++--- src/twelvelabs/core/client_wrapper.py | 4 +- src/twelvelabs/embed/tasks/client.py | 4 +- src/twelvelabs/embed/tasks/raw_client.py | 4 +- src/twelvelabs/embed/v_2/client.py | 32 ++++--- src/twelvelabs/embed/v_2/raw_client.py | 32 ++++--- src/twelvelabs/embed/v_2/tasks/client.py | 22 ++--- src/twelvelabs/embed/v_2/tasks/raw_client.py | 20 ++--- .../v_2/tasks/types/tasks_create_response.py | 2 +- .../indexes/indexed_assets/client.py | 56 ++++++------ .../indexes/indexed_assets/raw_client.py | 56 ++++++------ src/twelvelabs/indexes/videos/client.py | 4 +- src/twelvelabs/indexes/videos/raw_client.py | 4 +- src/twelvelabs/multipart_upload/client.py | 22 +++-- src/twelvelabs/multipart_upload/raw_client.py | 22 +++-- src/twelvelabs/types/asset.py | 18 +--- src/twelvelabs/types/asset_status.py | 2 +- .../types/create_asset_upload_response.py | 9 +- src/twelvelabs/types/embedding_data.py | 34 +++++++- .../types/embedding_task_response.py | 5 +- src/twelvelabs/types/indexed_asset.py | 2 +- src/twelvelabs/types/media_source.py | 2 +- src/twelvelabs/types/video_input_request.py | 4 +- 27 files changed, 234 insertions(+), 264 deletions(-) diff --git a/poetry.lock b/poetry.lock index 794340b..a19d7e2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -60,13 +60,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.3.0" +version = "1.3.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, - {file = "exceptiongroup-1.3.0.tar.gz", hash = 
"sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, + {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"}, + {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index e7c505d..d99e091 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "twelvelabs" [tool.poetry] name = "twelvelabs" -version = "1.1.0" +version = "1.1.1" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index e023b9e..c2b5355 100644 --- a/reference.md +++ b/reference.md @@ -1632,10 +1632,7 @@ client.indexes.delete( This method returns a list of assets in your account. - -- The platform returns your assets sorted by creation date, with the newest at the top of the list. -- The platform automatically deletes assets that are not associated with any entity after 72 hours. - +The platform returns your assets sorted by creation date, with the newest at the top of the list. @@ -1749,7 +1746,7 @@ The number of items to return on each page.
-This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. +This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. **Supported content**: Video, audio, and images. @@ -1823,9 +1820,7 @@ typing.Optional[core.File]` — See core.File for more documentation Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`. - - URL uploads are limited to 4GB. - +URL uploads have a maximum limit of 4GB.
@@ -2147,7 +2142,7 @@ client.multipart_upload.create(
-**filename:** `str` — Original filename of the asset +**filename:** `str` — The original file name of the asset.
@@ -2194,14 +2189,14 @@ The total size of the file in bytes. The platform uses this value to: This method provides information about an upload session, including its current status, chunk-level progress, and completion state. -Use this endpoint to: +Use this method to: - Verify upload completion (`status` = `completed`) - Identify any failed chunks that require a retry - Monitor the upload progress by comparing `uploaded_size` with `total_size` - Determine if the session has expired - Retrieve the status information for each chunk - You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. +You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. @@ -2303,11 +2298,10 @@ The number of items to return on each page.
-This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload. +This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks. + - For optimal performance, report chunks in batches and in any order. -
@@ -3913,7 +3907,7 @@ The desired duration in seconds for each clip for which the platform generates a Defines the scope of video embedding generation. Valid values are the following: - `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`. -- `clip` and `video`: Creates embeddings for video segments and the entire video. +- `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance. To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below: @@ -4154,7 +4148,7 @@ This endpoint synchronously creates embeddings for multimodal content and return - Maximum file size for base64 encoded strings: 36 MB - Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless) - Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Video resolution: 360x360 to 3840x2160 pixels + - Video resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 @@ -4178,7 +4172,6 @@ client = TwelveLabs( ) client.embed.v_2.create( input_type="text", - model_name="marengo3.0", text=TextInputRequest( input_text="man walking a dog", ), @@ -4198,15 +4191,17 @@ client.embed.v_2.create(
-**input_type:** `CreateEmbeddingsRequestInputType` — The type of content for which you wish to create embeddings. - -
-
+**input_type:** `CreateEmbeddingsRequestInputType` + +The type of content for the embeddings. -
-
-**model_name:** `str` — The video understanding model you wish to use. +**Values**: +- `audio`: Creates embeddings for an audio file +- `video`: Creates embeddings for a video file +- `image`: Creates embeddings for an image file +- `text`: Creates embeddings for text input +- `text_image`: Creates embeddings for text and an image.
@@ -4419,7 +4414,7 @@ This endpoint creates embeddings for audio and video content asynchronously. - Maximum duration: 4 hours - Maximum file size: 4 GB - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Resolution: 360x360 to 3840x2160 pixels + - Resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 **Audio**: @@ -4461,7 +4456,6 @@ client = TwelveLabs( ) client.embed.v_2.tasks.create( input_type="video", - model_name="marengo3.0", video=VideoInputRequest( media_source=MediaSource( url="https://user-bucket.com/video/long-video.mp4", @@ -4494,7 +4488,7 @@ client.embed.v_2.tasks.create( **input_type:** `CreateAsyncEmbeddingRequestInputType` -The type of content for which you wish to create embeddings. +The type of content for the embeddings. **Values**: - `audio`: Audio files @@ -4506,14 +4500,6 @@ The type of content for which you wish to create embeddings.
-**model_name:** `str` — The model you wish to use. - -
-
- -
-
- **audio:** `typing.Optional[AudioInputRequest]`
@@ -5624,7 +5610,7 @@ status=ready&status=validating
-**created_at:** `typing.Optional[str]` — Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time. +**created_at:** `typing.Optional[str]` — Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
@@ -5779,20 +5765,20 @@ client.indexes.indexed_assets.create( This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription. -**Common use cases**: +Use this method to: -- Monitor indexing progress: - - Call this endpoint after creating an indexed asset - - Check the `status` field until it shows `ready` - - Once ready, your content is available for search and analysis +- Monitor the indexing progress: + - Call this endpoint after creating an indexed asset + - Check the `status` field until it shows `ready` + - Once ready, your content is available for search and analysis -- Retrieve asset metadata: - - Retrieve system metadata (duration, resolution, filename) - - Access user-defined metadata +- Retrieve the asset metadata: + - Retrieve system metadata (duration, resolution, filename) + - Access user-defined metadata -- Retrieve embeddings: - - Include the `embedding_option` parameter to retrieve video embeddings - - Requires the Marengo video understanding model to be enabled in your index +- Retrieve the embeddings: + - Include the `embeddingOption` parameter to retrieve video embeddings + - Requires the Marengo video understanding model to be enabled in your index - Retrieve transcriptions: - Set the `transcription` parameter to `true` to retrieve spoken words from your video @@ -5874,7 +5860,7 @@ To retrieve embeddings for a video, it must be indexed using the Marengo video u
-**transcription:** `typing.Optional[bool]` — The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset. +**transcription:** `typing.Optional[bool]` — Specifies whether to retrieve a transcription of the spoken words.
@@ -5985,7 +5971,7 @@ client.indexes.indexed_assets.delete(
-Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null. +This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
@@ -6499,7 +6485,7 @@ client.indexes.videos.delete( This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method. -Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null. +This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
diff --git a/src/twelvelabs/assets/client.py b/src/twelvelabs/assets/client.py index c98547d..405c15a 100644 --- a/src/twelvelabs/assets/client.py +++ b/src/twelvelabs/assets/client.py @@ -44,10 +44,7 @@ def list( """ This method returns a list of assets in your account. - - - The platform returns your assets sorted by creation date, with the newest at the top of the list. - - The platform automatically deletes assets that are not associated with any entity after 72 hours. - + The platform returns your assets sorted by creation date, with the newest at the top of the list. Parameters ---------- @@ -111,7 +108,7 @@ def create( request_options: typing.Optional[RequestOptions] = None, ) -> Asset: """ - This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. + This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. **Supported content**: Video, audio, and images. @@ -138,9 +135,7 @@ def create( url : typing.Optional[str] Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`. - - URL uploads are limited to 4GB. - + URL uploads have a maximum limit of 4GB. filename : typing.Optional[str] The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL. @@ -260,10 +255,7 @@ async def list( """ This method returns a list of assets in your account. - - - The platform returns your assets sorted by creation date, with the newest at the top of the list. - - The platform automatically deletes assets that are not associated with any entity after 72 hours. 
- + The platform returns your assets sorted by creation date, with the newest at the top of the list. Parameters ---------- @@ -336,7 +328,7 @@ async def create( request_options: typing.Optional[RequestOptions] = None, ) -> Asset: """ - This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. + This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. **Supported content**: Video, audio, and images. @@ -363,9 +355,7 @@ async def create( url : typing.Optional[str] Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`. - - URL uploads are limited to 4GB. - + URL uploads have a maximum limit of 4GB. filename : typing.Optional[str] The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL. diff --git a/src/twelvelabs/assets/raw_client.py b/src/twelvelabs/assets/raw_client.py index fca924d..2f78edc 100644 --- a/src/twelvelabs/assets/raw_client.py +++ b/src/twelvelabs/assets/raw_client.py @@ -39,10 +39,7 @@ def list( """ This method returns a list of assets in your account. - - - The platform returns your assets sorted by creation date, with the newest at the top of the list. - - The platform automatically deletes assets that are not associated with any entity after 72 hours. - + The platform returns your assets sorted by creation date, with the newest at the top of the list. Parameters ---------- @@ -131,7 +128,7 @@ def create( request_options: typing.Optional[RequestOptions] = None, ) -> HttpResponse[Asset]: """ - This method creates an asset by uploading a file to the platform. 
Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. + This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. **Supported content**: Video, audio, and images. @@ -158,9 +155,7 @@ def create( url : typing.Optional[str] Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`. - - URL uploads are limited to 4GB. - + URL uploads have a maximum limit of 4GB. filename : typing.Optional[str] The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL. @@ -323,10 +318,7 @@ async def list( """ This method returns a list of assets in your account. - - - The platform returns your assets sorted by creation date, with the newest at the top of the list. - - The platform automatically deletes assets that are not associated with any entity after 72 hours. - + The platform returns your assets sorted by creation date, with the newest at the top of the list. Parameters ---------- @@ -418,7 +410,7 @@ async def create( request_options: typing.Optional[RequestOptions] = None, ) -> AsyncHttpResponse[Asset]: """ - This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. + This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities. **Supported content**: Video, audio, and images. @@ -445,9 +437,7 @@ async def create( url : typing.Optional[str] Specify this parameter to upload a file from a publicly accessible URL. 
This parameter is required when `method` is set to `url`. - - URL uploads are limited to 4GB. - + URL uploads have a maximum limit of 4GB. filename : typing.Optional[str] The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL. diff --git a/src/twelvelabs/core/client_wrapper.py b/src/twelvelabs/core/client_wrapper.py index 434f508..16234c4 100644 --- a/src/twelvelabs/core/client_wrapper.py +++ b/src/twelvelabs/core/client_wrapper.py @@ -22,10 +22,10 @@ def __init__( def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { - "User-Agent": "twelvelabs/1.1.0", + "User-Agent": "twelvelabs/1.1.1", "X-Fern-Language": "Python", "X-Fern-SDK-Name": "twelvelabs", - "X-Fern-SDK-Version": "1.1.0", + "X-Fern-SDK-Version": "1.1.1", **(self.get_custom_headers() or {}), } headers["x-api-key"] = self.api_key diff --git a/src/twelvelabs/embed/tasks/client.py b/src/twelvelabs/embed/tasks/client.py index 458a52a..58bc869 100644 --- a/src/twelvelabs/embed/tasks/client.py +++ b/src/twelvelabs/embed/tasks/client.py @@ -190,7 +190,7 @@ def create( video_embedding_scope : typing.Optional[typing.List[TasksCreateRequestVideoEmbeddingScopeItem]] Defines the scope of video embedding generation. Valid values are the following: - `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`. - - `clip` and `video`: Creates embeddings for video segments and the entire video. + - `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance. 
To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below: @@ -507,7 +507,7 @@ async def create( video_embedding_scope : typing.Optional[typing.List[TasksCreateRequestVideoEmbeddingScopeItem]] Defines the scope of video embedding generation. Valid values are the following: - `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`. - - `clip` and `video`: Creates embeddings for video segments and the entire video. + - `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance. To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below: diff --git a/src/twelvelabs/embed/tasks/raw_client.py b/src/twelvelabs/embed/tasks/raw_client.py index 7b7ee34..6df36da 100644 --- a/src/twelvelabs/embed/tasks/raw_client.py +++ b/src/twelvelabs/embed/tasks/raw_client.py @@ -208,7 +208,7 @@ def create( video_embedding_scope : typing.Optional[typing.List[TasksCreateRequestVideoEmbeddingScopeItem]] Defines the scope of video embedding generation. Valid values are the following: - `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`. - - `clip` and `video`: Creates embeddings for video segments and the entire video. + - `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance. To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below: @@ -589,7 +589,7 @@ async def create( video_embedding_scope : typing.Optional[typing.List[TasksCreateRequestVideoEmbeddingScopeItem]] Defines the scope of video embedding generation. 
Valid values are the following: - `clip`: Creates embeddings for each video segment of `video_clip_length` seconds, from `video_start_offset_sec` to `video_end_offset_sec`. - - `clip` and `video`: Creates embeddings for video segments and the entire video. + - `clip` and `video`: Creates embeddings for video segments and the entire video. Use the `video` scope for videos up to 10-30 seconds to maintain optimal performance. To create embeddings for segments and the entire video in the same request, include this parameter twice as shown below: diff --git a/src/twelvelabs/embed/v_2/client.py b/src/twelvelabs/embed/v_2/client.py index 286e0dd..cd4ae28 100644 --- a/src/twelvelabs/embed/v_2/client.py +++ b/src/twelvelabs/embed/v_2/client.py @@ -38,7 +38,6 @@ def create( self, *, input_type: CreateEmbeddingsRequestInputType, - model_name: str, text: typing.Optional[TextInputRequest] = OMIT, image: typing.Optional[ImageInputRequest] = OMIT, text_image: typing.Optional[TextImageInputRequest] = OMIT, @@ -75,17 +74,22 @@ def create( - Maximum file size for base64 encoded strings: 36 MB - Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless) - Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Video resolution: 360x360 to 3840x2160 pixels + - Video resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 Parameters ---------- input_type : CreateEmbeddingsRequestInputType - The type of content for which you wish to create embeddings. + The type of content for the embeddings. - model_name : str - The video understanding model you wish to use. + + **Values**: + - `audio`: Creates embeddings for an audio file + - `video`: Creates embeddings for a video file + - `image`: Creates embeddings for an image file + - `text`: Creates embeddings for text input + - `text_image`: Creates embeddings for text and an image. 
text : typing.Optional[TextInputRequest] @@ -114,7 +118,6 @@ def create( ) client.embed.v_2.create( input_type="text", - model_name="marengo3.0", text=TextInputRequest( input_text="man walking a dog", ), @@ -122,7 +125,6 @@ def create( """ _response = self._raw_client.create( input_type=input_type, - model_name=model_name, text=text, image=image, text_image=text_image, @@ -153,7 +155,6 @@ async def create( self, *, input_type: CreateEmbeddingsRequestInputType, - model_name: str, text: typing.Optional[TextInputRequest] = OMIT, image: typing.Optional[ImageInputRequest] = OMIT, text_image: typing.Optional[TextImageInputRequest] = OMIT, @@ -190,17 +191,22 @@ async def create( - Maximum file size for base64 encoded strings: 36 MB - Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless) - Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Video resolution: 360x360 to 3840x2160 pixels + - Video resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 Parameters ---------- input_type : CreateEmbeddingsRequestInputType - The type of content for which you wish to create embeddings. + The type of content for the embeddings. + - model_name : str - The video understanding model you wish to use. + **Values**: + - `audio`: Creates embeddings for an audio file + - `video`: Creates embeddings for a video file + - `image`: Creates embeddings for an image file + - `text`: Creates embeddings for text input + - `text_image`: Creates embeddings for text and an image. 
text : typing.Optional[TextInputRequest] @@ -234,7 +240,6 @@ async def create( async def main() -> None: await client.embed.v_2.create( input_type="text", - model_name="marengo3.0", text=TextInputRequest( input_text="man walking a dog", ), @@ -245,7 +250,6 @@ async def main() -> None: """ _response = await self._raw_client.create( input_type=input_type, - model_name=model_name, text=text, image=image, text_image=text_image, diff --git a/src/twelvelabs/embed/v_2/raw_client.py b/src/twelvelabs/embed/v_2/raw_client.py index 4cd1b47..9be40b1 100644 --- a/src/twelvelabs/embed/v_2/raw_client.py +++ b/src/twelvelabs/embed/v_2/raw_client.py @@ -32,7 +32,6 @@ def create( self, *, input_type: CreateEmbeddingsRequestInputType, - model_name: str, text: typing.Optional[TextInputRequest] = OMIT, image: typing.Optional[ImageInputRequest] = OMIT, text_image: typing.Optional[TextImageInputRequest] = OMIT, @@ -69,17 +68,22 @@ def create( - Maximum file size for base64 encoded strings: 36 MB - Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless) - Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Video resolution: 360x360 to 3840x2160 pixels + - Video resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 Parameters ---------- input_type : CreateEmbeddingsRequestInputType - The type of content for which you wish to create embeddings. + The type of content for the embeddings. - model_name : str - The video understanding model you wish to use. + + **Values**: + - `audio`: Creates embeddings for an audio file + - `video`: Creates embeddings for a video file + - `image`: Creates embeddings for an image file + - `text`: Creates embeddings for text input + - `text_image`: Creates embeddings for text and an image. 
text : typing.Optional[TextInputRequest] @@ -104,7 +108,6 @@ def create( method="POST", json={ "input_type": input_type, - "model_name": model_name, "text": convert_and_respect_annotation_metadata( object_=text, annotation=TextInputRequest, direction="write" ), @@ -120,6 +123,7 @@ def create( "video": convert_and_respect_annotation_metadata( object_=video, annotation=VideoInputRequest, direction="write" ), + "model_name": "marengo3.0", }, headers={ "content-type": "application/json", @@ -184,7 +188,6 @@ async def create( self, *, input_type: CreateEmbeddingsRequestInputType, - model_name: str, text: typing.Optional[TextInputRequest] = OMIT, image: typing.Optional[ImageInputRequest] = OMIT, text_image: typing.Optional[TextImageInputRequest] = OMIT, @@ -221,17 +224,22 @@ async def create( - Maximum file size for base64 encoded strings: 36 MB - Audio formats: WAV (uncompressed), MP3 (lossy), FLAC (lossless) - Video formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Video resolution: 360x360 to 3840x2160 pixels + - Video resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 Parameters ---------- input_type : CreateEmbeddingsRequestInputType - The type of content for which you wish to create embeddings. + The type of content for the embeddings. + - model_name : str - The video understanding model you wish to use. + **Values**: + - `audio`: Creates embeddings for an audio file + - `video`: Creates embeddings for a video file + - `image`: Creates embeddings for an image file + - `text`: Creates embeddings for text input + - `text_image`: Creates embeddings for text and an image. 
text : typing.Optional[TextInputRequest] @@ -256,7 +264,6 @@ async def create( method="POST", json={ "input_type": input_type, - "model_name": model_name, "text": convert_and_respect_annotation_metadata( object_=text, annotation=TextInputRequest, direction="write" ), @@ -272,6 +279,7 @@ async def create( "video": convert_and_respect_annotation_metadata( object_=video, annotation=VideoInputRequest, direction="write" ), + "model_name": "marengo3.0", }, headers={ "content-type": "application/json", diff --git a/src/twelvelabs/embed/v_2/tasks/client.py b/src/twelvelabs/embed/v_2/tasks/client.py index e8e681a..ae311bb 100644 --- a/src/twelvelabs/embed/v_2/tasks/client.py +++ b/src/twelvelabs/embed/v_2/tasks/client.py @@ -110,7 +110,6 @@ def create( self, *, input_type: CreateAsyncEmbeddingRequestInputType, - model_name: str, audio: typing.Optional[AudioInputRequest] = OMIT, video: typing.Optional[VideoInputRequest] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -132,7 +131,7 @@ def create( - Maximum duration: 4 hours - Maximum file size: 4 GB - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Resolution: 360x360 to 3840x2160 pixels + - Resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 **Audio**: @@ -151,15 +150,12 @@ def create( Parameters ---------- input_type : CreateAsyncEmbeddingRequestInputType - The type of content for which you wish to create embeddings. + The type of content for the embeddings. **Values**: - `audio`: Audio files - `video`: Video content - model_name : str - The model you wish to use. 
- audio : typing.Optional[AudioInputRequest] video : typing.Optional[VideoInputRequest] @@ -187,7 +183,6 @@ def create( ) client.embed.v_2.tasks.create( input_type="audio", - model_name="marengo3.0", audio=AudioInputRequest( media_source=MediaSource( url="https://user-bucket.com/audio/long-audio.wav", @@ -205,7 +200,7 @@ def create( ) """ _response = self._raw_client.create( - input_type=input_type, model_name=model_name, audio=audio, video=video, request_options=request_options + input_type=input_type, audio=audio, video=video, request_options=request_options ) return _response.data @@ -352,7 +347,6 @@ async def create( self, *, input_type: CreateAsyncEmbeddingRequestInputType, - model_name: str, audio: typing.Optional[AudioInputRequest] = OMIT, video: typing.Optional[VideoInputRequest] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -374,7 +368,7 @@ async def create( - Maximum duration: 4 hours - Maximum file size: 4 GB - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Resolution: 360x360 to 3840x2160 pixels + - Resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 **Audio**: @@ -393,15 +387,12 @@ async def create( Parameters ---------- input_type : CreateAsyncEmbeddingRequestInputType - The type of content for which you wish to create embeddings. + The type of content for the embeddings. **Values**: - `audio`: Audio files - `video`: Video content - model_name : str - The model you wish to use. 
- audio : typing.Optional[AudioInputRequest] video : typing.Optional[VideoInputRequest] @@ -434,7 +425,6 @@ async def create( async def main() -> None: await client.embed.v_2.tasks.create( input_type="audio", - model_name="marengo3.0", audio=AudioInputRequest( media_source=MediaSource( url="https://user-bucket.com/audio/long-audio.wav", @@ -455,7 +445,7 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._raw_client.create( - input_type=input_type, model_name=model_name, audio=audio, video=video, request_options=request_options + input_type=input_type, audio=audio, video=video, request_options=request_options ) return _response.data diff --git a/src/twelvelabs/embed/v_2/tasks/raw_client.py b/src/twelvelabs/embed/v_2/tasks/raw_client.py index c559a01..3fc3448 100644 --- a/src/twelvelabs/embed/v_2/tasks/raw_client.py +++ b/src/twelvelabs/embed/v_2/tasks/raw_client.py @@ -131,7 +131,6 @@ def create( self, *, input_type: CreateAsyncEmbeddingRequestInputType, - model_name: str, audio: typing.Optional[AudioInputRequest] = OMIT, video: typing.Optional[VideoInputRequest] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -153,7 +152,7 @@ def create( - Maximum duration: 4 hours - Maximum file size: 4 GB - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Resolution: 360x360 to 3840x2160 pixels + - Resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 **Audio**: @@ -172,15 +171,12 @@ def create( Parameters ---------- input_type : CreateAsyncEmbeddingRequestInputType - The type of content for which you wish to create embeddings. + The type of content for the embeddings. **Values**: - `audio`: Audio files - `video`: Video content - model_name : str - The model you wish to use. 
- audio : typing.Optional[AudioInputRequest] video : typing.Optional[VideoInputRequest] @@ -198,13 +194,13 @@ def create( method="POST", json={ "input_type": input_type, - "model_name": model_name, "audio": convert_and_respect_annotation_metadata( object_=audio, annotation=AudioInputRequest, direction="write" ), "video": convert_and_respect_annotation_metadata( object_=video, annotation=VideoInputRequest, direction="write" ), + "model_name": "marengo3.0", }, headers={ "content-type": "application/json", @@ -415,7 +411,6 @@ async def create( self, *, input_type: CreateAsyncEmbeddingRequestInputType, - model_name: str, audio: typing.Optional[AudioInputRequest] = OMIT, video: typing.Optional[VideoInputRequest] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -437,7 +432,7 @@ async def create( - Maximum duration: 4 hours - Maximum file size: 4 GB - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) - - Resolution: 360x360 to 3840x2160 pixels + - Resolution: 360x360 to 5184x2160 pixels - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1 **Audio**: @@ -456,15 +451,12 @@ async def create( Parameters ---------- input_type : CreateAsyncEmbeddingRequestInputType - The type of content for which you wish to create embeddings. + The type of content for the embeddings. **Values**: - `audio`: Audio files - `video`: Video content - model_name : str - The model you wish to use. 
- audio : typing.Optional[AudioInputRequest] video : typing.Optional[VideoInputRequest] @@ -482,13 +474,13 @@ async def create( method="POST", json={ "input_type": input_type, - "model_name": model_name, "audio": convert_and_respect_annotation_metadata( object_=audio, annotation=AudioInputRequest, direction="write" ), "video": convert_and_respect_annotation_metadata( object_=video, annotation=VideoInputRequest, direction="write" ), + "model_name": "marengo3.0", }, headers={ "content-type": "application/json", diff --git a/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py index 064a40b..0210d31 100644 --- a/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py +++ b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py @@ -22,7 +22,7 @@ class TasksCreateResponse(UniversalBaseModel): data: typing.Optional[typing.List[EmbeddingData]] = pydantic.Field(default=None) """ - Array of embedding results (only when status is ready) + An array of embedding results when `status` is `ready`, or `null` when `status` is `processing` or `failed`. """ if IS_PYDANTIC_V2: diff --git a/src/twelvelabs/indexes/indexed_assets/client.py b/src/twelvelabs/indexes/indexed_assets/client.py index 34e00c8..2c846cd 100644 --- a/src/twelvelabs/indexes/indexed_assets/client.py +++ b/src/twelvelabs/indexes/indexed_assets/client.py @@ -122,7 +122,7 @@ def list( Filter by size. Expressed in bytes. created_at : typing.Optional[str] - Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time. + Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time. 
updated_at : typing.Optional[str] This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time. @@ -263,20 +263,20 @@ def retrieve( """ This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription. - **Common use cases**: + Use this method to: - - Monitor indexing progress: - - Call this endpoint after creating an indexed asset - - Check the `status` field until it shows `ready` - - Once ready, your content is available for search and analysis + - Monitor the indexing progress: + - Call this endpoint after creating an indexed asset + - Check the `status` field until it shows `ready` + - Once ready, your content is available for search and analysis - - Retrieve asset metadata: - - Retrieve system metadata (duration, resolution, filename) - - Access user-defined metadata + - Retrieve the asset metadata: + - Retrieve system metadata (duration, resolution, filename) + - Access user-defined metadata - - Retrieve embeddings: - - Include the `embedding_option` parameter to retrieve video embeddings - - Requires the Marengo video understanding model to be enabled in your index + - Retrieve the embeddings: + - Include the `embeddingOption` parameter to retrieve video embeddings + - Requires the Marengo video understanding model to be enabled in your index - Retrieve transcriptions: - Set the `transcription` parameter to `true` to retrieve spoken words from your video @@ -301,7 +301,7 @@ def retrieve( transcription : typing.Optional[bool] - The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset. 
+ Specifies whether to retrieve a transcription of the spoken words. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -378,7 +378,7 @@ def update( request_options: typing.Optional[RequestOptions] = None, ) -> None: """ - Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null. + This method updates one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to `null`. Parameters ---------- @@ -525,7 +525,7 @@ async def list( Filter by size. Expressed in bytes. created_at : typing.Optional[str] - Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time. + Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time. updated_at : typing.Optional[str] This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time. @@ -683,20 +683,20 @@ async def retrieve( """ This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription. 
- **Common use cases**: + Use this method to: - - Monitor indexing progress: - - Call this endpoint after creating an indexed asset - - Check the `status` field until it shows `ready` - - Once ready, your content is available for search and analysis + - Monitor the indexing progress: + - Call this endpoint after creating an indexed asset + - Check the `status` field until it shows `ready` + - Once ready, your content is available for search and analysis - - Retrieve asset metadata: - - Retrieve system metadata (duration, resolution, filename) - - Access user-defined metadata + - Retrieve the asset metadata: + - Retrieve system metadata (duration, resolution, filename) + - Access user-defined metadata - - Retrieve embeddings: - - Include the `embedding_option` parameter to retrieve video embeddings - - Requires the Marengo video understanding model to be enabled in your index + - Retrieve the embeddings: + - Include the `embeddingOption` parameter to retrieve video embeddings + - Requires the Marengo video understanding model to be enabled in your index - Retrieve transcriptions: - Set the `transcription` parameter to `true` to retrieve spoken words from your video @@ -721,7 +721,7 @@ async def retrieve( transcription : typing.Optional[bool] - The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset. + Specifies whether to retrieve a transcription of the spoken words. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -814,7 +814,7 @@ async def update( request_options: typing.Optional[RequestOptions] = None, ) -> None: """ - Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null. + This method updates one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to `null`. 
Parameters ---------- diff --git a/src/twelvelabs/indexes/indexed_assets/raw_client.py b/src/twelvelabs/indexes/indexed_assets/raw_client.py index f82e1b6..d656030 100644 --- a/src/twelvelabs/indexes/indexed_assets/raw_client.py +++ b/src/twelvelabs/indexes/indexed_assets/raw_client.py @@ -120,7 +120,7 @@ def list( Filter by size. Expressed in bytes. created_at : typing.Optional[str] - Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time. + Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time. updated_at : typing.Optional[str] This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time. @@ -333,20 +333,20 @@ def retrieve( """ This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription. 
- **Common use cases**: + Use this method to: - - Monitor indexing progress: - - Call this endpoint after creating an indexed asset - - Check the `status` field until it shows `ready` - - Once ready, your content is available for search and analysis + - Monitor the indexing progress: + - Call this endpoint after creating an indexed asset + - Check the `status` field until it shows `ready` + - Once ready, your content is available for search and analysis - - Retrieve asset metadata: - - Retrieve system metadata (duration, resolution, filename) - - Access user-defined metadata + - Retrieve the asset metadata: + - Retrieve system metadata (duration, resolution, filename) + - Access user-defined metadata - - Retrieve embeddings: - - Include the `embedding_option` parameter to retrieve video embeddings - - Requires the Marengo video understanding model to be enabled in your index + - Retrieve the embeddings: + - Include the `embeddingOption` parameter to retrieve video embeddings + - Requires the Marengo video understanding model to be enabled in your index - Retrieve transcriptions: - Set the `transcription` parameter to `true` to retrieve spoken words from your video @@ -371,7 +371,7 @@ def retrieve( transcription : typing.Optional[bool] - The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset. + Specifies whether to retrieve a transcription of the spoken words. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -481,7 +481,7 @@ def update( request_options: typing.Optional[RequestOptions] = None, ) -> HttpResponse[None]: """ - Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null. + This method updates one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to `null`. Parameters ---------- @@ -625,7 +625,7 @@ async def list( Filter by size. Expressed in bytes. 
created_at : typing.Optional[str] - Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time. + Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time. updated_at : typing.Optional[str] This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time. @@ -841,20 +841,20 @@ async def retrieve( """ This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription. 
- **Common use cases**: + Use this method to: - - Monitor indexing progress: - - Call this endpoint after creating an indexed asset - - Check the `status` field until it shows `ready` - - Once ready, your content is available for search and analysis + - Monitor the indexing progress: + - Call this endpoint after creating an indexed asset + - Check the `status` field until it shows `ready` + - Once ready, your content is available for search and analysis - - Retrieve asset metadata: - - Retrieve system metadata (duration, resolution, filename) - - Access user-defined metadata + - Retrieve the asset metadata: + - Retrieve system metadata (duration, resolution, filename) + - Access user-defined metadata - - Retrieve embeddings: - - Include the `embedding_option` parameter to retrieve video embeddings - - Requires the Marengo video understanding model to be enabled in your index + - Retrieve the embeddings: + - Include the `embeddingOption` parameter to retrieve video embeddings + - Requires the Marengo video understanding model to be enabled in your index - Retrieve transcriptions: - Set the `transcription` parameter to `true` to retrieve spoken words from your video @@ -879,7 +879,7 @@ async def retrieve( transcription : typing.Optional[bool] - The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset. + Specifies whether to retrieve a transcription of the spoken words. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -989,7 +989,7 @@ async def update( request_options: typing.Optional[RequestOptions] = None, ) -> AsyncHttpResponse[None]: """ - Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null. + This method updates one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to `null`. 
Parameters ---------- diff --git a/src/twelvelabs/indexes/videos/client.py b/src/twelvelabs/indexes/videos/client.py index c2273e2..3ef476e 100644 --- a/src/twelvelabs/indexes/videos/client.py +++ b/src/twelvelabs/indexes/videos/client.py @@ -289,7 +289,7 @@ def update( """ This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method. - Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null. + This method updates one or more fields of the metadata of a video. Also, can delete a field by setting it to `null`. Parameters ---------- @@ -632,7 +632,7 @@ async def update( """ This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method. - Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null. + This method updates one or more fields of the metadata of a video. Also, can delete a field by setting it to `null`. Parameters ---------- diff --git a/src/twelvelabs/indexes/videos/raw_client.py b/src/twelvelabs/indexes/videos/raw_client.py index 455986f..6f8c38b 100644 --- a/src/twelvelabs/indexes/videos/raw_client.py +++ b/src/twelvelabs/indexes/videos/raw_client.py @@ -348,7 +348,7 @@ def update( """ This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method. - Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null. + This method updates one or more fields of the metadata of a video. Also, can delete a field by setting it to `null`. Parameters ---------- @@ -726,7 +726,7 @@ async def update( """ This method will be deprecated in a future version. 
New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method. - Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null. + This method updates one or more fields of the metadata of a video. Also, can delete a field by setting it to `null`. Parameters ---------- diff --git a/src/twelvelabs/multipart_upload/client.py b/src/twelvelabs/multipart_upload/client.py index 569e97d..3c74333 100644 --- a/src/twelvelabs/multipart_upload/client.py +++ b/src/twelvelabs/multipart_upload/client.py @@ -102,7 +102,7 @@ def create( Parameters ---------- filename : str - Original filename of the asset + The original file name of the asset. total_size : int The total size of the file in bytes. The platform uses this value to: @@ -144,14 +144,14 @@ def get_status( """ This method provides information about an upload session, including its current status, chunk-level progress, and completion state. - Use this endpoint to: + Use this method to: - Verify upload completion (`status` = `completed`) - Identify any failed chunks that require a retry - Monitor the upload progress by comparing `uploaded_size` with `total_size` - Determine if the session has expired - Retrieve the status information for each chunk - You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. + You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. Parameters ---------- @@ -205,11 +205,10 @@ def report_chunk_batch( request_options: typing.Optional[RequestOptions] = None, ) -> ReportChunkBatchResponse: """ - This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload. + This method reports successfully uploaded chunks to the platform. 
The platform finalizes the upload after you report all chunks. + - For optimal performance, report chunks in batches and in any order. - Parameters ---------- @@ -393,7 +392,7 @@ async def create( Parameters ---------- filename : str - Original filename of the asset + The original file name of the asset. total_size : int The total size of the file in bytes. The platform uses this value to: @@ -445,14 +444,14 @@ async def get_status( """ This method provides information about an upload session, including its current status, chunk-level progress, and completion state. - Use this endpoint to: + Use this method to: - Verify upload completion (`status` = `completed`) - Identify any failed chunks that require a retry - Monitor the upload progress by comparing `uploaded_size` with `total_size` - Determine if the session has expired - Retrieve the status information for each chunk - You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. + You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. Parameters ---------- @@ -517,11 +516,10 @@ async def report_chunk_batch( request_options: typing.Optional[RequestOptions] = None, ) -> ReportChunkBatchResponse: """ - This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload. + This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks. + - For optimal performance, report chunks in batches and in any order. 
- Parameters ---------- diff --git a/src/twelvelabs/multipart_upload/raw_client.py b/src/twelvelabs/multipart_upload/raw_client.py index 35e4a10..37df0b5 100644 --- a/src/twelvelabs/multipart_upload/raw_client.py +++ b/src/twelvelabs/multipart_upload/raw_client.py @@ -149,7 +149,7 @@ def create( Parameters ---------- filename : str - Original filename of the asset + The original file name of the asset. total_size : int The total size of the file in bytes. The platform uses this value to: @@ -238,14 +238,14 @@ def get_status( """ This method provides information about an upload session, including its current status, chunk-level progress, and completion state. - Use this endpoint to: + Use this method to: - Verify upload completion (`status` = `completed`) - Identify any failed chunks that require a retry - Monitor the upload progress by comparing `uploaded_size` with `total_size` - Determine if the session has expired - Retrieve the status information for each chunk - You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. + You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. Parameters ---------- @@ -359,11 +359,10 @@ def report_chunk_batch( request_options: typing.Optional[RequestOptions] = None, ) -> HttpResponse[ReportChunkBatchResponse]: """ - This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload. + This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks. + - For optimal performance, report chunks in batches and in any order. - Parameters ---------- @@ -670,7 +669,7 @@ async def create( Parameters ---------- filename : str - Original filename of the asset + The original file name of the asset. 
total_size : int The total size of the file in bytes. The platform uses this value to: @@ -759,14 +758,14 @@ async def get_status( """ This method provides information about an upload session, including its current status, chunk-level progress, and completion state. - Use this endpoint to: + Use this method to: - Verify upload completion (`status` = `completed`) - Identify any failed chunks that require a retry - Monitor the upload progress by comparing `uploaded_size` with `total_size` - Determine if the session has expired - Retrieve the status information for each chunk - You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. + You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset. Parameters ---------- @@ -883,11 +882,10 @@ async def report_chunk_batch( request_options: typing.Optional[RequestOptions] = None, ) -> AsyncHttpResponse[ReportChunkBatchResponse]: """ - This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload. + This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks. + - For optimal performance, report chunks in batches and in any order. - Parameters ---------- diff --git a/src/twelvelabs/types/asset.py b/src/twelvelabs/types/asset.py index 97e6e26..816784c 100644 --- a/src/twelvelabs/types/asset.py +++ b/src/twelvelabs/types/asset.py @@ -32,10 +32,10 @@ class Asset(UniversalBaseModel): status: typing.Optional[AssetStatus] = pydantic.Field(default=None) """ - Indicates the current state of the asset. + Indicates the current status of the asset. 
**Values**: - - `waiting`: The platform is preparing to process the upload + - `failed`: The platform failed to process the upload - `processing`: The platform is processing the uploaded file - `ready`: The asset is ready to use """ @@ -50,20 +50,6 @@ class Asset(UniversalBaseModel): The MIME type of the asset file. """ - url: typing.Optional[str] = pydantic.Field(default=None) - """ - The URL where you can access the asset file. Use this URL to preview or download the asset. - - - This URL expires after the time specified in the `url_expires_at` field. After expiration, you must retrieve the asset again to obtain a new URL. - - """ - - url_expires_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - The date and time, in RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"), when the URL expires. After this time, the URL in the `url` field becomes invalid. Retrieve the asset again to obtain a new URL. - """ - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) """ The date and time, in RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"), when the asset was created. 
diff --git a/src/twelvelabs/types/asset_status.py b/src/twelvelabs/types/asset_status.py index afbbc51..80a1487 100644 --- a/src/twelvelabs/types/asset_status.py +++ b/src/twelvelabs/types/asset_status.py @@ -2,4 +2,4 @@ import typing -AssetStatus = typing.Union[typing.Literal["waiting", "processing", "ready"], typing.Any] +AssetStatus = typing.Union[typing.Literal["failed", "processing", "ready"], typing.Any] diff --git a/src/twelvelabs/types/create_asset_upload_response.py b/src/twelvelabs/types/create_asset_upload_response.py index ba9364c..43bbc80 100644 --- a/src/twelvelabs/types/create_asset_upload_response.py +++ b/src/twelvelabs/types/create_asset_upload_response.py @@ -26,12 +26,11 @@ class CreateAssetUploadResponse(UniversalBaseModel): upload_urls: typing.Optional[typing.List[PresignedUrlChunk]] = pydantic.Field(default=None) """ - The initial set of presigned URLs for uploading chunks. Each URL corresponds to a specific chunk. + An array containing the initial set of presigned URLs for uploading chunks. Each URL corresponds to a specific chunk. 
- Date: Fri, 12 Dec 2025 21:40:44 +0900 Subject: [PATCH 2/2] feat: update --- reference.md | 32 ++++++++++++++++++- src/twelvelabs/__init__.py | 10 ++++++ src/twelvelabs/base_client.py | 4 +++ src/twelvelabs/embed/__init__.py | 3 +- src/twelvelabs/embed/v_2/__init__.py | 7 +++- src/twelvelabs/embed/v_2/client.py | 13 ++++++++ src/twelvelabs/embed/v_2/raw_client.py | 13 ++++++-- src/twelvelabs/embed/v_2/tasks/__init__.py | 4 +++ src/twelvelabs/embed/v_2/tasks/client.py | 17 ++++++++-- src/twelvelabs/embed/v_2/tasks/raw_client.py | 13 ++++++-- .../embed/v_2/tasks/types/__init__.py | 4 +++ ...eate_async_embedding_request_model_name.py | 5 +++ .../v_2/tasks/types/tasks_create_response.py | 3 +- .../types/tasks_create_response_status.py | 5 +++ src/twelvelabs/embed/v_2/types/__init__.py | 3 +- .../create_embeddings_request_model_name.py | 5 +++ src/twelvelabs/multipart_upload/__init__.py | 3 ++ src/twelvelabs/multipart_upload/client.py | 31 +++++++++++++++--- src/twelvelabs/multipart_upload/raw_client.py | 25 ++++++++++++--- .../multipart_upload/types/__init__.py | 7 ++++ .../types/create_asset_upload_request_type.py | 5 +++ src/twelvelabs/search/__init__.py | 2 ++ src/twelvelabs/search/client.py | 9 +++--- src/twelvelabs/search/raw_client.py | 9 +++--- src/twelvelabs/search/types/__init__.py | 2 ++ .../search_create_request_query_media_type.py | 5 +++ src/twelvelabs/types/__init__.py | 6 ++++ src/twelvelabs/types/audio_segmentation.py | 3 +- .../types/audio_segmentation_strategy.py | 5 +++ src/twelvelabs/types/completed_chunk.py | 3 +- .../types/completed_chunk_proof_type.py | 5 +++ src/twelvelabs/types/response_format.py | 3 +- src/twelvelabs/types/response_format_type.py | 5 +++ 33 files changed, 239 insertions(+), 30 deletions(-) create mode 100644 src/twelvelabs/embed/v_2/tasks/types/create_async_embedding_request_model_name.py create mode 100644 src/twelvelabs/embed/v_2/tasks/types/tasks_create_response_status.py create mode 100644 
src/twelvelabs/embed/v_2/types/create_embeddings_request_model_name.py create mode 100644 src/twelvelabs/multipart_upload/types/__init__.py create mode 100644 src/twelvelabs/multipart_upload/types/create_asset_upload_request_type.py create mode 100644 src/twelvelabs/search/types/search_create_request_query_media_type.py create mode 100644 src/twelvelabs/types/audio_segmentation_strategy.py create mode 100644 src/twelvelabs/types/completed_chunk_proof_type.py create mode 100644 src/twelvelabs/types/response_format_type.py diff --git a/reference.md b/reference.md index c2b5355..1547bf0 100644 --- a/reference.md +++ b/reference.md @@ -400,6 +400,7 @@ response = client.analyze_stream( prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.", temperature=0.2, response_format=ResponseFormat( + type="json_schema", json_schema={ "type": "object", "properties": { @@ -541,6 +542,7 @@ client.analyze( prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.", temperature=0.2, response_format=ResponseFormat( + type="json_schema", json_schema={ "type": "object", "properties": { @@ -2125,6 +2127,7 @@ client = TwelveLabs( ) client.multipart_upload.create( filename="my-video.mp4", + type="video", total_size=104857600, ) @@ -2150,6 +2153,14 @@ client.multipart_upload.create(
+**type:** `CreateAssetUploadRequestType` — The type of asset you want to upload. + +
+
+ +
+
+ **total_size:** `int` The total size of the file in bytes. The platform uses this value to: @@ -2327,6 +2338,7 @@ client.multipart_upload.report_chunk_batch( CompletedChunk( chunk_index=1, proof="d41d8cd98f00b204e9800998ecf8427e", + proof_type="etag", chunk_size=5242880, ) ], @@ -3333,7 +3345,7 @@ For detailed guidance and version-specific behavior, see the [Search options](/v
-**query_media_type:** `typing.Optional[typing.Literal["image"]]` — The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. +**query_media_type:** `typing.Optional[SearchCreateRequestQueryMediaType]` — The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search.
@@ -4172,6 +4184,7 @@ client = TwelveLabs( ) client.embed.v_2.create( input_type="text", + model_name="marengo3.0", text=TextInputRequest( input_text="man walking a dog", ), @@ -4209,6 +4222,14 @@ The type of content for the embeddings.
+**model_name:** `CreateEmbeddingsRequestModelName` — The video understanding model to use. Only "marengo3.0" is supported. + +
+
+ +
+
+ **text:** `typing.Optional[TextInputRequest]`
@@ -4456,6 +4477,7 @@ client = TwelveLabs( ) client.embed.v_2.tasks.create( input_type="video", + model_name="marengo3.0", video=VideoInputRequest( media_source=MediaSource( url="https://user-bucket.com/video/long-video.mp4", @@ -4500,6 +4522,14 @@ The type of content for the embeddings.
+**model_name:** `CreateAsyncEmbeddingRequestModelName` — The model you wish to use. Only `"marengo3.0"` is supported. + +
+
+ +
+
+ **audio:** `typing.Optional[AudioInputRequest]`
diff --git a/src/twelvelabs/__init__.py b/src/twelvelabs/__init__.py index f9426c8..5df6c68 100644 --- a/src/twelvelabs/__init__.py +++ b/src/twelvelabs/__init__.py @@ -14,6 +14,7 @@ AudioSegment, AudioSegmentation, AudioSegmentationFixed, + AudioSegmentationStrategy, BadRequestErrorBody, BaseEmbeddingMetadata, BaseSegment, @@ -23,6 +24,7 @@ ChunkInfo, ChunkInfoStatus, CompletedChunk, + CompletedChunkProofType, Confidence, CreateAssetUploadResponse, CreatedAt, @@ -97,6 +99,7 @@ ReportChunkBatchResponse, RequestAdditionalPresignedUrLsResponse, ResponseFormat, + ResponseFormatType, ScoreSearchTerms, SearchItem, SearchItemClipsItem, @@ -165,9 +168,11 @@ from .environment import TwelveLabsEnvironment from .indexes import IndexesCreateRequestModelsItem, IndexesCreateResponse, IndexesListResponse from .manage_entities import ListAllEntitiesRequestSortBy, ListAllEntitiesRequestStatus, ListAllEntitiesResponse +from .multipart_upload import CreateAssetUploadRequestType from .search import ( SearchCreateRequestGroupBy, SearchCreateRequestOperator, + SearchCreateRequestQueryMediaType, SearchCreateRequestSearchOptionsItem, SearchCreateRequestSortOption, SearchCreateRequestTranscriptionOptionsItem, @@ -199,6 +204,7 @@ "AudioSegment", "AudioSegmentation", "AudioSegmentationFixed", + "AudioSegmentationStrategy", "BadRequestError", "BadRequestErrorBody", "BaseEmbeddingMetadata", @@ -209,7 +215,9 @@ "ChunkInfo", "ChunkInfoStatus", "CompletedChunk", + "CompletedChunkProofType", "Confidence", + "CreateAssetUploadRequestType", "CreateAssetUploadResponse", "CreatedAt", "EmbeddingAudioMetadata", @@ -294,9 +302,11 @@ "ReportChunkBatchResponse", "RequestAdditionalPresignedUrLsResponse", "ResponseFormat", + "ResponseFormatType", "ScoreSearchTerms", "SearchCreateRequestGroupBy", "SearchCreateRequestOperator", + "SearchCreateRequestQueryMediaType", "SearchCreateRequestSearchOptionsItem", "SearchCreateRequestSortOption", "SearchCreateRequestTranscriptionOptionsItem", diff --git 
a/src/twelvelabs/base_client.py b/src/twelvelabs/base_client.py index 54a32c8..60396a3 100644 --- a/src/twelvelabs/base_client.py +++ b/src/twelvelabs/base_client.py @@ -400,6 +400,7 @@ def analyze_stream( prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.", temperature=0.2, response_format=ResponseFormat( + type="json_schema", json_schema={ "type": "object", "properties": { @@ -493,6 +494,7 @@ def analyze( prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.", temperature=0.2, response_format=ResponseFormat( + type="json_schema", json_schema={ "type": "object", "properties": { @@ -918,6 +920,7 @@ async def main() -> None: prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.", temperature=0.2, response_format=ResponseFormat( + type="json_schema", json_schema={ "type": "object", "properties": { @@ -1020,6 +1023,7 @@ async def main() -> None: prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.", temperature=0.2, response_format=ResponseFormat( + type="json_schema", json_schema={ "type": "object", "properties": { diff --git a/src/twelvelabs/embed/__init__.py b/src/twelvelabs/embed/__init__.py index 812e0ae..0a04482 100644 --- a/src/twelvelabs/embed/__init__.py +++ b/src/twelvelabs/embed/__init__.py @@ -14,10 +14,11 @@ TasksStatusResponse, TasksStatusResponseVideoEmbedding, ) -from .v_2 import CreateEmbeddingsRequestInputType +from .v_2 import 
CreateEmbeddingsRequestInputType, CreateEmbeddingsRequestModelName __all__ = [ "CreateEmbeddingsRequestInputType", + "CreateEmbeddingsRequestModelName", "TasksCreateRequestVideoEmbeddingScopeItem", "TasksCreateResponse", "TasksListResponse", diff --git a/src/twelvelabs/embed/v_2/__init__.py b/src/twelvelabs/embed/v_2/__init__.py index 6f7ed1b..00ee08e 100644 --- a/src/twelvelabs/embed/v_2/__init__.py +++ b/src/twelvelabs/embed/v_2/__init__.py @@ -2,19 +2,24 @@ # isort: skip_file -from .types import CreateEmbeddingsRequestInputType +from .types import CreateEmbeddingsRequestInputType, CreateEmbeddingsRequestModelName from . import tasks from .tasks import ( CreateAsyncEmbeddingRequestInputType, + CreateAsyncEmbeddingRequestModelName, TasksCreateResponse, + TasksCreateResponseStatus, TasksListResponse, TasksListResponsePageInfo, ) __all__ = [ "CreateAsyncEmbeddingRequestInputType", + "CreateAsyncEmbeddingRequestModelName", "CreateEmbeddingsRequestInputType", + "CreateEmbeddingsRequestModelName", "TasksCreateResponse", + "TasksCreateResponseStatus", "TasksListResponse", "TasksListResponsePageInfo", "tasks", diff --git a/src/twelvelabs/embed/v_2/client.py b/src/twelvelabs/embed/v_2/client.py index cd4ae28..3867073 100644 --- a/src/twelvelabs/embed/v_2/client.py +++ b/src/twelvelabs/embed/v_2/client.py @@ -13,6 +13,7 @@ from .raw_client import AsyncRawV2Client, RawV2Client from .tasks.client import AsyncTasksClient, TasksClient from .types.create_embeddings_request_input_type import CreateEmbeddingsRequestInputType +from .types.create_embeddings_request_model_name import CreateEmbeddingsRequestModelName # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -38,6 +39,7 @@ def create( self, *, input_type: CreateEmbeddingsRequestInputType, + model_name: CreateEmbeddingsRequestModelName, text: typing.Optional[TextInputRequest] = OMIT, image: typing.Optional[ImageInputRequest] = OMIT, text_image: typing.Optional[TextImageInputRequest] = OMIT, @@ -91,6 +93,9 @@ def create( - `text`: Creates embeddings for text input - `text_image`: Creates embeddings for text and an image. + model_name : CreateEmbeddingsRequestModelName + The video understanding model to use. Only "marengo3.0" is supported. + text : typing.Optional[TextInputRequest] image : typing.Optional[ImageInputRequest] @@ -118,6 +123,7 @@ def create( ) client.embed.v_2.create( input_type="text", + model_name="marengo3.0", text=TextInputRequest( input_text="man walking a dog", ), @@ -125,6 +131,7 @@ def create( """ _response = self._raw_client.create( input_type=input_type, + model_name=model_name, text=text, image=image, text_image=text_image, @@ -155,6 +162,7 @@ async def create( self, *, input_type: CreateEmbeddingsRequestInputType, + model_name: CreateEmbeddingsRequestModelName, text: typing.Optional[TextInputRequest] = OMIT, image: typing.Optional[ImageInputRequest] = OMIT, text_image: typing.Optional[TextImageInputRequest] = OMIT, @@ -208,6 +216,9 @@ async def create( - `text`: Creates embeddings for text input - `text_image`: Creates embeddings for text and an image. + model_name : CreateEmbeddingsRequestModelName + The video understanding model to use. Only "marengo3.0" is supported. 
+ text : typing.Optional[TextInputRequest] image : typing.Optional[ImageInputRequest] @@ -240,6 +251,7 @@ async def create( async def main() -> None: await client.embed.v_2.create( input_type="text", + model_name="marengo3.0", text=TextInputRequest( input_text="man walking a dog", ), @@ -250,6 +262,7 @@ async def main() -> None: """ _response = await self._raw_client.create( input_type=input_type, + model_name=model_name, text=text, image=image, text_image=text_image, diff --git a/src/twelvelabs/embed/v_2/raw_client.py b/src/twelvelabs/embed/v_2/raw_client.py index 9be40b1..1cf647a 100644 --- a/src/twelvelabs/embed/v_2/raw_client.py +++ b/src/twelvelabs/embed/v_2/raw_client.py @@ -19,6 +19,7 @@ from ...types.text_input_request import TextInputRequest from ...types.video_input_request import VideoInputRequest from .types.create_embeddings_request_input_type import CreateEmbeddingsRequestInputType +from .types.create_embeddings_request_model_name import CreateEmbeddingsRequestModelName # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -32,6 +33,7 @@ def create( self, *, input_type: CreateEmbeddingsRequestInputType, + model_name: CreateEmbeddingsRequestModelName, text: typing.Optional[TextInputRequest] = OMIT, image: typing.Optional[ImageInputRequest] = OMIT, text_image: typing.Optional[TextImageInputRequest] = OMIT, @@ -85,6 +87,9 @@ def create( - `text`: Creates embeddings for text input - `text_image`: Creates embeddings for text and an image. + model_name : CreateEmbeddingsRequestModelName + The video understanding model to use. Only "marengo3.0" is supported. 
+ text : typing.Optional[TextInputRequest] image : typing.Optional[ImageInputRequest] @@ -108,6 +113,7 @@ def create( method="POST", json={ "input_type": input_type, + "model_name": model_name, "text": convert_and_respect_annotation_metadata( object_=text, annotation=TextInputRequest, direction="write" ), @@ -123,7 +129,6 @@ def create( "video": convert_and_respect_annotation_metadata( object_=video, annotation=VideoInputRequest, direction="write" ), - "model_name": "marengo3.0", }, headers={ "content-type": "application/json", @@ -188,6 +193,7 @@ async def create( self, *, input_type: CreateEmbeddingsRequestInputType, + model_name: CreateEmbeddingsRequestModelName, text: typing.Optional[TextInputRequest] = OMIT, image: typing.Optional[ImageInputRequest] = OMIT, text_image: typing.Optional[TextImageInputRequest] = OMIT, @@ -241,6 +247,9 @@ async def create( - `text`: Creates embeddings for text input - `text_image`: Creates embeddings for text and an image. + model_name : CreateEmbeddingsRequestModelName + The video understanding model to use. Only "marengo3.0" is supported. 
+ text : typing.Optional[TextInputRequest] image : typing.Optional[ImageInputRequest] @@ -264,6 +273,7 @@ async def create( method="POST", json={ "input_type": input_type, + "model_name": model_name, "text": convert_and_respect_annotation_metadata( object_=text, annotation=TextInputRequest, direction="write" ), @@ -279,7 +289,6 @@ async def create( "video": convert_and_respect_annotation_metadata( object_=video, annotation=VideoInputRequest, direction="write" ), - "model_name": "marengo3.0", }, headers={ "content-type": "application/json", diff --git a/src/twelvelabs/embed/v_2/tasks/__init__.py b/src/twelvelabs/embed/v_2/tasks/__init__.py index 29cfdb9..a5e489f 100644 --- a/src/twelvelabs/embed/v_2/tasks/__init__.py +++ b/src/twelvelabs/embed/v_2/tasks/__init__.py @@ -4,14 +4,18 @@ from .types import ( CreateAsyncEmbeddingRequestInputType, + CreateAsyncEmbeddingRequestModelName, TasksCreateResponse, + TasksCreateResponseStatus, TasksListResponse, TasksListResponsePageInfo, ) __all__ = [ "CreateAsyncEmbeddingRequestInputType", + "CreateAsyncEmbeddingRequestModelName", "TasksCreateResponse", + "TasksCreateResponseStatus", "TasksListResponse", "TasksListResponsePageInfo", ] diff --git a/src/twelvelabs/embed/v_2/tasks/client.py b/src/twelvelabs/embed/v_2/tasks/client.py index ae311bb..ac1a2c3 100644 --- a/src/twelvelabs/embed/v_2/tasks/client.py +++ b/src/twelvelabs/embed/v_2/tasks/client.py @@ -11,6 +11,7 @@ from ....types.video_input_request import VideoInputRequest from .raw_client import AsyncRawTasksClient, RawTasksClient from .types.create_async_embedding_request_input_type import CreateAsyncEmbeddingRequestInputType +from .types.create_async_embedding_request_model_name import CreateAsyncEmbeddingRequestModelName from .types.tasks_create_response import TasksCreateResponse # this is used as the default value for optional parameters @@ -110,6 +111,7 @@ def create( self, *, input_type: CreateAsyncEmbeddingRequestInputType, + model_name: 
CreateAsyncEmbeddingRequestModelName, audio: typing.Optional[AudioInputRequest] = OMIT, video: typing.Optional[VideoInputRequest] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -156,6 +158,9 @@ def create( - `audio`: Audio files - `video`: Video content + model_name : CreateAsyncEmbeddingRequestModelName + The model you wish to use. Only `"marengo3.0"` is supported. + audio : typing.Optional[AudioInputRequest] video : typing.Optional[VideoInputRequest] @@ -183,6 +188,7 @@ def create( ) client.embed.v_2.tasks.create( input_type="audio", + model_name="marengo3.0", audio=AudioInputRequest( media_source=MediaSource( url="https://user-bucket.com/audio/long-audio.wav", @@ -190,6 +196,7 @@ def create( start_sec=0.0, end_sec=3600.0, segmentation=AudioSegmentation( + strategy="fixed", fixed=AudioSegmentationFixed( duration_sec=6, ), @@ -200,7 +207,7 @@ def create( ) """ _response = self._raw_client.create( - input_type=input_type, audio=audio, video=video, request_options=request_options + input_type=input_type, model_name=model_name, audio=audio, video=video, request_options=request_options ) return _response.data @@ -347,6 +354,7 @@ async def create( self, *, input_type: CreateAsyncEmbeddingRequestInputType, + model_name: CreateAsyncEmbeddingRequestModelName, audio: typing.Optional[AudioInputRequest] = OMIT, video: typing.Optional[VideoInputRequest] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -393,6 +401,9 @@ async def create( - `audio`: Audio files - `video`: Video content + model_name : CreateAsyncEmbeddingRequestModelName + The model you wish to use. Only `"marengo3.0"` is supported. 
+ audio : typing.Optional[AudioInputRequest] video : typing.Optional[VideoInputRequest] @@ -425,6 +436,7 @@ async def create( async def main() -> None: await client.embed.v_2.tasks.create( input_type="audio", + model_name="marengo3.0", audio=AudioInputRequest( media_source=MediaSource( url="https://user-bucket.com/audio/long-audio.wav", @@ -432,6 +444,7 @@ async def main() -> None: start_sec=0.0, end_sec=3600.0, segmentation=AudioSegmentation( + strategy="fixed", fixed=AudioSegmentationFixed( duration_sec=6, ), @@ -445,7 +458,7 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._raw_client.create( - input_type=input_type, audio=audio, video=video, request_options=request_options + input_type=input_type, model_name=model_name, audio=audio, video=video, request_options=request_options ) return _response.data diff --git a/src/twelvelabs/embed/v_2/tasks/raw_client.py b/src/twelvelabs/embed/v_2/tasks/raw_client.py index 3fc3448..881e04a 100644 --- a/src/twelvelabs/embed/v_2/tasks/raw_client.py +++ b/src/twelvelabs/embed/v_2/tasks/raw_client.py @@ -19,6 +19,7 @@ from ....types.media_embedding_task import MediaEmbeddingTask from ....types.video_input_request import VideoInputRequest from .types.create_async_embedding_request_input_type import CreateAsyncEmbeddingRequestInputType +from .types.create_async_embedding_request_model_name import CreateAsyncEmbeddingRequestModelName from .types.tasks_create_response import TasksCreateResponse from .types.tasks_list_response import TasksListResponse @@ -131,6 +132,7 @@ def create( self, *, input_type: CreateAsyncEmbeddingRequestInputType, + model_name: CreateAsyncEmbeddingRequestModelName, audio: typing.Optional[AudioInputRequest] = OMIT, video: typing.Optional[VideoInputRequest] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -177,6 +179,9 @@ def create( - `audio`: Audio files - `video`: Video content + model_name : CreateAsyncEmbeddingRequestModelName + The model you wish to use. 
Only `"marengo3.0"` is supported. + audio : typing.Optional[AudioInputRequest] video : typing.Optional[VideoInputRequest] @@ -194,13 +199,13 @@ def create( method="POST", json={ "input_type": input_type, + "model_name": model_name, "audio": convert_and_respect_annotation_metadata( object_=audio, annotation=AudioInputRequest, direction="write" ), "video": convert_and_respect_annotation_metadata( object_=video, annotation=VideoInputRequest, direction="write" ), - "model_name": "marengo3.0", }, headers={ "content-type": "application/json", @@ -411,6 +416,7 @@ async def create( self, *, input_type: CreateAsyncEmbeddingRequestInputType, + model_name: CreateAsyncEmbeddingRequestModelName, audio: typing.Optional[AudioInputRequest] = OMIT, video: typing.Optional[VideoInputRequest] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -457,6 +463,9 @@ async def create( - `audio`: Audio files - `video`: Video content + model_name : CreateAsyncEmbeddingRequestModelName + The model you wish to use. Only `"marengo3.0"` is supported. 
+ audio : typing.Optional[AudioInputRequest] video : typing.Optional[VideoInputRequest] @@ -474,13 +483,13 @@ async def create( method="POST", json={ "input_type": input_type, + "model_name": model_name, "audio": convert_and_respect_annotation_metadata( object_=audio, annotation=AudioInputRequest, direction="write" ), "video": convert_and_respect_annotation_metadata( object_=video, annotation=VideoInputRequest, direction="write" ), - "model_name": "marengo3.0", }, headers={ "content-type": "application/json", diff --git a/src/twelvelabs/embed/v_2/tasks/types/__init__.py b/src/twelvelabs/embed/v_2/tasks/types/__init__.py index 66721fe..06e4df1 100644 --- a/src/twelvelabs/embed/v_2/tasks/types/__init__.py +++ b/src/twelvelabs/embed/v_2/tasks/types/__init__.py @@ -3,13 +3,17 @@ # isort: skip_file from .create_async_embedding_request_input_type import CreateAsyncEmbeddingRequestInputType +from .create_async_embedding_request_model_name import CreateAsyncEmbeddingRequestModelName from .tasks_create_response import TasksCreateResponse +from .tasks_create_response_status import TasksCreateResponseStatus from .tasks_list_response import TasksListResponse from .tasks_list_response_page_info import TasksListResponsePageInfo __all__ = [ "CreateAsyncEmbeddingRequestInputType", + "CreateAsyncEmbeddingRequestModelName", "TasksCreateResponse", + "TasksCreateResponseStatus", "TasksListResponse", "TasksListResponsePageInfo", ] diff --git a/src/twelvelabs/embed/v_2/tasks/types/create_async_embedding_request_model_name.py b/src/twelvelabs/embed/v_2/tasks/types/create_async_embedding_request_model_name.py new file mode 100644 index 0000000..7c19456 --- /dev/null +++ b/src/twelvelabs/embed/v_2/tasks/types/create_async_embedding_request_model_name.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CreateAsyncEmbeddingRequestModelName = typing.Union[typing.Literal["marengo3.0"], typing.Any] diff --git a/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py index 0210d31..95e8cb9 100644 --- a/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py +++ b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response.py @@ -7,6 +7,7 @@ from .....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .....core.serialization import FieldMetadata from .....types.embedding_data import EmbeddingData +from .tasks_create_response_status import TasksCreateResponseStatus class TasksCreateResponse(UniversalBaseModel): @@ -15,7 +16,7 @@ class TasksCreateResponse(UniversalBaseModel): The unique identifier of the embedding task """ - status: typing.Literal["processing"] = pydantic.Field(default="processing") + status: TasksCreateResponseStatus = pydantic.Field() """ The initial status of the embedding task. """ diff --git a/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response_status.py b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response_status.py new file mode 100644 index 0000000..8b1b643 --- /dev/null +++ b/src/twelvelabs/embed/v_2/tasks/types/tasks_create_response_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +TasksCreateResponseStatus = typing.Union[typing.Literal["processing"], typing.Any] diff --git a/src/twelvelabs/embed/v_2/types/__init__.py b/src/twelvelabs/embed/v_2/types/__init__.py index f369e66..9efd359 100644 --- a/src/twelvelabs/embed/v_2/types/__init__.py +++ b/src/twelvelabs/embed/v_2/types/__init__.py @@ -3,5 +3,6 @@ # isort: skip_file from .create_embeddings_request_input_type import CreateEmbeddingsRequestInputType +from .create_embeddings_request_model_name import CreateEmbeddingsRequestModelName -__all__ = ["CreateEmbeddingsRequestInputType"] +__all__ = ["CreateEmbeddingsRequestInputType", "CreateEmbeddingsRequestModelName"] diff --git a/src/twelvelabs/embed/v_2/types/create_embeddings_request_model_name.py b/src/twelvelabs/embed/v_2/types/create_embeddings_request_model_name.py new file mode 100644 index 0000000..523db4c --- /dev/null +++ b/src/twelvelabs/embed/v_2/types/create_embeddings_request_model_name.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CreateEmbeddingsRequestModelName = typing.Union[typing.Literal["marengo3.0"], typing.Any] diff --git a/src/twelvelabs/multipart_upload/__init__.py b/src/twelvelabs/multipart_upload/__init__.py index 5cde020..ddeaf92 100644 --- a/src/twelvelabs/multipart_upload/__init__.py +++ b/src/twelvelabs/multipart_upload/__init__.py @@ -2,3 +2,6 @@ # isort: skip_file +from .types import CreateAssetUploadRequestType + +__all__ = ["CreateAssetUploadRequestType"] diff --git a/src/twelvelabs/multipart_upload/client.py b/src/twelvelabs/multipart_upload/client.py index 3c74333..2119003 100644 --- a/src/twelvelabs/multipart_upload/client.py +++ b/src/twelvelabs/multipart_upload/client.py @@ -12,6 +12,7 @@ from ..types.report_chunk_batch_response import ReportChunkBatchResponse from ..types.request_additional_presigned_ur_ls_response import RequestAdditionalPresignedUrLsResponse from .raw_client import AsyncRawMultipartUploadClient, RawMultipartUploadClient +from .types.create_asset_upload_request_type import CreateAssetUploadRequestType # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -85,7 +86,12 @@ def list_incomplete_uploads( ) def create( - self, *, filename: str, total_size: int, request_options: typing.Optional[RequestOptions] = None + self, + *, + filename: str, + type: CreateAssetUploadRequestType, + total_size: int, + request_options: typing.Optional[RequestOptions] = None, ) -> CreateAssetUploadResponse: """ This method creates a multipart upload session. @@ -104,6 +110,9 @@ def create( filename : str The original file name of the asset. + type : CreateAssetUploadRequestType + The type of asset you want to upload. + total_size : int The total size of the file in bytes. The platform uses this value to: - Calculate the optimal chunk size. 
@@ -127,10 +136,13 @@ def create( ) client.multipart_upload.create( filename="my-video.mp4", + type="video", total_size=104857600, ) """ - _response = self._raw_client.create(filename=filename, total_size=total_size, request_options=request_options) + _response = self._raw_client.create( + filename=filename, type=type, total_size=total_size, request_options=request_options + ) return _response.data def get_status( @@ -239,6 +251,7 @@ def report_chunk_batch( CompletedChunk( chunk_index=1, proof="d41d8cd98f00b204e9800998ecf8427e", + proof_type="etag", chunk_size=5242880, ) ], @@ -375,7 +388,12 @@ async def main() -> None: ) async def create( - self, *, filename: str, total_size: int, request_options: typing.Optional[RequestOptions] = None + self, + *, + filename: str, + type: CreateAssetUploadRequestType, + total_size: int, + request_options: typing.Optional[RequestOptions] = None, ) -> CreateAssetUploadResponse: """ This method creates a multipart upload session. @@ -394,6 +412,9 @@ async def create( filename : str The original file name of the asset. + type : CreateAssetUploadRequestType + The type of asset you want to upload. + total_size : int The total size of the file in bytes. The platform uses this value to: - Calculate the optimal chunk size. 
@@ -422,6 +443,7 @@ async def create( async def main() -> None: await client.multipart_upload.create( filename="my-video.mp4", + type="video", total_size=104857600, ) @@ -429,7 +451,7 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._raw_client.create( - filename=filename, total_size=total_size, request_options=request_options + filename=filename, type=type, total_size=total_size, request_options=request_options ) return _response.data @@ -555,6 +577,7 @@ async def main() -> None: CompletedChunk( chunk_index=1, proof="d41d8cd98f00b204e9800998ecf8427e", + proof_type="etag", chunk_size=5242880, ) ], diff --git a/src/twelvelabs/multipart_upload/raw_client.py b/src/twelvelabs/multipart_upload/raw_client.py index 37df0b5..ef93e45 100644 --- a/src/twelvelabs/multipart_upload/raw_client.py +++ b/src/twelvelabs/multipart_upload/raw_client.py @@ -23,6 +23,7 @@ from ..types.list_incomplete_uploads_response import ListIncompleteUploadsResponse from ..types.report_chunk_batch_response import ReportChunkBatchResponse from ..types.request_additional_presigned_ur_ls_response import RequestAdditionalPresignedUrLsResponse +from .types.create_asset_upload_request_type import CreateAssetUploadRequestType # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -132,7 +133,12 @@ def list_incomplete_uploads( raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) def create( - self, *, filename: str, total_size: int, request_options: typing.Optional[RequestOptions] = None + self, + *, + filename: str, + type: CreateAssetUploadRequestType, + total_size: int, + request_options: typing.Optional[RequestOptions] = None, ) -> HttpResponse[CreateAssetUploadResponse]: """ This method creates a multipart upload session. @@ -151,6 +157,9 @@ def create( filename : str The original file name of the asset. + type : CreateAssetUploadRequestType + The type of asset you want to upload. 
+ total_size : int The total size of the file in bytes. The platform uses this value to: - Calculate the optimal chunk size. @@ -170,8 +179,8 @@ def create( method="POST", json={ "filename": filename, + "type": type, "total_size": total_size, - "type": "video", }, headers={ "content-type": "application/json", @@ -652,7 +661,12 @@ async def _get_next(): raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) async def create( - self, *, filename: str, total_size: int, request_options: typing.Optional[RequestOptions] = None + self, + *, + filename: str, + type: CreateAssetUploadRequestType, + total_size: int, + request_options: typing.Optional[RequestOptions] = None, ) -> AsyncHttpResponse[CreateAssetUploadResponse]: """ This method creates a multipart upload session. @@ -671,6 +685,9 @@ async def create( filename : str The original file name of the asset. + type : CreateAssetUploadRequestType + The type of asset you want to upload. + total_size : int The total size of the file in bytes. The platform uses this value to: - Calculate the optimal chunk size. @@ -690,8 +707,8 @@ async def create( method="POST", json={ "filename": filename, + "type": type, "total_size": total_size, - "type": "video", }, headers={ "content-type": "application/json", diff --git a/src/twelvelabs/multipart_upload/types/__init__.py b/src/twelvelabs/multipart_upload/types/__init__.py new file mode 100644 index 0000000..125735d --- /dev/null +++ b/src/twelvelabs/multipart_upload/types/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +# isort: skip_file + +from .create_asset_upload_request_type import CreateAssetUploadRequestType + +__all__ = ["CreateAssetUploadRequestType"] diff --git a/src/twelvelabs/multipart_upload/types/create_asset_upload_request_type.py b/src/twelvelabs/multipart_upload/types/create_asset_upload_request_type.py new file mode 100644 index 0000000..6c4da36 --- /dev/null +++ b/src/twelvelabs/multipart_upload/types/create_asset_upload_request_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateAssetUploadRequestType = typing.Union[typing.Literal["video"], typing.Any] diff --git a/src/twelvelabs/search/__init__.py b/src/twelvelabs/search/__init__.py index 6c68973..6da738b 100644 --- a/src/twelvelabs/search/__init__.py +++ b/src/twelvelabs/search/__init__.py @@ -5,6 +5,7 @@ from .types import ( SearchCreateRequestGroupBy, SearchCreateRequestOperator, + SearchCreateRequestQueryMediaType, SearchCreateRequestSearchOptionsItem, SearchCreateRequestSortOption, SearchCreateRequestTranscriptionOptionsItem, @@ -15,6 +16,7 @@ __all__ = [ "SearchCreateRequestGroupBy", "SearchCreateRequestOperator", + "SearchCreateRequestQueryMediaType", "SearchCreateRequestSearchOptionsItem", "SearchCreateRequestSortOption", "SearchCreateRequestTranscriptionOptionsItem", diff --git a/src/twelvelabs/search/client.py b/src/twelvelabs/search/client.py index 1870d39..fe109fd 100644 --- a/src/twelvelabs/search/client.py +++ b/src/twelvelabs/search/client.py @@ -10,6 +10,7 @@ from .raw_client import AsyncRawSearchClient, RawSearchClient from .types.search_create_request_group_by import SearchCreateRequestGroupBy from .types.search_create_request_operator import SearchCreateRequestOperator +from .types.search_create_request_query_media_type import SearchCreateRequestQueryMediaType from .types.search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem from .types.search_create_request_sort_option import 
SearchCreateRequestSortOption from .types.search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem @@ -39,7 +40,7 @@ def create( *, index_id: str, search_options: typing.List[SearchCreateRequestSearchOptionsItem], - query_media_type: typing.Optional[typing.Literal["image"]] = OMIT, + query_media_type: typing.Optional[SearchCreateRequestQueryMediaType] = OMIT, query_media_url: typing.Optional[str] = OMIT, query_media_file: typing.Optional[core.File] = OMIT, query_text: typing.Optional[str] = OMIT, @@ -110,7 +111,7 @@ def create( For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. - query_media_type : typing.Optional[typing.Literal["image"]] + query_media_type : typing.Optional[SearchCreateRequestQueryMediaType] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. query_media_url : typing.Optional[str] @@ -331,7 +332,7 @@ async def create( *, index_id: str, search_options: typing.List[SearchCreateRequestSearchOptionsItem], - query_media_type: typing.Optional[typing.Literal["image"]] = OMIT, + query_media_type: typing.Optional[SearchCreateRequestQueryMediaType] = OMIT, query_media_url: typing.Optional[str] = OMIT, query_media_file: typing.Optional[core.File] = OMIT, query_text: typing.Optional[str] = OMIT, @@ -402,7 +403,7 @@ async def create( For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. - query_media_type : typing.Optional[typing.Literal["image"]] + query_media_type : typing.Optional[SearchCreateRequestQueryMediaType] The type of media you wish to use. This parameter is required for media queries. 
For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. query_media_url : typing.Optional[str] diff --git a/src/twelvelabs/search/raw_client.py b/src/twelvelabs/search/raw_client.py index 2267cab..15ab327 100644 --- a/src/twelvelabs/search/raw_client.py +++ b/src/twelvelabs/search/raw_client.py @@ -16,6 +16,7 @@ from ..types.threshold_search import ThresholdSearch from .types.search_create_request_group_by import SearchCreateRequestGroupBy from .types.search_create_request_operator import SearchCreateRequestOperator +from .types.search_create_request_query_media_type import SearchCreateRequestQueryMediaType from .types.search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem from .types.search_create_request_sort_option import SearchCreateRequestSortOption from .types.search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem @@ -34,7 +35,7 @@ def create( *, index_id: str, search_options: typing.List[SearchCreateRequestSearchOptionsItem], - query_media_type: typing.Optional[typing.Literal["image"]] = OMIT, + query_media_type: typing.Optional[SearchCreateRequestQueryMediaType] = OMIT, query_media_url: typing.Optional[str] = OMIT, query_media_file: typing.Optional[core.File] = OMIT, query_text: typing.Optional[str] = OMIT, @@ -105,7 +106,7 @@ def create( For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. - query_media_type : typing.Optional[typing.Literal["image"]] + query_media_type : typing.Optional[SearchCreateRequestQueryMediaType] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. 
query_media_url : typing.Optional[str] @@ -363,7 +364,7 @@ async def create( *, index_id: str, search_options: typing.List[SearchCreateRequestSearchOptionsItem], - query_media_type: typing.Optional[typing.Literal["image"]] = OMIT, + query_media_type: typing.Optional[SearchCreateRequestQueryMediaType] = OMIT, query_media_url: typing.Optional[str] = OMIT, query_media_file: typing.Optional[core.File] = OMIT, query_text: typing.Optional[str] = OMIT, @@ -434,7 +435,7 @@ async def create( For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. - query_media_type : typing.Optional[typing.Literal["image"]] + query_media_type : typing.Optional[SearchCreateRequestQueryMediaType] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. 
query_media_url : typing.Optional[str] diff --git a/src/twelvelabs/search/types/__init__.py b/src/twelvelabs/search/types/__init__.py index 272ecd4..6d84d9a 100644 --- a/src/twelvelabs/search/types/__init__.py +++ b/src/twelvelabs/search/types/__init__.py @@ -4,6 +4,7 @@ from .search_create_request_group_by import SearchCreateRequestGroupBy from .search_create_request_operator import SearchCreateRequestOperator +from .search_create_request_query_media_type import SearchCreateRequestQueryMediaType from .search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem from .search_create_request_sort_option import SearchCreateRequestSortOption from .search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem @@ -13,6 +14,7 @@ __all__ = [ "SearchCreateRequestGroupBy", "SearchCreateRequestOperator", + "SearchCreateRequestQueryMediaType", "SearchCreateRequestSearchOptionsItem", "SearchCreateRequestSortOption", "SearchCreateRequestTranscriptionOptionsItem", diff --git a/src/twelvelabs/search/types/search_create_request_query_media_type.py b/src/twelvelabs/search/types/search_create_request_query_media_type.py new file mode 100644 index 0000000..7ea0eae --- /dev/null +++ b/src/twelvelabs/search/types/search_create_request_query_media_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SearchCreateRequestQueryMediaType = typing.Union[typing.Literal["image"], typing.Any] diff --git a/src/twelvelabs/types/__init__.py b/src/twelvelabs/types/__init__.py index 6f92f2c..64dd12d 100644 --- a/src/twelvelabs/types/__init__.py +++ b/src/twelvelabs/types/__init__.py @@ -13,6 +13,7 @@ from .audio_segment import AudioSegment from .audio_segmentation import AudioSegmentation from .audio_segmentation_fixed import AudioSegmentationFixed +from .audio_segmentation_strategy import AudioSegmentationStrategy from .bad_request_error_body import BadRequestErrorBody from .base_embedding_metadata import BaseEmbeddingMetadata from .base_segment import BaseSegment @@ -22,6 +23,7 @@ from .chunk_info import ChunkInfo from .chunk_info_status import ChunkInfoStatus from .completed_chunk import CompletedChunk +from .completed_chunk_proof_type import CompletedChunkProofType from .confidence import Confidence from .create_asset_upload_response import CreateAssetUploadResponse from .created_at import CreatedAt @@ -100,6 +102,7 @@ from .report_chunk_batch_response import ReportChunkBatchResponse from .request_additional_presigned_ur_ls_response import RequestAdditionalPresignedUrLsResponse from .response_format import ResponseFormat +from .response_format_type import ResponseFormatType from .score_search_terms import ScoreSearchTerms from .search_item import SearchItem from .search_item_clips_item import SearchItemClipsItem @@ -174,6 +177,7 @@ "AudioSegment", "AudioSegmentation", "AudioSegmentationFixed", + "AudioSegmentationStrategy", "BadRequestErrorBody", "BaseEmbeddingMetadata", "BaseSegment", @@ -183,6 +187,7 @@ "ChunkInfo", "ChunkInfoStatus", "CompletedChunk", + "CompletedChunkProofType", "Confidence", "CreateAssetUploadResponse", "CreatedAt", @@ -257,6 +262,7 @@ "ReportChunkBatchResponse", "RequestAdditionalPresignedUrLsResponse", "ResponseFormat", + "ResponseFormatType", "ScoreSearchTerms", "SearchItem", "SearchItemClipsItem", diff --git 
a/src/twelvelabs/types/audio_segmentation.py b/src/twelvelabs/types/audio_segmentation.py index b73beea..12d23df 100644 --- a/src/twelvelabs/types/audio_segmentation.py +++ b/src/twelvelabs/types/audio_segmentation.py @@ -5,6 +5,7 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .audio_segmentation_fixed import AudioSegmentationFixed +from .audio_segmentation_strategy import AudioSegmentationStrategy class AudioSegmentation(UniversalBaseModel): @@ -12,7 +13,7 @@ class AudioSegmentation(UniversalBaseModel): Specifies how the platform divides the audio into segments. """ - strategy: typing.Literal["fixed"] = "fixed" + strategy: AudioSegmentationStrategy fixed: AudioSegmentationFixed = pydantic.Field() """ Configuration for fixed segmentation. diff --git a/src/twelvelabs/types/audio_segmentation_strategy.py b/src/twelvelabs/types/audio_segmentation_strategy.py new file mode 100644 index 0000000..b77049a --- /dev/null +++ b/src/twelvelabs/types/audio_segmentation_strategy.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AudioSegmentationStrategy = typing.Union[typing.Literal["fixed"], typing.Any] diff --git a/src/twelvelabs/types/completed_chunk.py b/src/twelvelabs/types/completed_chunk.py index aa6ff29..2a00459 100644 --- a/src/twelvelabs/types/completed_chunk.py +++ b/src/twelvelabs/types/completed_chunk.py @@ -4,6 +4,7 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from .completed_chunk_proof_type import CompletedChunkProofType class CompletedChunk(UniversalBaseModel): @@ -17,7 +18,7 @@ class CompletedChunk(UniversalBaseModel): The ETag value you received after uploading the chunk. When you upload a chunk to a presigned URLs, the response includes an ETag. Use this value and submit it as proof of successful upload. 
""" - proof_type: typing.Literal["etag"] = pydantic.Field(default="etag") + proof_type: CompletedChunkProofType = pydantic.Field() """ The verification method. Supported value: `etag`. """ diff --git a/src/twelvelabs/types/completed_chunk_proof_type.py b/src/twelvelabs/types/completed_chunk_proof_type.py new file mode 100644 index 0000000..7ce79d0 --- /dev/null +++ b/src/twelvelabs/types/completed_chunk_proof_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CompletedChunkProofType = typing.Union[typing.Literal["etag"], typing.Any] diff --git a/src/twelvelabs/types/response_format.py b/src/twelvelabs/types/response_format.py index 22b710e..6be5a8a 100644 --- a/src/twelvelabs/types/response_format.py +++ b/src/twelvelabs/types/response_format.py @@ -4,6 +4,7 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from .response_format_type import ResponseFormatType class ResponseFormat(UniversalBaseModel): @@ -11,7 +12,7 @@ class ResponseFormat(UniversalBaseModel): Use this parameter to specify the format of the response. When you omit this parameter, the platform returns unstructured text. """ - type: typing.Literal["json_schema"] = pydantic.Field(default="json_schema") + type: ResponseFormatType = pydantic.Field() """ Set this parameter to "json_schema" to receive structured JSON responses. """ diff --git a/src/twelvelabs/types/response_format_type.py b/src/twelvelabs/types/response_format_type.py new file mode 100644 index 0000000..5f52b62 --- /dev/null +++ b/src/twelvelabs/types/response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ResponseFormatType = typing.Union[typing.Literal["json_schema"], typing.Any]