diff --git a/poetry.lock b/poetry.lock index 2744b38..5092d3a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -483,58 +483,58 @@ files = [ [[package]] name = "tomli" -version = "2.4.0" +version = "2.4.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867"}, - {file = "tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9"}, - {file = "tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95"}, - {file = "tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76"}, - {file = "tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d"}, - {file = "tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576"}, - {file = "tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a"}, - {file = "tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa"}, - {file = "tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614"}, - {file = "tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1"}, - {file = "tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8"}, - {file = "tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a"}, - {file = "tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1"}, - {file = "tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b"}, - {file = "tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51"}, - {file = "tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729"}, - {file = "tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da"}, - {file = "tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3"}, - {file = "tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0"}, - {file = "tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e"}, - {file = "tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4"}, - {file = 
"tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e"}, - {file = "tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c"}, - {file = "tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f"}, - {file = "tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86"}, - {file = "tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87"}, - {file = "tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132"}, - {file = "tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6"}, - {file = "tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc"}, - {file = "tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66"}, - {file = "tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d"}, - {file = "tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702"}, - {file = "tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8"}, - {file = "tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776"}, - {file = "tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475"}, - {file = "tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2"}, - {file = "tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9"}, - {file = "tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0"}, - {file = "tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df"}, - {file = "tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d"}, - {file = "tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f"}, - {file = "tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b"}, - {file = "tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087"}, - {file = "tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd"}, - {file = "tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = 
"sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4"}, - {file = "tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a"}, - {file = "tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c"}, + {file = "tomli-2.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f8f0fc26ec2cc2b965b7a3b87cd19c5c6b8c5e5f436b984e85f486d652285c30"}, + {file = "tomli-2.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ab97e64ccda8756376892c53a72bd1f964e519c77236368527f758fbc36a53a"}, + {file = "tomli-2.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96481a5786729fd470164b47cdb3e0e58062a496f455ee41b4403be77cb5a076"}, + {file = "tomli-2.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a881ab208c0baf688221f8cecc5401bd291d67e38a1ac884d6736cbcd8247e9"}, + {file = "tomli-2.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47149d5bd38761ac8be13a84864bf0b7b70bc051806bc3669ab1cbc56216b23c"}, + {file = "tomli-2.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ec9bfaf3ad2df51ace80688143a6a4ebc09a248f6ff781a9945e51937008fcbc"}, + {file = "tomli-2.4.1-cp311-cp311-win32.whl", hash = "sha256:ff2983983d34813c1aeb0fa89091e76c3a22889ee83ab27c5eeb45100560c049"}, + {file = "tomli-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:5ee18d9ebdb417e384b58fe414e8d6af9f4e7a0ae761519fb50f721de398dd4e"}, + {file = "tomli-2.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:c2541745709bad0264b7d4705ad453b76ccd191e64aa6f0fc66b69a293a45ece"}, + {file = "tomli-2.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c742f741d58a28940ce01d58f0ab2ea3ced8b12402f162f4d534dfe18ba1cd6a"}, + {file = "tomli-2.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7f86fd587c4ed9dd76f318225e7d9b29cfc5a9d43de44e5754db8d1128487085"}, + {file = "tomli-2.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ff18e6a727ee0ab0388507b89d1bc6a22b138d1e2fa56d1ad494586d61d2eae9"}, + {file = "tomli-2.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:136443dbd7e1dee43c68ac2694fde36b2849865fa258d39bf822c10e8068eac5"}, + {file = "tomli-2.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e262d41726bc187e69af7825504c933b6794dc3fbd5945e41a79bb14c31f585"}, + {file = "tomli-2.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5cb41aa38891e073ee49d55fbc7839cfdb2bc0e600add13874d048c94aadddd1"}, + {file = "tomli-2.4.1-cp312-cp312-win32.whl", hash = "sha256:da25dc3563bff5965356133435b757a795a17b17d01dbc0f42fb32447ddfd917"}, + {file = "tomli-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:52c8ef851d9a240f11a88c003eacb03c31fc1c9c4ec64a99a0f922b93874fda9"}, + {file = "tomli-2.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:f758f1b9299d059cc3f6546ae2af89670cb1c4d48ea29c3cacc4fe7de3058257"}, + {file = "tomli-2.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36d2bd2ad5fb9eaddba5226aa02c8ec3fa4f192631e347b3ed28186d43be6b54"}, + {file = "tomli-2.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:eb0dc4e38e6a1fd579e5d50369aa2e10acfc9cace504579b2faabb478e76941a"}, + {file = "tomli-2.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c7f2c7f2b9ca6bdeef8f0fa897f8e05085923eb091721675170254cbc5b02897"}, + {file = 
"tomli-2.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f3c6818a1a86dd6dca7ddcaaf76947d5ba31aecc28cb1b67009a5877c9a64f3f"}, + {file = "tomli-2.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d312ef37c91508b0ab2cee7da26ec0b3ed2f03ce12bd87a588d771ae15dcf82d"}, + {file = "tomli-2.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51529d40e3ca50046d7606fa99ce3956a617f9b36380da3b7f0dd3dd28e68cb5"}, + {file = "tomli-2.4.1-cp313-cp313-win32.whl", hash = "sha256:2190f2e9dd7508d2a90ded5ed369255980a1bcdd58e52f7fe24b8162bf9fedbd"}, + {file = "tomli-2.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:8d65a2fbf9d2f8352685bc1364177ee3923d6baf5e7f43ea4959d7d8bc326a36"}, + {file = "tomli-2.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:4b605484e43cdc43f0954ddae319fb75f04cc10dd80d830540060ee7cd0243cd"}, + {file = "tomli-2.4.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:fd0409a3653af6c147209d267a0e4243f0ae46b011aa978b1080359fddc9b6cf"}, + {file = "tomli-2.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a120733b01c45e9a0c34aeef92bf0cf1d56cfe81ed9d47d562f9ed591a9828ac"}, + {file = "tomli-2.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:559db847dc486944896521f68d8190be1c9e719fced785720d2216fe7022b662"}, + {file = "tomli-2.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01f520d4f53ef97964a240a035ec2a869fe1a37dde002b57ebc4417a27ccd853"}, + {file = "tomli-2.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7f94b27a62cfad8496c8d2513e1a222dd446f095fca8987fceef261225538a15"}, + {file = "tomli-2.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ede3e6487c5ef5d28634ba3f31f989030ad6af71edfb0055cbbd14189ff240ba"}, + {file = "tomli-2.4.1-cp314-cp314-win32.whl", hash = "sha256:3d48a93ee1c9b79c04bb38772ee1b64dcf18ff43085896ea460ca8dec96f35f6"}, + {file = "tomli-2.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:88dceee75c2c63af144e456745e10101eb67361050196b0b6af5d717254dddf7"}, + {file = "tomli-2.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:b8c198f8c1805dc42708689ed6864951fd2494f924149d3e4bce7710f8eb5232"}, + {file = "tomli-2.4.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:d4d8fe59808a54658fcc0160ecfb1b30f9089906c50b23bcb4c69eddc19ec2b4"}, + {file = "tomli-2.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7008df2e7655c495dd12d2a4ad038ff878d4ca4b81fccaf82b714e07eae4402c"}, + {file = "tomli-2.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1d8591993e228b0c930c4bb0db464bdad97b3289fb981255d6c9a41aedc84b2d"}, + {file = "tomli-2.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:734e20b57ba95624ecf1841e72b53f6e186355e216e5412de414e3c51e5e3c41"}, + {file = "tomli-2.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8a650c2dbafa08d42e51ba0b62740dae4ecb9338eefa093aa5c78ceb546fcd5c"}, + {file = "tomli-2.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:504aa796fe0569bb43171066009ead363de03675276d2d121ac1a4572397870f"}, + {file = "tomli-2.4.1-cp314-cp314t-win32.whl", hash = "sha256:b1d22e6e9387bf4739fbe23bfa80e93f6b0373a7f1b96c6227c32bef95a4d7a8"}, + {file = "tomli-2.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2c1c351919aca02858f740c6d33adea0c5deea37f9ecca1cc1ef9e884a619d26"}, + {file = "tomli-2.4.1-cp314-cp314t-win_arm64.whl", hash = 
"sha256:eab21f45c7f66c13f2a9e0e1535309cee140182a9cdae1e041d02e47291e8396"}, + {file = "tomli-2.4.1-py3-none-any.whl", hash = "sha256:0d85819802132122da43cb86656f8d1f8c6587d54ae7dcaf30e90533028b49fe"}, + {file = "tomli-2.4.1.tar.gz", hash = "sha256:7c7e1a961a0b2f2472c1ac5b69affa0ae1132c39adcb67aba98568702b9cc23f"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index 85994d5..aee2d1b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "twelvelabs" [tool.poetry] name = "twelvelabs" -version = "1.2.1" +version = "1.2.2" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 121761c..370ce2b 100644 --- a/reference.md +++ b/reference.md @@ -11,11 +11,26 @@
-This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. +This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + +- Minimum duration: 4 seconds +- Maximum duration: 1 hour +- Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) +- Resolution: 360x360 to 5184x2160 pixels +- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + +**When to use this method**: +- Analyze videos up to 1 hour +- Retrieve immediate results without waiting for asynchronous processing +- Stream text fragments in real-time for immediate processing and feedback + +**Do not use this method for**: +- Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. -- This endpoint supports streaming responses.
@@ -70,7 +85,7 @@ for chunk in response.data:
-**video_id:** `str` — The unique identifier of the video for which you wish to generate a text. +**prompt:** `AnalyzeTextPrompt`
@@ -78,20 +93,11 @@ for chunk in response.data:
-**prompt:** `str` +**video_id:** `typing.Optional[str]` -A prompt that guides the model on the desired format or content. +The unique identifier of the video to analyze. - -- Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. -- Your prompts can be instructive or descriptive, or you can also phrase them as questions. -- The maximum length of a prompt is 2,000 tokens. - - -**Examples**: - -- Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). -- I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead.
@@ -99,13 +105,15 @@ A prompt that guides the model on the desired format or content.
-**temperature:** `typing.Optional[float]` +**video:** `typing.Optional[VideoContext]` + +
+
-Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. +
+
-**Default:** 0.2 -**Min:** 0 -**Max:** 1 +**temperature:** `typing.Optional[AnalyzeTemperature]`
@@ -121,7 +129,7 @@ Controls the randomness of the text output generated by the model. A higher valu
-**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. +**max_tokens:** `typing.Optional[AnalyzeMaxTokens]`
@@ -153,11 +161,26 @@ Controls the randomness of the text output generated by the model. A higher valu
-This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. +This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + +- Minimum duration: 4 seconds +- Maximum duration: 1 hour +- Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) +- Resolution: 360x360 to 5184x2160 pixels +- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + +**When to use this method**: +- Analyze videos up to 1 hour +- Retrieve immediate results without waiting for asynchronous processing +- Stream text fragments in real-time for immediate processing and feedback + +**Do not use this method for**: +- Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. -- This endpoint supports streaming responses.
@@ -210,7 +233,7 @@ client.analyze(
-**video_id:** `str` — The unique identifier of the video for which you wish to generate a text. +**prompt:** `AnalyzeTextPrompt`
@@ -218,20 +241,11 @@ client.analyze(
-**prompt:** `str` +**video_id:** `typing.Optional[str]` -A prompt that guides the model on the desired format or content. +The unique identifier of the video to analyze. - -- Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. -- Your prompts can be instructive or descriptive, or you can also phrase them as questions. -- The maximum length of a prompt is 2,000 tokens. - - -**Examples**: - -- Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). -- I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead.
@@ -239,13 +253,15 @@ A prompt that guides the model on the desired format or content.
-**temperature:** `typing.Optional[float]` +**video:** `typing.Optional[VideoContext]` + +
+
-Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. +
+
-**Default:** 0.2 -**Min:** 0 -**Max:** 1 +**temperature:** `typing.Optional[AnalyzeTemperature]`
@@ -261,7 +277,7 @@ Controls the randomness of the text output generated by the model. A higher valu
-**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. +**max_tokens:** `typing.Optional[AnalyzeMaxTokens]`
@@ -2600,7 +2616,7 @@ Parameters for embeddings: - The Marengo video understanding model generates embeddings for all modalities in the same latent space. This shared space enables any-to-any searches across different types of content. - You can create multiple types of embeddings in a single API call. -- Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these +- Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. @@ -2644,7 +2660,7 @@ client.embed.create( The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. @@ -2664,27 +2679,6 @@ The text for which you wish to create an embedding.
-**text_truncate:** `typing.Optional[str]` - -Specifies how the platform handles text that exceeds token limits. - -**Available options by model version**: - -**Marengo 3.0**: This parameter is deprecated. The platform automatically truncates text exceeding 500 tokens from the end. - -**Marengo 2.7**: Specifies truncation method for text exceeding 77 tokens: -- `start`: Removes tokens from the beginning -- `end`: Removes tokens from the end (default) -- `none`: Returns an error if the text is longer than the maximum token limit. - -**Default**: `end` - -
-
- -
-
- **image_url:** `typing.Optional[str]` — The publicly accessible URL of the image for which you wish to create an embedding. This parameter is required for image embeddings if `image_file` is not provided.
@@ -2767,14 +2761,12 @@ Use this endpoint to search for relevant matches in an index using text, media, - Provide up to 10 images by specifying the following parameters multiple times: - `query_media_url`: Publicly accessible URL of your media file. - `query_media_file`: Local media file. -- Marengo 2.7 supports a single image per request. - -**Composed text and media queries** (Marengo 3.0 only): +**Composed text and media queries**: - Use the `query_text` parameter for your text query. - Set `query_media_type` to `image`. - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times. -**Entity search** (Marengo 3.0 only and in beta): +**Entity search** (beta): - To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter. @@ -2833,8 +2825,8 @@ Specifies the modalities the video understanding model uses to find relevant inf Available options: - `visual`: Searches visual content. -- `audio`: Searches non-speech audio (Marengo 3.0) or all audio (Marengo 2.7). -- `transcription`: Spoken words (Marengo 3.0 only) +- `audio`: Searches non-speech audio. +- `transcription`: Searches spoken words. - You can specify multiple search options in conjunction with the [`operator`](/v1.3/api-reference/any-to-video-search/make-search-request#request.body.operator.operator) parameter described below to broaden or narrow your search. For example, to search using both visual and non-speech audio content, include this parameter two times in the request as shown below: @@ -2845,7 +2837,7 @@ Available options: ``` -For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. +For guidance, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section.
@@ -2894,7 +2886,7 @@ The text query to search for. This parameter is required for text queries. Note If you're using the Entity Search feature to search for specific persons in your video content, you must enclose the unique identifier of your entity between the `<@` and `>` markers. For example, to search for an entity with the ID `entity123`, use `<@entity123> is walking` as your query. -The maximum query length varies by model. Marengo 3.0 supports up to 500 tokens per query, while Marengo 2.7 supports up to 77 tokens per query. +Marengo supports up to 500 tokens per query. @@ -2904,7 +2896,7 @@ The maximum query length varies by model. Marengo 3.0 supports up to 500 tokens **transcription_options:** `typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]]` -Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when using Marengo 3.0 with the `search_options` parameter containing the `transcription` value. +Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when the `search_options` parameter contains the `transcription` value. Available options: - `lexical`: Exact word matching @@ -2920,23 +2912,6 @@ For details on when to use each option, see the [Transcription options](/v1.3/do
-**adjust_confidence_level:** `typing.Optional[float]` - - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - -This parameter specifies the strictness of the thresholds for assigning the high, medium, or low confidence levels to search results. If you use a lower value, the thresholds become more relaxed, and more search results will be classified as having high, medium, or low confidence levels. You can use this parameter to include a broader range of potentially relevant video clips, even if some results might be less precise. - -**Min**: 0 -**Max**: 1 -**Default:** 0.5 - -
-
- -
-
- **group_by:** `typing.Optional[SearchCreateRequestGroupBy]` Use this parameter to group or ungroup items in a response. It can take one of the following values: @@ -2951,37 +2926,6 @@ Use this parameter to group or ungroup items in a response. It can take one of t
-**threshold:** `typing.Optional[ThresholdSearch]` - -
-
- -
-
- -**sort_option:** `typing.Optional[SearchCreateRequestSortOption]` - - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - -Use this parameter to specify the sort order for the response. - -When performing a search, the platform assigns a relevance ranking to each video clip that matches your search terms. By default, the search results are sorted by relevance ranking in ascending order, with 1 being the most relevant result. - -If you set this parameter to `score` and `group_by` is set to `video`, the platform will determine the highest relevance ranking (lowest number) for each video and sort the videos in the response by this ranking. For each video, the matching video clips will be sorted by relevance ranking in ascending order. - -If you set this parameter to `clip_count` and `group_by` is set to `video`, the platform will sort the videos in the response by the number of clips. For each video, the matching video clips will be sorted by relevance ranking in ascending order. You can use `clip_count` only when the matching video clips are grouped by video. - - -**Default:** `score` - -
-
- -
-
- **operator:** `typing.Optional[SearchCreateRequestOperator]` Combines multiple search options using `or` or `and`. Use `and` to find segments matching all search options. Use `or` to find segments matching any search option. For detailed guidance on using this parameter, see the [Combine multiple modalities](/v1.3/docs/concepts/modalities#combine-multiple-modalities) section. @@ -3149,6 +3093,388 @@ client.search.retrieve(
+
+
+ + +## AnalyzeAsync Tasks +
client.analyze_async.tasks.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This method returns a list of the analysis tasks in your account. The platform returns your analysis tasks sorted by creation date, with the newest at the top of the list. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from twelvelabs import TwelveLabs + +client = TwelveLabs( + api_key="YOUR_API_KEY", +) +client.analyze_async.tasks.list( + page=1, + page_limit=10, + status="queued", +) + +``` +
+
+
+
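The shape of `TasksListResponse` isn't spelled out in this diff. Assuming it exposes a `data` list like the SDK's other list responses, and that each item carries `id` and `status` fields, a page-walking sketch might look like this:

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")

page = 1
while True:
    response = client.analyze_async.tasks.list(page=page, page_limit=50)
    # `data`, `id`, and `status` are assumptions modeled on the SDK's
    # other list endpoints; inspect TasksListResponse to confirm.
    if not response.data:
        break
    for task in response.data:
        print(task.id, task.status)
    page += 1
```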
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` + +A number that identifies the page to retrieve. + +**Default**: `1`. + +
+
+ +
+
+ +**page_limit:** `typing.Optional[int]` + +The number of items to return on each page. + +**Default**: `10`. +**Max**: `50`. + +
+
+ +
+
+ +**status:** `typing.Optional[AnalyzeTaskStatus]` + +Filter analysis tasks by status. +Possible values: `queued`, `pending`, `processing`, `ready`, `failed`. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.analyze_async.tasks.create(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This method asynchronously analyzes your videos and generates fully customizable text based on your prompts. + + +- Minimum duration: 4 seconds +- Maximum duration: 2 hours +- Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) +- Resolution: 360x360 to 5184x2160 pixels +- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + +**When to use this method**: +- Analyze videos longer than 1 hour +- Process videos asynchronously without blocking your application + +**Do not use this method for**: +- Videos for which you need immediate results or real-time streaming. Use the [`POST`](/v1.3/api-reference/analyze-videos/sync-analysis) method of the `/analyze` endpoint instead. + +Analyzing videos asynchronously requires three steps: + +1. Create an analysis task using this method. The platform returns a task ID. +2. Poll the status of the task using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. Wait until the status is `ready`. +3. Retrieve the results from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. + + +This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from twelvelabs import TwelveLabs, VideoContext_Url + +client = TwelveLabs( + api_key="YOUR_API_KEY", +) +client.analyze_async.tasks.create( + video=VideoContext_Url( + url="https://example.com/video.mp4", + ), + prompt="Generate a detailed summary of this video in 3-4 sentences", + temperature=0.2, + max_tokens=1000, +) + +``` +
+
+
+
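`VideoContext` is a union: alongside `VideoContext_Url` used above, this release also exports `VideoContext_AssetId` and `VideoContext_Base64String`. A sketch referencing an uploaded asset instead of a URL, where the `asset_id` field name is an assumption inferred from the variant's name:

```python
from twelvelabs import TwelveLabs, VideoContext_AssetId

client = TwelveLabs(api_key="YOUR_API_KEY")

client.analyze_async.tasks.create(
    video=VideoContext_AssetId(asset_id="YOUR_ASSET_ID"),  # field name assumed
    prompt="List the key action items discussed in this video",
)
```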
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**video:** `VideoContext` + +
+
+ +
+
+ +**prompt:** `AnalyzeTextPrompt` + +
+
+ +
+
+ +**temperature:** `typing.Optional[AnalyzeTemperature]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[AnalyzeMaxTokens]` + +
+
+ +
+
+ +**response_format:** `typing.Optional[ResponseFormat]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.analyze_async.tasks.retrieve(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This method retrieves the status and results of an analysis task. + +**Task statuses**: +- `queued`: The task is waiting to be processed. +- `pending`: The task is queued and waiting to start. +- `processing`: The platform is analyzing the video. +- `ready`: Processing is complete. Results are available in the response. +- `failed`: The task failed. No results were generated. + +Poll this method until `status` is `ready` or `failed`. When `status` is `ready`, use the results from the response. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from twelvelabs import TwelveLabs + +client = TwelveLabs( + api_key="YOUR_API_KEY", +) +client.analyze_async.tasks.retrieve( + task_id="64f8d2c7e4a1b37f8a9c5d12", +) + +``` +
+
+
+
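Putting the polling advice above into code, a minimal sketch might look like the following. The `status` strings come from the list above; the `result` attribute is an assumption, so check the `AnalyzeTaskResponse` model for the field that actually holds the generated text.

```python
import time

from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")

task_id = "64f8d2c7e4a1b37f8a9c5d12"  # returned by analyze_async.tasks.create()
while True:
    task = client.analyze_async.tasks.retrieve(task_id=task_id)
    if task.status == "ready":
        # `result` is an assumed field name; see AnalyzeTaskResponse.
        print(task.result)
        break
    if task.status == "failed":
        raise RuntimeError(f"Analysis task {task_id} failed")
    time.sleep(5)  # space out polls to stay within rate limits
```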
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**task_id:** `str` — The unique identifier of the analysis task. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.analyze_async.tasks.delete(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This method deletes an analysis task. You can only delete tasks that are not currently being processed. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from twelvelabs import TwelveLabs + +client = TwelveLabs( + api_key="YOUR_API_KEY", +) +client.analyze_async.tasks.delete( + task_id="64f8d2c7e4a1b37f8a9c5d12", +) + +``` +
+
+
+
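The raw client for this endpoint imports `ConflictError`, so deleting a task that is still being processed presumably raises that exception; treat the mapping as an assumption. A hedged sketch:

```python
from twelvelabs import ConflictError, TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")

try:
    client.analyze_async.tasks.delete(task_id="64f8d2c7e4a1b37f8a9c5d12")
except ConflictError:
    # Assumed to indicate the task is still processing; wait for a
    # terminal status (`ready` or `failed`) before retrying the delete.
    pass
```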
+ +#### ⚙️ Parameters + +
+
+ +
+
+

**task_id:** `str` — The unique identifier of the analysis task.

+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ +
@@ -3363,7 +3689,6 @@ client.embed.tasks.create( The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. @@ -3611,11 +3936,7 @@ client.embed.tasks.retrieve( ] ]` -Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: -- **Marengo 3.0**: `visual`, `audio`, `transcription`. -- **Marengo 2.7**: `visual-text`, `audio`. - -For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. +Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. The platform returns all available embeddings when you omit this parameter. @@ -3654,10 +3975,6 @@ The platform returns all available embeddings when you omit this parameter. This endpoint synchronously creates embeddings for multimodal content and returns the results immediately in the response. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Create embeddings for text, images, audio, or video content - Retrieve immediate results without waiting for background processing @@ -3739,7 +4056,7 @@ The type of content for the embeddings. - `image`: Creates embeddings for an image file - `text`: Creates embeddings for text input - `text_image`: Creates embeddings for text and an image -- `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source +- `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source @@ -3747,7 +4064,7 @@ The type of content for the embeddings.
-**model_name:** `CreateEmbeddingsRequestModelName` — The video understanding model to use. Only "marengo3.0" is supported. +**model_name:** `CreateEmbeddingsRequestModelName` — The video understanding model to use. Value: "marengo3.0".
@@ -3954,10 +4271,6 @@ The number of items to return on each page. This endpoint creates embeddings for audio and video content asynchronously. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Process audio or video files longer than 10 minutes - Process files up to 4 hours in duration @@ -3979,7 +4292,7 @@ This endpoint creates embeddings for audio and video content asynchronously. Creating embeddings asynchronously requires three steps: - + 1. Create a task using this endpoint. The platform returns a task ID. 2. Poll for the status of the task using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint. Wait until the status is `ready`. 3. Retrieve the embeddings from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint. @@ -4047,7 +4360,7 @@ The type of content for the embeddings.
-**model_name:** `CreateAsyncEmbeddingRequestModelName` — The model you wish to use. Only `"marengo3.0"` is supported. +**model_name:** `CreateAsyncEmbeddingRequestModelName` — The model you wish to use. Value: `"marengo3.0"`.
@@ -5403,11 +5716,7 @@ client.indexes.indexed_assets.retrieve( ] ]` -Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: -- **Marengo 3.0**: `visual`, `audio`, `transcription`. -- **Marengo 2.7**: `visual-text`, `audio`. - -For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. +Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. @@ -5913,11 +6222,7 @@ client.indexes.videos.retrieve( ] ]` -Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: -- **Marengo 3.0**: `visual`, `audio`, `transcription`. -- **Marengo 2.7**: `visual-text`, `audio`. - -For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. +Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. @@ -5963,7 +6268,7 @@ To retrieve embeddings for a video, it must be indexed using the Marengo video u This method will be deprecated in a future version. New implementations should use the [Delete an indexed asset](/v1.3/api-reference/index-content/delete) method. -This method deletes all the information about the specified video. This action cannot be undone. +This method deletes all the information about the specified indexed video. This action cannot be undone. 
diff --git a/src/twelvelabs/__init__.py b/src/twelvelabs/__init__.py index cec5af3..849222a 100644 --- a/src/twelvelabs/__init__.py +++ b/src/twelvelabs/__init__.py @@ -3,6 +3,15 @@ # isort: skip_file from .types import ( + AnalyzeMaxTokens, + AnalyzeTaskError, + AnalyzeTaskResponse, + AnalyzeTaskResult, + AnalyzeTaskResultUsage, + AnalyzeTaskStatus, + AnalyzeTaskWebhookInfo, + AnalyzeTemperature, + AnalyzeTextPrompt, Asset, AssetMethod, AssetStatus, @@ -26,7 +35,7 @@ ChunkInfoStatus, CompletedChunk, CompletedChunkProofType, - Confidence, + CreateAnalyzeTaskResponse, CreateAssetUploadResponse, CreatedAt, EmbeddingAudioMetadata, @@ -62,6 +71,7 @@ ExpiresAt, FinishReason, ForbiddenErrorBody, + GeneratedTextData, GetUploadStatusResponse, HlsObject, HlsObjectStatus, @@ -94,6 +104,7 @@ NextPageToken, NonStreamAnalyzeResponse, NotFoundErrorBody, + One, Page, PageInfo, PresignedUrlChunk, @@ -103,7 +114,6 @@ RequestAdditionalPresignedUrLsResponse, ResponseFormat, ResponseFormatType, - ScoreSearchTerms, SearchItem, SearchItemClipsItem, SearchPool, @@ -131,7 +141,6 @@ TextEmbeddingResult, TextImageInputRequest, TextInputRequest, - ThresholdSearch, ThumbnailUrl, TokenUsage, TotalInnerMatches, @@ -139,8 +148,14 @@ TotalResults, TranscriptionData, TranscriptionDataItem, + Two, UpdatedAt, + Url, UserMetadata, + VideoContext, + VideoContext_AssetId, + VideoContext_Base64String, + VideoContext_Url, VideoEmbeddingMetadata, VideoEmbeddingTask, VideoEmbeddingTaskVideoEmbedding, @@ -163,8 +178,15 @@ VideoVector, VideoVectorSystemMetadata, ) -from .errors import BadRequestError, ForbiddenError, InternalServerError, NotFoundError, TooManyRequestsError -from . import assets, embed, entity_collections, indexes, multipart_upload, search, tasks +from .errors import ( + BadRequestError, + ConflictError, + ForbiddenError, + InternalServerError, + NotFoundError, + TooManyRequestsError, +) +from . 
import analyze_async, assets, embed, entity_collections, indexes, multipart_upload, search, tasks from .assets import AssetsCreateRequestMethod, AssetsListRequestAssetTypesItem, AssetsListResponse from .client import AsyncTwelveLabs, TwelveLabs from .entity_collections import EntityCollectionsListRequestSortBy, EntityCollectionsListResponse @@ -176,7 +198,6 @@ SearchCreateRequestOperator, SearchCreateRequestQueryMediaType, SearchCreateRequestSearchOptionsItem, - SearchCreateRequestSortOption, SearchCreateRequestTranscriptionOptionsItem, SearchRetrieveResponse, SearchRetrieveResponsePageInfo, @@ -191,6 +212,15 @@ from .version import __version__ __all__ = [ + "AnalyzeMaxTokens", + "AnalyzeTaskError", + "AnalyzeTaskResponse", + "AnalyzeTaskResult", + "AnalyzeTaskResultUsage", + "AnalyzeTaskStatus", + "AnalyzeTaskWebhookInfo", + "AnalyzeTemperature", + "AnalyzeTextPrompt", "Asset", "AssetMethod", "AssetStatus", @@ -219,7 +249,8 @@ "ChunkInfoStatus", "CompletedChunk", "CompletedChunkProofType", - "Confidence", + "ConflictError", + "CreateAnalyzeTaskResponse", "CreateAssetUploadRequestType", "CreateAssetUploadResponse", "CreatedAt", @@ -259,6 +290,7 @@ "FinishReason", "ForbiddenError", "ForbiddenErrorBody", + "GeneratedTextData", "GetUploadStatusResponse", "HlsObject", "HlsObjectStatus", @@ -296,6 +328,7 @@ "NonStreamAnalyzeResponse", "NotFoundError", "NotFoundErrorBody", + "One", "Page", "PageInfo", "PresignedUrlChunk", @@ -305,12 +338,10 @@ "RequestAdditionalPresignedUrLsResponse", "ResponseFormat", "ResponseFormatType", - "ScoreSearchTerms", "SearchCreateRequestGroupBy", "SearchCreateRequestOperator", "SearchCreateRequestQueryMediaType", "SearchCreateRequestSearchOptionsItem", - "SearchCreateRequestSortOption", "SearchCreateRequestTranscriptionOptionsItem", "SearchItem", "SearchItemClipsItem", @@ -346,7 +377,6 @@ "TextEmbeddingResult", "TextImageInputRequest", "TextInputRequest", - "ThresholdSearch", "ThumbnailUrl", "TokenUsage", "TooManyRequestsError", @@ -357,8 +387,14 @@ "TranscriptionDataItem", "TwelveLabs", "TwelveLabsEnvironment", + "Two", "UpdatedAt", + "Url", "UserMetadata", + "VideoContext", + "VideoContext_AssetId", + "VideoContext_Base64String", + "VideoContext_Url", "VideoEmbeddingMetadata", "VideoEmbeddingTask", "VideoEmbeddingTaskVideoEmbedding", @@ -381,6 +417,7 @@ "VideoVector", "VideoVectorSystemMetadata", "__version__", + "analyze_async", "assets", "embed", "entity_collections", diff --git a/src/twelvelabs/analyze_async/__init__.py b/src/twelvelabs/analyze_async/__init__.py new file mode 100644 index 0000000..19c44de --- /dev/null +++ b/src/twelvelabs/analyze_async/__init__.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from . import tasks +from .tasks import TasksListResponse + +__all__ = ["TasksListResponse", "tasks"] diff --git a/src/twelvelabs/analyze_async/client.py b/src/twelvelabs/analyze_async/client.py new file mode 100644 index 0000000..0cc918f --- /dev/null +++ b/src/twelvelabs/analyze_async/client.py @@ -0,0 +1,39 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from .raw_client import AsyncRawAnalyzeAsyncClient, RawAnalyzeAsyncClient +from .tasks.client import AsyncTasksClient, TasksClient + + +class AnalyzeAsyncClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawAnalyzeAsyncClient(client_wrapper=client_wrapper) + self.tasks = TasksClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawAnalyzeAsyncClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawAnalyzeAsyncClient + """ + return self._raw_client + + +class AsyncAnalyzeAsyncClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawAnalyzeAsyncClient(client_wrapper=client_wrapper) + self.tasks = AsyncTasksClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawAnalyzeAsyncClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawAnalyzeAsyncClient + """ + return self._raw_client diff --git a/src/twelvelabs/analyze_async/raw_client.py b/src/twelvelabs/analyze_async/raw_client.py new file mode 100644 index 0000000..da98413 --- /dev/null +++ b/src/twelvelabs/analyze_async/raw_client.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper + + +class RawAnalyzeAsyncClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + +class AsyncRawAnalyzeAsyncClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper diff --git a/src/twelvelabs/analyze_async/tasks/__init__.py b/src/twelvelabs/analyze_async/tasks/__init__.py new file mode 100644 index 0000000..c14a44c --- /dev/null +++ b/src/twelvelabs/analyze_async/tasks/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .types import TasksListResponse + +__all__ = ["TasksListResponse"] diff --git a/src/twelvelabs/analyze_async/tasks/client.py b/src/twelvelabs/analyze_async/tasks/client.py new file mode 100644 index 0000000..d9eff58 --- /dev/null +++ b/src/twelvelabs/analyze_async/tasks/client.py @@ -0,0 +1,501 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.request_options import RequestOptions +from ...types.analyze_max_tokens import AnalyzeMaxTokens +from ...types.analyze_task_response import AnalyzeTaskResponse +from ...types.analyze_task_status import AnalyzeTaskStatus +from ...types.analyze_temperature import AnalyzeTemperature +from ...types.analyze_text_prompt import AnalyzeTextPrompt +from ...types.create_analyze_task_response import CreateAnalyzeTaskResponse +from ...types.response_format import ResponseFormat +from ...types.video_context import VideoContext +from .raw_client import AsyncRawTasksClient, RawTasksClient +from .types.tasks_list_response import TasksListResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
+ + +class TasksClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawTasksClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawTasksClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawTasksClient + """ + return self._raw_client + + def list( + self, + *, + page: typing.Optional[int] = None, + page_limit: typing.Optional[int] = None, + status: typing.Optional[AnalyzeTaskStatus] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> TasksListResponse: + """ + This method returns a list of the analysis tasks in your account. The platform returns your analysis tasks sorted by creation date, with the newest at the top of the list. + + Parameters + ---------- + page : typing.Optional[int] + A number that identifies the page to retrieve. + + **Default**: `1`. + + page_limit : typing.Optional[int] + The number of items to return on each page. + + **Default**: `10`. + **Max**: `50`. + + status : typing.Optional[AnalyzeTaskStatus] + Filter analysis tasks by status. + Possible values: `queued`, `pending`, `processing`, `ready`, `failed`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + TasksListResponse + A list of analysis tasks has successfully been retrieved. + + Examples + -------- + from twelvelabs import TwelveLabs + + client = TwelveLabs( + api_key="YOUR_API_KEY", + ) + client.analyze_async.tasks.list( + page=1, + page_limit=10, + status="queued", + ) + """ + _response = self._raw_client.list( + page=page, page_limit=page_limit, status=status, request_options=request_options + ) + return _response.data + + def create( + self, + *, + video: VideoContext, + prompt: AnalyzeTextPrompt, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, + response_format: typing.Optional[ResponseFormat] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAnalyzeTaskResponse: + """ + This method asynchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 2 hours + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos longer than 1 hour + - Process videos asynchronously without blocking your application + + **Do not use this method for**: + - Videos for which you need immediate results or real-time streaming. Use the [`POST`](/v1.3/api-reference/analyze-videos/sync-analysis) method of the `/analyze` endpoint instead. + + Analyzing videos asynchronously requires three steps: + + 1. Create an analysis task using this method. The platform returns a task ID. + 2. Poll the status of the task using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. Wait until the status is `ready`. + 3. Retrieve the results from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. + + + This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. 
+ + + Parameters + ---------- + video : VideoContext + + prompt : AnalyzeTextPrompt + + temperature : typing.Optional[AnalyzeTemperature] + + max_tokens : typing.Optional[AnalyzeMaxTokens] + + response_format : typing.Optional[ResponseFormat] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAnalyzeTaskResponse + An analysis task has successfully been created. + + Examples + -------- + from twelvelabs import TwelveLabs, VideoContext_Url + + client = TwelveLabs( + api_key="YOUR_API_KEY", + ) + client.analyze_async.tasks.create( + video=VideoContext_Url( + url="https://example.com/video.mp4", + ), + prompt="Generate a detailed summary of this video in 3-4 sentences", + temperature=0.2, + max_tokens=1000, + ) + """ + _response = self._raw_client.create( + video=video, + prompt=prompt, + temperature=temperature, + max_tokens=max_tokens, + response_format=response_format, + request_options=request_options, + ) + return _response.data + + def retrieve(self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> AnalyzeTaskResponse: + """ + This method retrieves the status and results of an analysis task. + + **Task statuses**: + - `queued`: The task is waiting to be processed. + - `pending`: The task is queued and waiting to start. + - `processing`: The platform is analyzing the video. + - `ready`: Processing is complete. Results are available in the response. + - `failed`: The task failed. No results were generated. + + Poll this method until `status` is `ready` or `failed`. When `status` is `ready`, use the results from the response. + + Parameters + ---------- + task_id : str + The unique identifier of the analysis task. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AnalyzeTaskResponse + Task status and results retrieved successfully + + Examples + -------- + from twelvelabs import TwelveLabs + + client = TwelveLabs( + api_key="YOUR_API_KEY", + ) + client.analyze_async.tasks.retrieve( + task_id="64f8d2c7e4a1b37f8a9c5d12", + ) + """ + _response = self._raw_client.retrieve(task_id, request_options=request_options) + return _response.data + + def delete(self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + This method deletes an analysis task. You can only delete tasks that are not currently being processed. + + Parameters + ---------- + task_id : str + The unique identifier of the analyze task. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from twelvelabs import TwelveLabs + + client = TwelveLabs( + api_key="YOUR_API_KEY", + ) + client.analyze_async.tasks.delete( + task_id="64f8d2c7e4a1b37f8a9c5d12", + ) + """ + _response = self._raw_client.delete(task_id, request_options=request_options) + return _response.data + + +class AsyncTasksClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawTasksClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawTasksClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + AsyncRawTasksClient + """ + return self._raw_client + + async def list( + self, + *, + page: typing.Optional[int] = None, + page_limit: typing.Optional[int] = None, + status: typing.Optional[AnalyzeTaskStatus] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> TasksListResponse: + """ + This method returns a list of the analysis tasks in your account. The platform returns your analysis tasks sorted by creation date, with the newest at the top of the list. + + Parameters + ---------- + page : typing.Optional[int] + A number that identifies the page to retrieve. + + **Default**: `1`. + + page_limit : typing.Optional[int] + The number of items to return on each page. + + **Default**: `10`. + **Max**: `50`. + + status : typing.Optional[AnalyzeTaskStatus] + Filter analysis tasks by status. + Possible values: `queued`, `pending`, `processing`, `ready`, `failed`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + TasksListResponse + A list of analysis tasks has successfully been retrieved. + + Examples + -------- + import asyncio + + from twelvelabs import AsyncTwelveLabs + + client = AsyncTwelveLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.analyze_async.tasks.list( + page=1, + page_limit=10, + status="queued", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.list( + page=page, page_limit=page_limit, status=status, request_options=request_options + ) + return _response.data + + async def create( + self, + *, + video: VideoContext, + prompt: AnalyzeTextPrompt, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, + response_format: typing.Optional[ResponseFormat] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAnalyzeTaskResponse: + """ + This method asynchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 2 hours + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos longer than 1 hour + - Process videos asynchronously without blocking your application + + **Do not use this method for**: + - Videos for which you need immediate results or real-time streaming. Use the [`POST`](/v1.3/api-reference/analyze-videos/sync-analysis) method of the `/analyze` endpoint instead. + + Analyzing videos asynchronously requires three steps: + + 1. Create an analysis task using this method. The platform returns a task ID. + 2. Poll the status of the task using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. Wait until the status is `ready`. + 3. Retrieve the results from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. + + + This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. 
+ + + Parameters + ---------- + video : VideoContext + + prompt : AnalyzeTextPrompt + + temperature : typing.Optional[AnalyzeTemperature] + + max_tokens : typing.Optional[AnalyzeMaxTokens] + + response_format : typing.Optional[ResponseFormat] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAnalyzeTaskResponse + An analysis task has successfully been created. + + Examples + -------- + import asyncio + + from twelvelabs import AsyncTwelveLabs, VideoContext_Url + + client = AsyncTwelveLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.analyze_async.tasks.create( + video=VideoContext_Url( + url="https://example.com/video.mp4", + ), + prompt="Generate a detailed summary of this video in 3-4 sentences", + temperature=0.2, + max_tokens=1000, + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.create( + video=video, + prompt=prompt, + temperature=temperature, + max_tokens=max_tokens, + response_format=response_format, + request_options=request_options, + ) + return _response.data + + async def retrieve( + self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AnalyzeTaskResponse: + """ + This method retrieves the status and results of an analysis task. + + **Task statuses**: + - `queued`: The task is waiting to be processed. + - `pending`: The task is queued and waiting to start. + - `processing`: The platform is analyzing the video. + - `ready`: Processing is complete. Results are available in the response. + - `failed`: The task failed. No results were generated. + + Poll this method until `status` is `ready` or `failed`. When `status` is `ready`, use the results from the response. + + Parameters + ---------- + task_id : str + The unique identifier of the analysis task. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AnalyzeTaskResponse + Task status and results retrieved successfully + + Examples + -------- + import asyncio + + from twelvelabs import AsyncTwelveLabs + + client = AsyncTwelveLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.analyze_async.tasks.retrieve( + task_id="64f8d2c7e4a1b37f8a9c5d12", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.retrieve(task_id, request_options=request_options) + return _response.data + + async def delete(self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + This method deletes an analysis task. You can only delete tasks that are not currently being processed. + + Parameters + ---------- + task_id : str + The unique identifier of the analyze task. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + import asyncio + + from twelvelabs import AsyncTwelveLabs + + client = AsyncTwelveLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.analyze_async.tasks.delete( + task_id="64f8d2c7e4a1b37f8a9c5d12", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.delete(task_id, request_options=request_options) + return _response.data diff --git a/src/twelvelabs/analyze_async/tasks/raw_client.py b/src/twelvelabs/analyze_async/tasks/raw_client.py new file mode 100644 index 0000000..02ae1ea --- /dev/null +++ b/src/twelvelabs/analyze_async/tasks/raw_client.py @@ -0,0 +1,635 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from json.decoder import JSONDecodeError + +from ...core.api_error import ApiError +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.http_response import AsyncHttpResponse, HttpResponse +from ...core.jsonable_encoder import jsonable_encoder +from ...core.pydantic_utilities import parse_obj_as +from ...core.request_options import RequestOptions +from ...core.serialization import convert_and_respect_annotation_metadata +from ...errors.bad_request_error import BadRequestError +from ...errors.conflict_error import ConflictError +from ...errors.internal_server_error import InternalServerError +from ...errors.not_found_error import NotFoundError +from ...types.analyze_max_tokens import AnalyzeMaxTokens +from ...types.analyze_task_response import AnalyzeTaskResponse +from ...types.analyze_task_status import AnalyzeTaskStatus +from ...types.analyze_temperature import AnalyzeTemperature +from ...types.analyze_text_prompt import AnalyzeTextPrompt +from ...types.create_analyze_task_response import CreateAnalyzeTaskResponse +from ...types.error_response import ErrorResponse +from ...types.response_format import ResponseFormat +from ...types.video_context import VideoContext +from .types.tasks_list_response import TasksListResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawTasksClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list( + self, + *, + page: typing.Optional[int] = None, + page_limit: typing.Optional[int] = None, + status: typing.Optional[AnalyzeTaskStatus] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[TasksListResponse]: + """ + This method returns a list of the analysis tasks in your account. The platform returns your analysis tasks sorted by creation date, with the newest at the top of the list. + + Parameters + ---------- + page : typing.Optional[int] + A number that identifies the page to retrieve. + + **Default**: `1`. + + page_limit : typing.Optional[int] + The number of items to return on each page. + + **Default**: `10`. + **Max**: `50`. + + status : typing.Optional[AnalyzeTaskStatus] + Filter analysis tasks by status. + Possible values: `queued`, `pending`, `processing`, `ready`, `failed`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[TasksListResponse] + A list of analysis tasks has successfully been retrieved. 
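The same flow against the `AsyncTasksClient` above, this time with capped exponential backoff between polls (again assuming `id` and `status` as the field names for the task ID and status):

```python
import asyncio

from twelvelabs import AsyncTwelveLabs, VideoContext_Url


async def main() -> None:
    client = AsyncTwelveLabs(api_key="YOUR_API_KEY")
    task = await client.analyze_async.tasks.create(
        video=VideoContext_Url(url="https://example.com/video.mp4"),
        prompt="List the key moments in this video",
    )
    delay = 2.0
    while True:
        result = await client.analyze_async.tasks.retrieve(task_id=task.id)  # `id` assumed
        if result.status in ("ready", "failed"):  # `status` assumed
            break
        await asyncio.sleep(delay)
        delay = min(delay * 2, 30.0)  # exponential backoff, capped at 30 seconds


asyncio.run(main())
```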
+ """ + _response = self._client_wrapper.httpx_client.request( + "analyze/tasks", + method="GET", + params={ + "page": page, + "page_limit": page_limit, + "status": status, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + TasksListResponse, + parse_obj_as( + type_=TasksListResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def create( + self, + *, + video: VideoContext, + prompt: AnalyzeTextPrompt, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, + response_format: typing.Optional[ResponseFormat] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreateAnalyzeTaskResponse]: + """ + This method asynchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 2 hours + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos longer than 1 hour + - Process videos asynchronously without blocking your application + + **Do not use this method for**: + - Videos for which you need immediate results or real-time streaming. Use the [`POST`](/v1.3/api-reference/analyze-videos/sync-analysis) method of the `/analyze` endpoint instead. + + Analyzing videos asynchronously requires three steps: + + 1. Create an analysis task using this method. The platform returns a task ID. + 2. Poll the status of the task using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. Wait until the status is `ready`. + 3. Retrieve the results from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. + + + This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. + + + Parameters + ---------- + video : VideoContext + + prompt : AnalyzeTextPrompt + + temperature : typing.Optional[AnalyzeTemperature] + + max_tokens : typing.Optional[AnalyzeMaxTokens] + + response_format : typing.Optional[ResponseFormat] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[CreateAnalyzeTaskResponse] + An analysis task has successfully been created. 
+ """ + _response = self._client_wrapper.httpx_client.request( + "analyze/tasks", + method="POST", + json={ + "video": convert_and_respect_annotation_metadata( + object_=video, annotation=VideoContext, direction="write" + ), + "prompt": prompt, + "temperature": temperature, + "max_tokens": max_tokens, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormat, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateAnalyzeTaskResponse, + parse_obj_as( + type_=CreateAnalyzeTaskResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 500: + raise InternalServerError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def retrieve( + self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[AnalyzeTaskResponse]: + """ + This method retrieves the status and results of an analysis task. + + **Task statuses**: + - `queued`: The task is waiting to be processed. + - `pending`: The task is queued and waiting to start. + - `processing`: The platform is analyzing the video. + - `ready`: Processing is complete. Results are available in the response. + - `failed`: The task failed. No results were generated. + + Poll this method until `status` is `ready` or `failed`. When `status` is `ready`, use the results from the response. + + Parameters + ---------- + task_id : str + The unique identifier of the analysis task. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[AnalyzeTaskResponse] + Task status and results retrieved successfully + """ + _response = self._client_wrapper.httpx_client.request( + f"analyze/tasks/{jsonable_encoder(task_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AnalyzeTaskResponse, + parse_obj_as( + type_=AnalyzeTaskResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 404: + raise NotFoundError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + This method deletes an analysis task. You can only delete tasks that are not currently being processed. + + Parameters + ---------- + task_id : str + The unique identifier of the analyze task. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"analyze/tasks/{jsonable_encoder(task_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 404: + raise NotFoundError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 409: + raise ConflictError( + headers=dict(_response.headers), + body=typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawTasksClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list( + self, + *, + page: typing.Optional[int] = None, + page_limit: typing.Optional[int] = None, + status: typing.Optional[AnalyzeTaskStatus] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[TasksListResponse]: + """ + This method returns a list of the analysis tasks in your account. The platform returns your analysis tasks sorted by creation date, with the newest at the top of the list. + + Parameters + ---------- + page : typing.Optional[int] + A number that identifies the page to retrieve. + + **Default**: `1`. + + page_limit : typing.Optional[int] + The number of items to return on each page. + + **Default**: `10`. + **Max**: `50`. + + status : typing.Optional[AnalyzeTaskStatus] + Filter analysis tasks by status. + Possible values: `queued`, `pending`, `processing`, `ready`, `failed`. 
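The `delete` path above is the one endpoint in this diff that can answer 409, raised as the new `ConflictError` (defined later in this patch) when the task is still being processed. A small sketch of handling both failure modes:

```python
from twelvelabs import TwelveLabs
from twelvelabs.errors import ConflictError, NotFoundError

client = TwelveLabs(api_key="YOUR_API_KEY")

try:
    client.analyze_async.tasks.delete(task_id="64f8d2c7e4a1b37f8a9c5d12")
except ConflictError:
    # 409: the task is currently being processed and cannot be deleted yet.
    pass
except NotFoundError:
    # 404: no analysis task with this ID exists.
    pass
```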
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[TasksListResponse] + A list of analysis tasks has successfully been retrieved. + """ + _response = await self._client_wrapper.httpx_client.request( + "analyze/tasks", + method="GET", + params={ + "page": page, + "page_limit": page_limit, + "status": status, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + TasksListResponse, + parse_obj_as( + type_=TasksListResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def create( + self, + *, + video: VideoContext, + prompt: AnalyzeTextPrompt, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, + response_format: typing.Optional[ResponseFormat] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateAnalyzeTaskResponse]: + """ + This method asynchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 2 hours + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos longer than 1 hour + - Process videos asynchronously without blocking your application + + **Do not use this method for**: + - Videos for which you need immediate results or real-time streaming. Use the [`POST`](/v1.3/api-reference/analyze-videos/sync-analysis) method of the `/analyze` endpoint instead. + + Analyzing videos asynchronously requires three steps: + + 1. Create an analysis task using this method. The platform returns a task ID. + 2. Poll the status of the task using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. Wait until the status is `ready`. + 3. Retrieve the results from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/analyze-videos/retrieve-analysis-task-status-results) method of the `/analyze/tasks/{task_id}` endpoint. + + + This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. + + + Parameters + ---------- + video : VideoContext + + prompt : AnalyzeTextPrompt + + temperature : typing.Optional[AnalyzeTemperature] + + max_tokens : typing.Optional[AnalyzeMaxTokens] + + response_format : typing.Optional[ResponseFormat] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[CreateAnalyzeTaskResponse] + An analysis task has successfully been created. 
+ """ + _response = await self._client_wrapper.httpx_client.request( + "analyze/tasks", + method="POST", + json={ + "video": convert_and_respect_annotation_metadata( + object_=video, annotation=VideoContext, direction="write" + ), + "prompt": prompt, + "temperature": temperature, + "max_tokens": max_tokens, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormat, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateAnalyzeTaskResponse, + parse_obj_as( + type_=CreateAnalyzeTaskResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 500: + raise InternalServerError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def retrieve( + self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[AnalyzeTaskResponse]: + """ + This method retrieves the status and results of an analysis task. + + **Task statuses**: + - `queued`: The task is waiting to be processed. + - `pending`: The task is queued and waiting to start. + - `processing`: The platform is analyzing the video. + - `ready`: Processing is complete. Results are available in the response. + - `failed`: The task failed. No results were generated. + + Poll this method until `status` is `ready` or `failed`. When `status` is `ready`, use the results from the response. + + Parameters + ---------- + task_id : str + The unique identifier of the analysis task. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[AnalyzeTaskResponse] + Task status and results retrieved successfully + """ + _response = await self._client_wrapper.httpx_client.request( + f"analyze/tasks/{jsonable_encoder(task_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AnalyzeTaskResponse, + parse_obj_as( + type_=AnalyzeTaskResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 404: + raise NotFoundError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + This method deletes an analysis task. You can only delete tasks that are not currently being processed. + + Parameters + ---------- + task_id : str + The unique identifier of the analyze task. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"analyze/tasks/{jsonable_encoder(task_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 404: + raise NotFoundError( + headers=dict(_response.headers), + body=typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ), + ) + if _response.status_code == 409: + raise ConflictError( + headers=dict(_response.headers), + body=typing.cast( + ErrorResponse, + parse_obj_as( + type_=ErrorResponse, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/twelvelabs/analyze_async/tasks/types/__init__.py b/src/twelvelabs/analyze_async/tasks/types/__init__.py new file mode 100644 index 0000000..1a9c25b --- /dev/null +++ b/src/twelvelabs/analyze_async/tasks/types/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .tasks_list_response import TasksListResponse + +__all__ = ["TasksListResponse"] diff --git a/src/twelvelabs/analyze_async/tasks/types/tasks_list_response.py b/src/twelvelabs/analyze_async/tasks/types/tasks_list_response.py new file mode 100644 index 0000000..001b1ee --- /dev/null +++ b/src/twelvelabs/analyze_async/tasks/types/tasks_list_response.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ....types.analyze_task_response import AnalyzeTaskResponse +from ....types.page_info import PageInfo + + +class TasksListResponse(UniversalBaseModel): + data: typing.List[AnalyzeTaskResponse] = pydantic.Field() + """ + An array that contains up to `page_limit` analysis tasks. + """ + + page_info: PageInfo + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/base_client.py b/src/twelvelabs/base_client.py index 8d690a6..311befc 100644 --- a/src/twelvelabs/base_client.py +++ b/src/twelvelabs/base_client.py @@ -3,6 +3,7 @@ import typing import httpx +from .analyze_async.client import AnalyzeAsyncClient, AsyncAnalyzeAsyncClient from .assets.client import AssetsClient, AsyncAssetsClient from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from .core.request_options import RequestOptions @@ -14,9 +15,13 @@ from .raw_base_client import AsyncRawBaseClient, RawBaseClient from .search.client import AsyncSearchClient, SearchClient from .tasks.client import AsyncTasksClient, TasksClient +from .types.analyze_max_tokens import AnalyzeMaxTokens +from .types.analyze_temperature import AnalyzeTemperature +from .types.analyze_text_prompt import AnalyzeTextPrompt from .types.non_stream_analyze_response import NonStreamAnalyzeResponse from .types.response_format import ResponseFormat from .types.stream_analyze_response import StreamAnalyzeResponse +from .types.video_context import VideoContext # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -95,6 +100,7 @@ def __init__( self.entity_collections = EntityCollectionsClient(client_wrapper=self._client_wrapper) self.embed = EmbedClient(client_wrapper=self._client_wrapper) self.search = SearchClient(client_wrapper=self._client_wrapper) + self.analyze_async = AnalyzeAsyncClient(client_wrapper=self._client_wrapper) @property def with_raw_response(self) -> RawBaseClient: @@ -110,51 +116,53 @@ def with_raw_response(self) -> RawBaseClient: def analyze_stream( self, *, - video_id: str, - prompt: str, - temperature: typing.Optional[float] = OMIT, + prompt: AnalyzeTextPrompt, + video_id: typing.Optional[str] = OMIT, + video: typing.Optional[VideoContext] = OMIT, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, response_format: typing.Optional[ResponseFormat] = OMIT, - max_tokens: typing.Optional[int] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[StreamAnalyzeResponse]: """ - This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. + This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 1 hour + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. 
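`TasksListResponse` above pairs the `data` page with a `PageInfo` object. Since the `PageInfo` fields are not shown in this diff, here is a sketch that pages by watching for a short page instead, using the defaults from the `list` docstring (`page_limit` capped at 50):

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")

page, page_limit = 1, 10
tasks = []
while True:
    resp = client.analyze_async.tasks.list(page=page, page_limit=page_limit)
    tasks.extend(resp.data)
    if len(resp.data) < page_limit:  # a short page means the last page was reached
        break
    page += 1
```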
+ + + **When to use this method**: + - Analyze videos up to 1 hour + - Retrieve immediate results without waiting for asynchronous processing + - Stream text fragments in real-time for immediate processing and feedback + + **Do not use this method for**: + - Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - - This endpoint supports streaming responses. Parameters ---------- - video_id : str - The unique identifier of the video for which you wish to generate a text. - - prompt : str - A prompt that guides the model on the desired format or content. + prompt : AnalyzeTextPrompt - - - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. - - Your prompts can be instructive or descriptive, or you can also phrase them as questions. - - The maximum length of a prompt is 2,000 tokens. - + video_id : typing.Optional[str] + The unique identifier of the video to analyze. - **Examples**: + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead. - - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). - - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. + video : typing.Optional[VideoContext] - temperature : typing.Optional[float] - Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. - - **Default:** 0.2 - **Min:** 0 - **Max:** 1 + temperature : typing.Optional[AnalyzeTemperature] response_format : typing.Optional[ResponseFormat] - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. + max_tokens : typing.Optional[AnalyzeMaxTokens] request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -192,8 +200,9 @@ def analyze_stream( yield chunk """ with self._raw_client.analyze_stream( - video_id=video_id, prompt=prompt, + video_id=video_id, + video=video, temperature=temperature, response_format=response_format, max_tokens=max_tokens, @@ -204,51 +213,53 @@ def analyze_stream( def analyze( self, *, - video_id: str, - prompt: str, - temperature: typing.Optional[float] = OMIT, + prompt: AnalyzeTextPrompt, + video_id: typing.Optional[str] = OMIT, + video: typing.Optional[VideoContext] = OMIT, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, response_format: typing.Optional[ResponseFormat] = OMIT, - max_tokens: typing.Optional[int] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> NonStreamAnalyzeResponse: """ - This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. + This method synchronously analyzes your videos and generates fully customizable text based on your prompts. 
+ + + - Minimum duration: 4 seconds + - Maximum duration: 1 hour + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos up to 1 hour + - Retrieve immediate results without waiting for asynchronous processing + - Stream text fragments in real-time for immediate processing and feedback + + **Do not use this method for**: + - Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - - This endpoint supports streaming responses. Parameters ---------- - video_id : str - The unique identifier of the video for which you wish to generate a text. - - prompt : str - A prompt that guides the model on the desired format or content. + prompt : AnalyzeTextPrompt - - - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. - - Your prompts can be instructive or descriptive, or you can also phrase them as questions. - - The maximum length of a prompt is 2,000 tokens. - + video_id : typing.Optional[str] + The unique identifier of the video to analyze. - **Examples**: + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead. - - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). - - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. + video : typing.Optional[VideoContext] - temperature : typing.Optional[float] - Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. - - **Default:** 0.2 - **Min:** 0 - **Max:** 1 + temperature : typing.Optional[AnalyzeTemperature] response_format : typing.Optional[ResponseFormat] - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. + max_tokens : typing.Optional[AnalyzeMaxTokens] request_options : typing.Optional[RequestOptions] Request-specific configuration. 
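Per the deprecation note above, `video_id` still works but `video` is the forward-compatible spelling, and the switch is mechanical (`"YOUR_VIDEO_ID"` is a placeholder):

```python
from twelvelabs import TwelveLabs, VideoContext_Url

client = TwelveLabs(api_key="YOUR_API_KEY")

# Before: identify the video by its ID (slated for deprecation).
res = client.analyze(video_id="YOUR_VIDEO_ID", prompt="Summarize this video")

# After: pass a VideoContext instead.
res = client.analyze(
    video=VideoContext_Url(url="https://example.com/video.mp4"),
    prompt="Summarize this video",
)
```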
@@ -284,8 +295,9 @@ def analyze( ) """ _response = self._raw_client.analyze( - video_id=video_id, prompt=prompt, + video_id=video_id, + video=video, temperature=temperature, response_format=response_format, max_tokens=max_tokens, @@ -367,6 +379,7 @@ def __init__( self.entity_collections = AsyncEntityCollectionsClient(client_wrapper=self._client_wrapper) self.embed = AsyncEmbedClient(client_wrapper=self._client_wrapper) self.search = AsyncSearchClient(client_wrapper=self._client_wrapper) + self.analyze_async = AsyncAnalyzeAsyncClient(client_wrapper=self._client_wrapper) @property def with_raw_response(self) -> AsyncRawBaseClient: @@ -382,51 +395,53 @@ def with_raw_response(self) -> AsyncRawBaseClient: async def analyze_stream( self, *, - video_id: str, - prompt: str, - temperature: typing.Optional[float] = OMIT, + prompt: AnalyzeTextPrompt, + video_id: typing.Optional[str] = OMIT, + video: typing.Optional[VideoContext] = OMIT, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, response_format: typing.Optional[ResponseFormat] = OMIT, - max_tokens: typing.Optional[int] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[StreamAnalyzeResponse]: """ - This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. + This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 1 hour + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos up to 1 hour + - Retrieve immediate results without waiting for asynchronous processing + - Stream text fragments in real-time for immediate processing and feedback + + **Do not use this method for**: + - Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - - This endpoint supports streaming responses. Parameters ---------- - video_id : str - The unique identifier of the video for which you wish to generate a text. - - prompt : str - A prompt that guides the model on the desired format or content. + prompt : AnalyzeTextPrompt - - - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. - - Your prompts can be instructive or descriptive, or you can also phrase them as questions. - - The maximum length of a prompt is 2,000 tokens. - + video_id : typing.Optional[str] + The unique identifier of the video to analyze. - **Examples**: + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead. - - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). - - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. 
+ video : typing.Optional[VideoContext] - temperature : typing.Optional[float] - Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. - - **Default:** 0.2 - **Min:** 0 - **Max:** 1 + temperature : typing.Optional[AnalyzeTemperature] response_format : typing.Optional[ResponseFormat] - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. + max_tokens : typing.Optional[AnalyzeMaxTokens] request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -472,8 +487,9 @@ async def main() -> None: asyncio.run(main()) """ async with self._raw_client.analyze_stream( - video_id=video_id, prompt=prompt, + video_id=video_id, + video=video, temperature=temperature, response_format=response_format, max_tokens=max_tokens, @@ -485,51 +501,53 @@ async def main() -> None: async def analyze( self, *, - video_id: str, - prompt: str, - temperature: typing.Optional[float] = OMIT, + prompt: AnalyzeTextPrompt, + video_id: typing.Optional[str] = OMIT, + video: typing.Optional[VideoContext] = OMIT, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, response_format: typing.Optional[ResponseFormat] = OMIT, - max_tokens: typing.Optional[int] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> NonStreamAnalyzeResponse: """ - This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. + This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 1 hour + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos up to 1 hour + - Retrieve immediate results without waiting for asynchronous processing + - Stream text fragments in real-time for immediate processing and feedback + + **Do not use this method for**: + - Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - - This endpoint supports streaming responses. Parameters ---------- - video_id : str - The unique identifier of the video for which you wish to generate a text. - - prompt : str - A prompt that guides the model on the desired format or content. + prompt : AnalyzeTextPrompt - - - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. - - Your prompts can be instructive or descriptive, or you can also phrase them as questions. - - The maximum length of a prompt is 2,000 tokens. - + video_id : typing.Optional[str] + The unique identifier of the video to analyze. - **Examples**: + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead. - - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). 
- - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. + video : typing.Optional[VideoContext] - temperature : typing.Optional[float] - Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. - - **Default:** 0.2 - **Min:** 0 - **Max:** 1 + temperature : typing.Optional[AnalyzeTemperature] response_format : typing.Optional[ResponseFormat] - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. + max_tokens : typing.Optional[AnalyzeMaxTokens] request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -573,8 +591,9 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._raw_client.analyze( - video_id=video_id, prompt=prompt, + video_id=video_id, + video=video, temperature=temperature, response_format=response_format, max_tokens=max_tokens, diff --git a/src/twelvelabs/core/client_wrapper.py b/src/twelvelabs/core/client_wrapper.py index 0523237..fa7ec7d 100644 --- a/src/twelvelabs/core/client_wrapper.py +++ b/src/twelvelabs/core/client_wrapper.py @@ -22,10 +22,10 @@ def __init__( def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { - "User-Agent": "twelvelabs/1.2.1", + "User-Agent": "twelvelabs/1.2.2", "X-Fern-Language": "Python", "X-Fern-SDK-Name": "twelvelabs", - "X-Fern-SDK-Version": "1.2.1", + "X-Fern-SDK-Version": "1.2.2", **(self.get_custom_headers() or {}), } headers["x-api-key"] = self.api_key diff --git a/src/twelvelabs/embed/client.py b/src/twelvelabs/embed/client.py index d59d957..99769d8 100644 --- a/src/twelvelabs/embed/client.py +++ b/src/twelvelabs/embed/client.py @@ -37,7 +37,6 @@ def create( *, model_name: str, text: typing.Optional[str] = OMIT, - text_truncate: typing.Optional[str] = OMIT, image_url: typing.Optional[str] = OMIT, image_file: typing.Optional[core.File] = OMIT, audio_url: typing.Optional[str] = OMIT, @@ -82,27 +81,12 @@ def create( model_name : str The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. text : typing.Optional[str] The text for which you wish to create an embedding. **Example**: "Man with a dog crossing the street" - text_truncate : typing.Optional[str] - Specifies how the platform handles text that exceeds token limits. - - **Available options by model version**: - - **Marengo 3.0**: This parameter is deprecated. The platform automatically truncates text exceeding 500 tokens from the end. - - **Marengo 2.7**: Specifies truncation method for text exceeding 77 tokens: - - `start`: Removes tokens from the beginning - - `end`: Removes tokens from the end (default) - - `none`: Returns an error if the text is longer than the maximum token limit. - - **Default**: `end` - image_url : typing.Optional[str] The publicly accessible URL of the image for which you wish to create an embedding. This parameter is required for image embeddings if `image_file` is not provided. 
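For completeness, the streaming variant of the async client changes above: `analyze_stream` is declared to produce an `AsyncIterator[StreamAnalyzeResponse]`, so consuming it directly with `async for` is the assumed usage pattern here.

```python
import asyncio

from twelvelabs import AsyncTwelveLabs, VideoContext_Url


async def main() -> None:
    client = AsyncTwelveLabs(api_key="YOUR_API_KEY")
    # Iterate the async stream; each chunk is a StreamAnalyzeResponse fragment.
    async for chunk in client.analyze_stream(
        video=VideoContext_Url(url="https://example.com/video.mp4"),
        prompt="Describe what happens in this video",
    ):
        print(chunk)


asyncio.run(main())
```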
@@ -141,7 +125,6 @@ def create( _response = self._raw_client.create( model_name=model_name, text=text, - text_truncate=text_truncate, image_url=image_url, image_file=image_file, audio_url=audio_url, @@ -175,7 +158,6 @@ async def create( *, model_name: str, text: typing.Optional[str] = OMIT, - text_truncate: typing.Optional[str] = OMIT, image_url: typing.Optional[str] = OMIT, image_file: typing.Optional[core.File] = OMIT, audio_url: typing.Optional[str] = OMIT, @@ -220,27 +202,12 @@ async def create( model_name : str The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. text : typing.Optional[str] The text for which you wish to create an embedding. **Example**: "Man with a dog crossing the street" - text_truncate : typing.Optional[str] - Specifies how the platform handles text that exceeds token limits. - - **Available options by model version**: - - **Marengo 3.0**: This parameter is deprecated. The platform automatically truncates text exceeding 500 tokens from the end. - - **Marengo 2.7**: Specifies truncation method for text exceeding 77 tokens: - - `start`: Removes tokens from the beginning - - `end`: Removes tokens from the end (default) - - `none`: Returns an error if the text is longer than the maximum token limit. - - **Default**: `end` - image_url : typing.Optional[str] The publicly accessible URL of the image for which you wish to create an embedding. This parameter is required for image embeddings if `image_file` is not provided. @@ -287,7 +254,6 @@ async def main() -> None: _response = await self._raw_client.create( model_name=model_name, text=text, - text_truncate=text_truncate, image_url=image_url, image_file=image_file, audio_url=audio_url, diff --git a/src/twelvelabs/embed/raw_client.py b/src/twelvelabs/embed/raw_client.py index 77262e0..dd1adc7 100644 --- a/src/twelvelabs/embed/raw_client.py +++ b/src/twelvelabs/embed/raw_client.py @@ -25,7 +25,6 @@ def create( *, model_name: str, text: typing.Optional[str] = OMIT, - text_truncate: typing.Optional[str] = OMIT, image_url: typing.Optional[str] = OMIT, image_file: typing.Optional[core.File] = OMIT, audio_url: typing.Optional[str] = OMIT, @@ -70,27 +69,12 @@ def create( model_name : str The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. text : typing.Optional[str] The text for which you wish to create an embedding. **Example**: "Man with a dog crossing the street" - text_truncate : typing.Optional[str] - Specifies how the platform handles text that exceeds token limits. - - **Available options by model version**: - - **Marengo 3.0**: This parameter is deprecated. The platform automatically truncates text exceeding 500 tokens from the end. 
- - **Marengo 2.7**: Specifies truncation method for text exceeding 77 tokens: - - `start`: Removes tokens from the beginning - - `end`: Removes tokens from the end (default) - - `none`: Returns an error if the text is longer than the maximum token limit. - - **Default**: `end` - image_url : typing.Optional[str] The publicly accessible URL of the image for which you wish to create an embedding. This parameter is required for image embeddings if `image_file` is not provided. @@ -121,7 +105,6 @@ def create( data={ "model_name": model_name, "text": text, - "text_truncate": text_truncate, "image_url": image_url, "audio_url": audio_url, "audio_start_offset_sec": audio_start_offset_sec, @@ -170,7 +153,6 @@ async def create( *, model_name: str, text: typing.Optional[str] = OMIT, - text_truncate: typing.Optional[str] = OMIT, image_url: typing.Optional[str] = OMIT, image_file: typing.Optional[core.File] = OMIT, audio_url: typing.Optional[str] = OMIT, @@ -215,27 +197,12 @@ async def create( model_name : str The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. text : typing.Optional[str] The text for which you wish to create an embedding. **Example**: "Man with a dog crossing the street" - text_truncate : typing.Optional[str] - Specifies how the platform handles text that exceeds token limits. - - **Available options by model version**: - - **Marengo 3.0**: This parameter is deprecated. The platform automatically truncates text exceeding 500 tokens from the end. - - **Marengo 2.7**: Specifies truncation method for text exceeding 77 tokens: - - `start`: Removes tokens from the beginning - - `end`: Removes tokens from the end (default) - - `none`: Returns an error if the text is longer than the maximum token limit. - - **Default**: `end` - image_url : typing.Optional[str] The publicly accessible URL of the image for which you wish to create an embedding. This parameter is required for image embeddings if `image_file` is not provided. @@ -266,7 +233,6 @@ async def create( data={ "model_name": model_name, "text": text, - "text_truncate": text_truncate, "image_url": image_url, "audio_url": audio_url, "audio_start_offset_sec": audio_start_offset_sec, diff --git a/src/twelvelabs/embed/tasks/client.py b/src/twelvelabs/embed/tasks/client.py index 4224c37..a3d28ee 100644 --- a/src/twelvelabs/embed/tasks/client.py +++ b/src/twelvelabs/embed/tasks/client.py @@ -158,7 +158,6 @@ def create( model_name : str The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. video_file : typing.Optional[core.File] See core.File for more documentation @@ -295,11 +294,7 @@ def retrieve( The unique identifier of your video embedding task. 
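With `text_truncate` removed from the signatures above, a call that still passes it now fails at the Python level with a `TypeError` (the keyword-only parameter no longer exists); the remaining parameters are unchanged.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")

# text_truncate is gone from the signature; passing it raises
# TypeError: create() got an unexpected keyword argument 'text_truncate'.
emb = client.embed.create(
    model_name="marengo3.0",
    text="Man with a dog crossing the street",
)
print(emb)
```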
embedding_option : typing.Optional[typing.Union[TasksRetrieveRequestEmbeddingOptionItem, typing.Sequence[TasksRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. The platform returns all available embeddings when you omit this parameter. @@ -479,7 +474,6 @@ async def create( model_name : str The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. video_file : typing.Optional[core.File] See core.File for more documentation @@ -634,11 +628,7 @@ async def retrieve( The unique identifier of your video embedding task. embedding_option : typing.Optional[typing.Union[TasksRetrieveRequestEmbeddingOptionItem, typing.Sequence[TasksRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. The platform returns all available embeddings when you omit this parameter. diff --git a/src/twelvelabs/embed/tasks/raw_client.py b/src/twelvelabs/embed/tasks/raw_client.py index 6788ccd..b24962a 100644 --- a/src/twelvelabs/embed/tasks/raw_client.py +++ b/src/twelvelabs/embed/tasks/raw_client.py @@ -176,7 +176,6 @@ def create( model_name : str The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. video_file : typing.Optional[core.File] See core.File for more documentation @@ -353,11 +352,7 @@ def retrieve( The unique identifier of your video embedding task. embedding_option : typing.Optional[typing.Union[TasksRetrieveRequestEmbeddingOptionItem, typing.Sequence[TasksRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. 
For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. The platform returns all available embeddings when you omit this parameter. @@ -561,7 +556,6 @@ async def create( model_name : str The name of the model you want to use. The following models are available: - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `Marengo-retrieval-2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. video_file : typing.Optional[core.File] See core.File for more documentation @@ -738,11 +732,7 @@ async def retrieve( The unique identifier of your video embedding task. embedding_option : typing.Optional[typing.Union[TasksRetrieveRequestEmbeddingOptionItem, typing.Sequence[TasksRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. The platform returns all available embeddings when you omit this parameter. diff --git a/src/twelvelabs/embed/v_2/client.py b/src/twelvelabs/embed/v_2/client.py index b94245a..9af97af 100644 --- a/src/twelvelabs/embed/v_2/client.py +++ b/src/twelvelabs/embed/v_2/client.py @@ -52,10 +52,6 @@ def create( """ This endpoint synchronously creates embeddings for multimodal content and returns the results immediately in the response. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Create embeddings for text, images, audio, or video content - Retrieve immediate results without waiting for background processing @@ -101,7 +97,7 @@ def create( - `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source model_name : CreateEmbeddingsRequestModelName - The video understanding model to use. Only "marengo3.0" is supported. + The video understanding model to use. Value: "marengo3.0". text : typing.Optional[TextInputRequest] @@ -184,10 +180,6 @@ async def create( """ This endpoint synchronously creates embeddings for multimodal content and returns the results immediately in the response. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Create embeddings for text, images, audio, or video content - Retrieve immediate results without waiting for background processing @@ -233,7 +225,7 @@ async def create( - `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source model_name : CreateEmbeddingsRequestModelName - The video understanding model to use. Only "marengo3.0" is supported. + The video understanding model to use. Value: "marengo3.0". 
text : typing.Optional[TextInputRequest] diff --git a/src/twelvelabs/embed/v_2/raw_client.py b/src/twelvelabs/embed/v_2/raw_client.py index d19ad6c..a59a638 100644 --- a/src/twelvelabs/embed/v_2/raw_client.py +++ b/src/twelvelabs/embed/v_2/raw_client.py @@ -46,10 +46,6 @@ def create( """ This endpoint synchronously creates embeddings for multimodal content and returns the results immediately in the response. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Create embeddings for text, images, audio, or video content - Retrieve immediate results without waiting for background processing @@ -95,7 +91,7 @@ def create( - `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source model_name : CreateEmbeddingsRequestModelName - The video understanding model to use. Only "marengo3.0" is supported. + The video understanding model to use. Value: "marengo3.0". text : typing.Optional[TextInputRequest] @@ -217,10 +213,6 @@ async def create( """ This endpoint synchronously creates embeddings for multimodal content and returns the results immediately in the response. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Create embeddings for text, images, audio, or video content - Retrieve immediate results without waiting for background processing @@ -266,7 +258,7 @@ async def create( - `multi_input`: Creates a single embedding from up to 10 images. You can optionally include text to provide context. To reference specific images in your text, use placeholders in the following format: `<@name>`, where `name` matches the `name` field of a media source model_name : CreateEmbeddingsRequestModelName - The video understanding model to use. Only "marengo3.0" is supported. + The video understanding model to use. Value: "marengo3.0". text : typing.Optional[TextInputRequest] diff --git a/src/twelvelabs/embed/v_2/tasks/client.py b/src/twelvelabs/embed/v_2/tasks/client.py index df107ce..c0c0549 100644 --- a/src/twelvelabs/embed/v_2/tasks/client.py +++ b/src/twelvelabs/embed/v_2/tasks/client.py @@ -119,10 +119,6 @@ def create( """ This endpoint creates embeddings for audio and video content asynchronously. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Process audio or video files longer than 10 minutes - Process files up to 4 hours in duration @@ -163,7 +159,7 @@ def create( - `video`: Video content model_name : CreateAsyncEmbeddingRequestModelName - The model you wish to use. Only `"marengo3.0"` is supported. + The model you wish to use. Value: `"marengo3.0"`. audio : typing.Optional[AudioInputRequest] @@ -366,10 +362,6 @@ async def create( """ This endpoint creates embeddings for audio and video content asynchronously. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Process audio or video files longer than 10 minutes - Process files up to 4 hours in duration @@ -410,7 +402,7 @@ async def create( - `video`: Video content model_name : CreateAsyncEmbeddingRequestModelName - The model you wish to use. Only `"marengo3.0"` is supported. + The model you wish to use. Value: `"marengo3.0"`. 
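For the asynchronous path, a hedged sketch of creating a v2 embedding task for a long video (the docstring above positions this endpoint for files over 10 minutes and up to 4 hours). The `client.embed.v_2.tasks` path is inferred from `src/twelvelabs/embed/v_2/tasks/client.py`, and the `VideoInputRequest` field name is illustrative only, since its definition is not part of this diff.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="<YOUR_API_KEY>")

task = client.embed.v_2.tasks.create(
    model_name="marengo3.0",
    video={"media_url": "https://example.com/long-recording.mp4"},  # hypothetical field
)
```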
audio : typing.Optional[AudioInputRequest] diff --git a/src/twelvelabs/embed/v_2/tasks/raw_client.py b/src/twelvelabs/embed/v_2/tasks/raw_client.py index fcb5506..3cdf44e 100644 --- a/src/twelvelabs/embed/v_2/tasks/raw_client.py +++ b/src/twelvelabs/embed/v_2/tasks/raw_client.py @@ -140,10 +140,6 @@ def create( """ This endpoint creates embeddings for audio and video content asynchronously. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Process audio or video files longer than 10 minutes - Process files up to 4 hours in duration @@ -184,7 +180,7 @@ def create( - `video`: Video content model_name : CreateAsyncEmbeddingRequestModelName - The model you wish to use. Only `"marengo3.0"` is supported. + The model you wish to use. Value: `"marengo3.0"`. audio : typing.Optional[AudioInputRequest] @@ -428,10 +424,6 @@ async def create( """ This endpoint creates embeddings for audio and video content asynchronously. - - This method only supports Marengo version 3.0 or newer. - - **When to use this endpoint**: - Process audio or video files longer than 10 minutes - Process files up to 4 hours in duration @@ -472,7 +464,7 @@ async def create( - `video`: Video content model_name : CreateAsyncEmbeddingRequestModelName - The model you wish to use. Only `"marengo3.0"` is supported. + The model you wish to use. Value: `"marengo3.0"`. audio : typing.Optional[AudioInputRequest] diff --git a/src/twelvelabs/errors/__init__.py b/src/twelvelabs/errors/__init__.py index 331c9ec..411432f 100644 --- a/src/twelvelabs/errors/__init__.py +++ b/src/twelvelabs/errors/__init__.py @@ -3,9 +3,17 @@ # isort: skip_file from .bad_request_error import BadRequestError +from .conflict_error import ConflictError from .forbidden_error import ForbiddenError from .internal_server_error import InternalServerError from .not_found_error import NotFoundError from .too_many_requests_error import TooManyRequestsError -__all__ = ["BadRequestError", "ForbiddenError", "InternalServerError", "NotFoundError", "TooManyRequestsError"] +__all__ = [ + "BadRequestError", + "ConflictError", + "ForbiddenError", + "InternalServerError", + "NotFoundError", + "TooManyRequestsError", +] diff --git a/src/twelvelabs/errors/conflict_error.py b/src/twelvelabs/errors/conflict_error.py new file mode 100644 index 0000000..a53ae50 --- /dev/null +++ b/src/twelvelabs/errors/conflict_error.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ..core.api_error import ApiError +from ..types.error_response import ErrorResponse + + +class ConflictError(ApiError): + def __init__(self, body: ErrorResponse, headers: typing.Optional[typing.Dict[str, str]] = None): + super().__init__(status_code=409, headers=headers, body=body) diff --git a/src/twelvelabs/indexes/indexed_assets/client.py b/src/twelvelabs/indexes/indexed_assets/client.py index c2bac7b..79cb24a 100644 --- a/src/twelvelabs/indexes/indexed_assets/client.py +++ b/src/twelvelabs/indexes/indexed_assets/client.py @@ -294,11 +294,7 @@ def retrieve( The unique identifier of the indexed asset to retrieve. embedding_option : typing.Optional[typing.Union[IndexedAssetsRetrieveRequestEmbeddingOptionItem, typing.Sequence[IndexedAssetsRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. 
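The new `ConflictError` maps HTTP 409 responses onto a dedicated exception, per the added `errors/conflict_error.py`. A short sketch of handling it; the `.body` attribute is carried through `ApiError` as shown in the new file, while the method being wrapped reuses the assumptions from the earlier sketches.

```python
from twelvelabs import TwelveLabs
from twelvelabs.errors import ConflictError

client = TwelveLabs(api_key="<YOUR_API_KEY>")

try:
    task = client.embed.v_2.tasks.create(  # assumed attribute path, as above
        model_name="marengo3.0",
        video={"media_url": "https://example.com/video.mp4"},  # hypothetical field
    )
except ConflictError as err:
    # Raised for HTTP 409 responses, per errors/conflict_error.py.
    print("Conflict:", err.body)
```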
- - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. @@ -718,11 +714,7 @@ async def retrieve( The unique identifier of the indexed asset to retrieve. embedding_option : typing.Optional[typing.Union[IndexedAssetsRetrieveRequestEmbeddingOptionItem, typing.Sequence[IndexedAssetsRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. diff --git a/src/twelvelabs/indexes/indexed_assets/raw_client.py b/src/twelvelabs/indexes/indexed_assets/raw_client.py index 1699830..b5ee6a2 100644 --- a/src/twelvelabs/indexes/indexed_assets/raw_client.py +++ b/src/twelvelabs/indexes/indexed_assets/raw_client.py @@ -364,11 +364,7 @@ def retrieve( The unique identifier of the indexed asset to retrieve. embedding_option : typing.Optional[typing.Union[IndexedAssetsRetrieveRequestEmbeddingOptionItem, typing.Sequence[IndexedAssetsRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. @@ -876,11 +872,7 @@ async def retrieve( The unique identifier of the indexed asset to retrieve. embedding_option : typing.Optional[typing.Union[IndexedAssetsRetrieveRequestEmbeddingOptionItem, typing.Sequence[IndexedAssetsRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. 
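A corresponding sketch for indexed assets, assuming `client.indexes.indexed_assets` mirrors `src/twelvelabs/indexes/indexed_assets/client.py`. The identifiers are passed positionally because this diff documents the asset-ID parameter only by description, not by name.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="<YOUR_API_KEY>")

# The asset must have been indexed with Marengo for embeddings to exist.
asset = client.indexes.indexed_assets.retrieve(
    "<INDEX_ID>",          # placeholder identifiers
    "<INDEXED_ASSET_ID>",
    embedding_option=["visual", "audio", "transcription"],
)
```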
To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. diff --git a/src/twelvelabs/indexes/types/indexes_create_request_models_item.py b/src/twelvelabs/indexes/types/indexes_create_request_models_item.py index 3e828c9..2160176 100644 --- a/src/twelvelabs/indexes/types/indexes_create_request_models_item.py +++ b/src/twelvelabs/indexes/types/indexes_create_request_models_item.py @@ -13,8 +13,7 @@ class IndexesCreateRequestModelsItem(UniversalBaseModel): - **Embedding**: These models are proficient at performing tasks such as search and classification, enabling enhanced video understanding. - - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - - `marengo2.7`: **Deprecation notice**: Starting mid-March 2026, your videos will be automatically reindexed to Marengo 3.0. Marengo 2.7 will be deprecated once reindexing completes. See the [Migration guide](/v1.3/docs/get-started/migration-guide) for details. + - `marengo3.0`: Enhanced model with sports intelligence and extended content support. - **Generative**: These models generate text based on your videos. diff --git a/src/twelvelabs/indexes/videos/client.py b/src/twelvelabs/indexes/videos/client.py index 3ef476e..f6e4c8d 100644 --- a/src/twelvelabs/indexes/videos/client.py +++ b/src/twelvelabs/indexes/videos/client.py @@ -199,11 +199,7 @@ def retrieve( The unique identifier of the video to retrieve. embedding_option : typing.Optional[typing.Union[VideosRetrieveRequestEmbeddingOptionItem, typing.Sequence[VideosRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. @@ -246,7 +242,7 @@ def delete(self, index_id: str, video_id: str, *, request_options: typing.Option """ This method will be deprecated in a future version. New implementations should use the [Delete an indexed asset](/v1.3/api-reference/index-content/delete) method. - This method deletes all the information about the specified video. This action cannot be undone. + This method deletes all the information about the specified indexed video. This action cannot be undone. Parameters ---------- @@ -524,11 +520,7 @@ async def retrieve( The unique identifier of the video to retrieve. embedding_option : typing.Optional[typing.Union[VideosRetrieveRequestEmbeddingOptionItem, typing.Sequence[VideosRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. 
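With `marengo2.7` removed from `IndexesCreateRequestModelsItem`, new indexes should request `marengo3.0`. A hedged sketch of index creation; `index_name`, `models`, and the `model_options` values follow the v1.3 API conventions but are assumptions insofar as this diff does not show the `indexes.create` signature.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="<YOUR_API_KEY>")

index = client.indexes.create(
    index_name="my-index",
    models=[{"model_name": "marengo3.0", "model_options": ["visual", "audio"]}],
)
```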
For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. @@ -581,7 +573,7 @@ async def delete( """ This method will be deprecated in a future version. New implementations should use the [Delete an indexed asset](/v1.3/api-reference/index-content/delete) method. - This method deletes all the information about the specified video. This action cannot be undone. + This method deletes all the information about the specified indexed video. This action cannot be undone. Parameters ---------- diff --git a/src/twelvelabs/indexes/videos/raw_client.py b/src/twelvelabs/indexes/videos/raw_client.py index 6f8c38b..95535ce 100644 --- a/src/twelvelabs/indexes/videos/raw_client.py +++ b/src/twelvelabs/indexes/videos/raw_client.py @@ -223,11 +223,7 @@ def retrieve( The unique identifier of the video to retrieve. embedding_option : typing.Optional[typing.Union[VideosRetrieveRequestEmbeddingOptionItem, typing.Sequence[VideosRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. @@ -296,7 +292,7 @@ def delete( """ This method will be deprecated in a future version. New implementations should use the [Delete an indexed asset](/v1.3/api-reference/index-content/delete) method. - This method deletes all the information about the specified video. This action cannot be undone. + This method deletes all the information about the specified indexed video. This action cannot be undone. Parameters ---------- @@ -601,11 +597,7 @@ async def retrieve( The unique identifier of the video to retrieve. embedding_option : typing.Optional[typing.Union[VideosRetrieveRequestEmbeddingOptionItem, typing.Sequence[VideosRetrieveRequestEmbeddingOptionItem]]] - Specifies which types of embeddings to retrieve. Values vary depending on the version of the model: - - **Marengo 3.0**: `visual`, `audio`, `transcription`. - - **Marengo 2.7**: `visual-text`, `audio`. - - For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. + Specifies which types of embeddings to retrieve. Values: `visual`, `audio`, `transcription`. For details, see the [Embedding options](/v1.3/docs/concepts/modalities#embedding-options) section. To retrieve embeddings for a video, it must be indexed using the Marengo video understanding model. For details on enabling this model for an index, see the [Create an index](/reference/create-index) page. @@ -674,7 +666,7 @@ async def delete( """ This method will be deprecated in a future version. New implementations should use the [Delete an indexed asset](/v1.3/api-reference/index-content/delete) method. 
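The same `embedding_option` change applies to per-video retrieval. A brief sketch, assuming `client.indexes.videos` mirrors `src/twelvelabs/indexes/videos/client.py`; note the reworded deprecation guidance for `delete` in the hunks above.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="<YOUR_API_KEY>")

video = client.indexes.videos.retrieve(
    "<INDEX_ID>",
    "<VIDEO_ID>",
    embedding_option=["visual", "transcription"],
)

# videos.delete() still works but is slated for deprecation; per the updated
# docstring, new code should use the indexed-assets delete method instead.
```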
- This method deletes all the information about the specified video. This action cannot be undone. + This method deletes all the information about the specified indexed video. This action cannot be undone. Parameters ---------- diff --git a/src/twelvelabs/raw_base_client.py b/src/twelvelabs/raw_base_client.py index 46fce27..06d855c 100644 --- a/src/twelvelabs/raw_base_client.py +++ b/src/twelvelabs/raw_base_client.py @@ -13,9 +13,13 @@ from .core.serialization import convert_and_respect_annotation_metadata from .errors.bad_request_error import BadRequestError from .errors.too_many_requests_error import TooManyRequestsError +from .types.analyze_max_tokens import AnalyzeMaxTokens +from .types.analyze_temperature import AnalyzeTemperature +from .types.analyze_text_prompt import AnalyzeTextPrompt from .types.non_stream_analyze_response import NonStreamAnalyzeResponse from .types.response_format import ResponseFormat from .types.stream_analyze_response import StreamAnalyzeResponse +from .types.video_context import VideoContext # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -29,51 +33,53 @@ def __init__(self, *, client_wrapper: SyncClientWrapper): def analyze_stream( self, *, - video_id: str, - prompt: str, - temperature: typing.Optional[float] = OMIT, + prompt: AnalyzeTextPrompt, + video_id: typing.Optional[str] = OMIT, + video: typing.Optional[VideoContext] = OMIT, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, response_format: typing.Optional[ResponseFormat] = OMIT, - max_tokens: typing.Optional[int] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[HttpResponse[typing.Iterator[StreamAnalyzeResponse]]]: """ - This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. + This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 1 hour + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos up to 1 hour + - Retrieve immediate results without waiting for asynchronous processing + - Stream text fragments in real-time for immediate processing and feedback + + **Do not use this method for**: + - Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - - This endpoint supports streaming responses. Parameters ---------- - video_id : str - The unique identifier of the video for which you wish to generate a text. + prompt : AnalyzeTextPrompt - prompt : str - A prompt that guides the model on the desired format or content. + video_id : typing.Optional[str] + The unique identifier of the video to analyze. - - - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. - - Your prompts can be instructive or descriptive, or you can also phrase them as questions. 
- - The maximum length of a prompt is 2,000 tokens. - + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead. - **Examples**: + video : typing.Optional[VideoContext] - - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). - - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. - - temperature : typing.Optional[float] - Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. - - **Default:** 0.2 - **Min:** 0 - **Max:** 1 + temperature : typing.Optional[AnalyzeTemperature] response_format : typing.Optional[ResponseFormat] - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. + max_tokens : typing.Optional[AnalyzeMaxTokens] request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -88,6 +94,9 @@ def analyze_stream( method="POST", json={ "video_id": video_id, + "video": convert_and_respect_annotation_metadata( + object_=video, annotation=VideoContext, direction="write" + ), "prompt": prompt, "temperature": temperature, "response_format": convert_and_respect_annotation_metadata( @@ -159,51 +168,53 @@ def _iter(): def analyze( self, *, - video_id: str, - prompt: str, - temperature: typing.Optional[float] = OMIT, + prompt: AnalyzeTextPrompt, + video_id: typing.Optional[str] = OMIT, + video: typing.Optional[VideoContext] = OMIT, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, response_format: typing.Optional[ResponseFormat] = OMIT, - max_tokens: typing.Optional[int] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> HttpResponse[NonStreamAnalyzeResponse]: """ - This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. + This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 1 hour + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos up to 1 hour + - Retrieve immediate results without waiting for asynchronous processing + - Stream text fragments in real-time for immediate processing and feedback + + **Do not use this method for**: + - Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - - This endpoint supports streaming responses. Parameters ---------- - video_id : str - The unique identifier of the video for which you wish to generate a text. + prompt : AnalyzeTextPrompt - prompt : str - A prompt that guides the model on the desired format or content. + video_id : typing.Optional[str] + The unique identifier of the video to analyze. 
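To make the reshaped `analyze_stream` signature concrete: `video_id` is now optional and marked for future deprecation in favor of the new `video` parameter (a `VideoContext`, whose fields this diff does not show), so this sketch keeps the legacy `video_id` and only notes the migration. How each streamed item exposes its text fragment is an assumption; inspect `StreamAnalyzeResponse` in your SDK version for the exact shape.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="<YOUR_API_KEY>")

stream = client.analyze_stream(
    video_id="<VIDEO_ID>",  # legacy parameter; the new `video` param supersedes it
    prompt="List five SEO keywords for this video.",
    temperature=0.2,
)
for event in stream:
    # Assumes text events carry a `text` attribute; other event types pass through.
    print(getattr(event, "text", event))
```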
- - - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. - - Your prompts can be instructive or descriptive, or you can also phrase them as questions. - - The maximum length of a prompt is 2,000 tokens. - + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead. - **Examples**: + video : typing.Optional[VideoContext] - - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). - - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. - - temperature : typing.Optional[float] - Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. - - **Default:** 0.2 - **Min:** 0 - **Max:** 1 + temperature : typing.Optional[AnalyzeTemperature] response_format : typing.Optional[ResponseFormat] - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. + max_tokens : typing.Optional[AnalyzeMaxTokens] request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -218,6 +229,9 @@ def analyze( method="POST", json={ "video_id": video_id, + "video": convert_and_respect_annotation_metadata( + object_=video, annotation=VideoContext, direction="write" + ), "prompt": prompt, "temperature": temperature, "response_format": convert_and_respect_annotation_metadata( @@ -278,51 +292,53 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper): async def analyze_stream( self, *, - video_id: str, - prompt: str, - temperature: typing.Optional[float] = OMIT, + prompt: AnalyzeTextPrompt, + video_id: typing.Optional[str] = OMIT, + video: typing.Optional[VideoContext] = OMIT, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, response_format: typing.Optional[ResponseFormat] = OMIT, - max_tokens: typing.Optional[int] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[StreamAnalyzeResponse]]]: """ - This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. + This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 1 hour + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. + + + **When to use this method**: + - Analyze videos up to 1 hour + - Retrieve immediate results without waiting for asynchronous processing + - Stream text fragments in real-time for immediate processing and feedback + + **Do not use this method for**: + - Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. 
- - This endpoint supports streaming responses. Parameters ---------- - video_id : str - The unique identifier of the video for which you wish to generate a text. + prompt : AnalyzeTextPrompt - prompt : str - A prompt that guides the model on the desired format or content. + video_id : typing.Optional[str] + The unique identifier of the video to analyze. - - - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. - - Your prompts can be instructive or descriptive, or you can also phrase them as questions. - - The maximum length of a prompt is 2,000 tokens. - + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead. - **Examples**: + video : typing.Optional[VideoContext] - - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). - - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. - - temperature : typing.Optional[float] - Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. - - **Default:** 0.2 - **Min:** 0 - **Max:** 1 + temperature : typing.Optional[AnalyzeTemperature] response_format : typing.Optional[ResponseFormat] - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. + max_tokens : typing.Optional[AnalyzeMaxTokens] request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -337,6 +353,9 @@ async def analyze_stream( method="POST", json={ "video_id": video_id, + "video": convert_and_respect_annotation_metadata( + object_=video, annotation=VideoContext, direction="write" + ), "prompt": prompt, "temperature": temperature, "response_format": convert_and_respect_annotation_metadata( @@ -408,51 +427,53 @@ async def _iter(): async def analyze( self, *, - video_id: str, - prompt: str, - temperature: typing.Optional[float] = OMIT, + prompt: AnalyzeTextPrompt, + video_id: typing.Optional[str] = OMIT, + video: typing.Optional[VideoContext] = OMIT, + temperature: typing.Optional[AnalyzeTemperature] = OMIT, response_format: typing.Optional[ResponseFormat] = OMIT, - max_tokens: typing.Optional[int] = OMIT, + max_tokens: typing.Optional[AnalyzeMaxTokens] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AsyncHttpResponse[NonStreamAnalyzeResponse]: """ - This endpoint analyzes your videos and creates fully customizable text based on your prompts, including but not limited to tables of content, action items, memos, and detailed analyses. + This method synchronously analyzes your videos and generates fully customizable text based on your prompts. + + + - Minimum duration: 4 seconds + - Maximum duration: 1 hour + - Formats: [FFmpeg supported formats](https://ffmpeg.org/ffmpeg-formats.html) + - Resolution: 360x360 to 5184x2160 pixels + - Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1. 
+ + + **When to use this method**: + - Analyze videos up to 1 hour + - Retrieve immediate results without waiting for asynchronous processing + - Stream text fragments in real-time for immediate processing and feedback + + **Do not use this method for**: + - Videos longer than 1 hour. Use the [`POST`](/v1.3/api-reference/analyze-videos/create-async-analysis-task) method of the `/analyze/tasks` endpoint instead. - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - - This endpoint supports streaming responses. Parameters ---------- - video_id : str - The unique identifier of the video for which you wish to generate a text. + prompt : AnalyzeTextPrompt - prompt : str - A prompt that guides the model on the desired format or content. + video_id : typing.Optional[str] + The unique identifier of the video to analyze. - - - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt. - - Your prompts can be instructive or descriptive, or you can also phrase them as questions. - - The maximum length of a prompt is 2,000 tokens. - + This parameter will be deprecated and removed in a future version. Use the [`video`](/v1.3/api-reference/analyze-videos/sync-analysis#request.body.video) parameter instead. - **Examples**: + video : typing.Optional[VideoContext] - - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization). - - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks. - - temperature : typing.Optional[float] - Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output. - - **Default:** 0.2 - **Min:** 0 - **Max:** 1 + temperature : typing.Optional[AnalyzeTemperature] response_format : typing.Optional[ResponseFormat] - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. + max_tokens : typing.Optional[AnalyzeMaxTokens] request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -467,6 +488,9 @@ async def analyze( method="POST", json={ "video_id": video_id, + "video": convert_and_respect_annotation_metadata( + object_=video, annotation=VideoContext, direction="write" + ), "prompt": prompt, "temperature": temperature, "response_format": convert_and_respect_annotation_metadata( diff --git a/src/twelvelabs/search/__init__.py b/src/twelvelabs/search/__init__.py index 6da738b..61d3add 100644 --- a/src/twelvelabs/search/__init__.py +++ b/src/twelvelabs/search/__init__.py @@ -7,7 +7,6 @@ SearchCreateRequestOperator, SearchCreateRequestQueryMediaType, SearchCreateRequestSearchOptionsItem, - SearchCreateRequestSortOption, SearchCreateRequestTranscriptionOptionsItem, SearchRetrieveResponse, SearchRetrieveResponsePageInfo, @@ -18,7 +17,6 @@ "SearchCreateRequestOperator", "SearchCreateRequestQueryMediaType", "SearchCreateRequestSearchOptionsItem", - "SearchCreateRequestSortOption", "SearchCreateRequestTranscriptionOptionsItem", "SearchRetrieveResponse", "SearchRetrieveResponsePageInfo", diff --git a/src/twelvelabs/search/client.py b/src/twelvelabs/search/client.py index 5d444a3..07485db 100644 --- a/src/twelvelabs/search/client.py +++ b/src/twelvelabs/search/client.py @@ -6,13 +6,11 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.request_options import RequestOptions from ..types.search_results import SearchResults -from ..types.threshold_search import ThresholdSearch from .raw_client import AsyncRawSearchClient, RawSearchClient from .types.search_create_request_group_by import SearchCreateRequestGroupBy from .types.search_create_request_operator import SearchCreateRequestOperator from .types.search_create_request_query_media_type import SearchCreateRequestQueryMediaType from .types.search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem -from .types.search_create_request_sort_option import SearchCreateRequestSortOption from .types.search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem from .types.search_retrieve_response import SearchRetrieveResponse @@ -45,10 +43,7 @@ def create( query_media_file: typing.Optional[core.File] = OMIT, query_text: typing.Optional[str] = OMIT, transcription_options: typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] = OMIT, - adjust_confidence_level: typing.Optional[float] = OMIT, group_by: typing.Optional[SearchCreateRequestGroupBy] = OMIT, - threshold: typing.Optional[ThresholdSearch] = OMIT, - sort_option: typing.Optional[SearchCreateRequestSortOption] = OMIT, operator: typing.Optional[SearchCreateRequestOperator] = OMIT, page_limit: typing.Optional[int] = OMIT, filter: typing.Optional[str] = OMIT, @@ -66,14 +61,12 @@ def create( - Provide up to 10 images by specifying the following parameters multiple times: - `query_media_url`: Publicly accessible URL of your media file. - `query_media_file`: Local media file. - - Marengo 2.7 supports a single image per request. - - **Composed text and media queries** (Marengo 3.0 only): + **Composed text and media queries**: - Use the `query_text` parameter for your text query. - Set `query_media_type` to `image`. - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times. - **Entity search** (Marengo 3.0 only and in beta): + **Entity search** (beta): - To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter. 
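The search docstrings above describe two query styles worth illustrating: composed image+text queries and entity search. The parameter names (`query_media_type`, `query_media_url`, `query_text`, `search_options`) come directly from this diff, and the `<@entity123>` marker syntax is quoted from the docstring; the index ID and URL are placeholders.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="<YOUR_API_KEY>")

# Composed image+text query: set query_media_type to "image" and pass
# query_text alongside the media (up to 10 images per request).
results = client.search.create(
    index_id="<INDEX_ID>",
    search_options=["visual"],
    query_media_type="image",
    query_media_url="https://example.com/reference.jpg",
    query_text="same person, wearing a red jacket",
)

# Entity search (beta): wrap the entity's unique ID in <@...> markers.
entity_results = client.search.create(
    index_id="<INDEX_ID>",
    search_options=["visual"],
    query_text="<@entity123> is walking",
)
```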
@@ -91,8 +84,8 @@ def create( Available options: - `visual`: Searches visual content. - - `audio`: Searches non-speech audio (Marengo 3.0) or all audio (Marengo 2.7). - - `transcription`: Spoken words (Marengo 3.0 only) + - `audio`: Searches non-speech audio. + - `transcription`: Spoken words - You can specify multiple search options in conjunction with the [`operator`](/v1.3/api-reference/any-to-video-search/make-search-request#request.body.operator.operator) parameter described below to broaden or narrow your search. For example, to search using both visual and non-speech audio content, include this parameter two times in the request as shown below: @@ -103,7 +96,7 @@ def create( ``` - For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. + For guidance, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. query_media_type : typing.Optional[SearchCreateRequestQueryMediaType] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. @@ -126,10 +119,10 @@ def create( If you're using the Entity Search feature to search for specific persons in your video content, you must enclose the unique identifier of your entity between the `<@` and `>` markers. For example, to search for an entity with the ID `entity123`, use `<@entity123> is walking` as your query. - The maximum query length varies by model. Marengo 3.0 supports up to 500 tokens per query, while Marengo 2.7 supports up to 77 tokens per query. + Marengo supports up to 500 tokens per query. transcription_options : typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] - Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when using Marengo 3.0 with the `search_options` parameter containing the `transcription` value. + Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when the `search_options` parameter contains the `transcription` value. Available options: - `lexical`: Exact word matching @@ -139,16 +132,6 @@ def create( **Default**: `["lexical", "semantic"]`. - adjust_confidence_level : typing.Optional[float] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - This parameter specifies the strictness of the thresholds for assigning the high, medium, or low confidence levels to search results. If you use a lower value, the thresholds become more relaxed, and more search results will be classified as having high, medium, or low confidence levels. You can use this parameter to include a broader range of potentially relevant video clips, even if some results might be less precise. - - **Min**: 0 - **Max**: 1 - **Default:** 0.5 - group_by : typing.Optional[SearchCreateRequestGroupBy] Use this parameter to group or ungroup items in a response. It can take one of the following values: - `video`: The platform will group the matching video clips in the response by video. 
@@ -156,24 +139,6 @@ def create( **Default:** `clip` - threshold : typing.Optional[ThresholdSearch] - - sort_option : typing.Optional[SearchCreateRequestSortOption] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - - Use this parameter to specify the sort order for the response. - - When performing a search, the platform assigns a relevance ranking to each video clip that matches your search terms. By default, the search results are sorted by relevance ranking in ascending order, with 1 being the most relevant result. - - If you set this parameter to `score` and `group_by` is set to `video`, the platform will determine the highest relevance ranking (lowest number) for each video and sort the videos in the response by this ranking. For each video, the matching video clips will be sorted by relevance ranking in ascending order. - - If you set this parameter to `clip_count` and `group_by` is set to `video`, the platform will sort the videos in the response by the number of clips. For each video, the matching video clips will be sorted by relevance ranking in ascending order. You can use `clip_count` only when the matching video clips are grouped by video. - - - **Default:** `score` - operator : typing.Optional[SearchCreateRequestOperator] Combines multiple search options using `or` or `and`. Use `and` to find segments matching all search options. Use `or` to find segments matching any search option. For detailed guidance on using this parameter, see the [Combine multiple modalities](/v1.3/docs/concepts/modalities#combine-multiple-modalities) section. @@ -252,10 +217,7 @@ def create( query_media_file=query_media_file, query_text=query_text, transcription_options=transcription_options, - adjust_confidence_level=adjust_confidence_level, group_by=group_by, - threshold=threshold, - sort_option=sort_option, operator=operator, page_limit=page_limit, filter=filter, @@ -337,10 +299,7 @@ async def create( query_media_file: typing.Optional[core.File] = OMIT, query_text: typing.Optional[str] = OMIT, transcription_options: typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] = OMIT, - adjust_confidence_level: typing.Optional[float] = OMIT, group_by: typing.Optional[SearchCreateRequestGroupBy] = OMIT, - threshold: typing.Optional[ThresholdSearch] = OMIT, - sort_option: typing.Optional[SearchCreateRequestSortOption] = OMIT, operator: typing.Optional[SearchCreateRequestOperator] = OMIT, page_limit: typing.Optional[int] = OMIT, filter: typing.Optional[str] = OMIT, @@ -358,14 +317,12 @@ async def create( - Provide up to 10 images by specifying the following parameters multiple times: - `query_media_url`: Publicly accessible URL of your media file. - `query_media_file`: Local media file. - - Marengo 2.7 supports a single image per request. - - **Composed text and media queries** (Marengo 3.0 only): + **Composed text and media queries**: - Use the `query_text` parameter for your text query. - Set `query_media_type` to `image`. - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times. - **Entity search** (Marengo 3.0 only and in beta): + **Entity search** (beta): - To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter. 
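Combining the simplified modality options in one request, per the docstrings above: all three `search_options` values, the `transcription_options` matching modes (defaulting to `["lexical", "semantic"]`), and the `operator`/`group_by` parameters. Values are taken from this diff; only the query text and index ID are placeholders.

```python
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="<YOUR_API_KEY>")

# Require visual content, non-speech audio, and spoken words to all match.
results = client.search.create(
    index_id="<INDEX_ID>",
    query_text="crowd cheering after a goal",
    search_options=["visual", "audio", "transcription"],
    transcription_options=["lexical", "semantic"],  # the documented default
    operator="and",
    group_by="video",
    page_limit=10,
)
```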
@@ -383,8 +340,8 @@ async def create( Available options: - `visual`: Searches visual content. - - `audio`: Searches non-speech audio (Marengo 3.0) or all audio (Marengo 2.7). - - `transcription`: Spoken words (Marengo 3.0 only) + - `audio`: Searches non-speech audio. + - `transcription`: Spoken words - You can specify multiple search options in conjunction with the [`operator`](/v1.3/api-reference/any-to-video-search/make-search-request#request.body.operator.operator) parameter described below to broaden or narrow your search. For example, to search using both visual and non-speech audio content, include this parameter two times in the request as shown below: @@ -395,7 +352,7 @@ async def create( ``` - For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. + For guidance, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. query_media_type : typing.Optional[SearchCreateRequestQueryMediaType] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. @@ -418,10 +375,10 @@ async def create( If you're using the Entity Search feature to search for specific persons in your video content, you must enclose the unique identifier of your entity between the `<@` and `>` markers. For example, to search for an entity with the ID `entity123`, use `<@entity123> is walking` as your query. - The maximum query length varies by model. Marengo 3.0 supports up to 500 tokens per query, while Marengo 2.7 supports up to 77 tokens per query. + Marengo supports up to 500 tokens per query. transcription_options : typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] - Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when using Marengo 3.0 with the `search_options` parameter containing the `transcription` value. + Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when the `search_options` parameter contains the `transcription` value. Available options: - `lexical`: Exact word matching @@ -431,16 +388,6 @@ async def create( **Default**: `["lexical", "semantic"]`. - adjust_confidence_level : typing.Optional[float] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - This parameter specifies the strictness of the thresholds for assigning the high, medium, or low confidence levels to search results. If you use a lower value, the thresholds become more relaxed, and more search results will be classified as having high, medium, or low confidence levels. You can use this parameter to include a broader range of potentially relevant video clips, even if some results might be less precise. - - **Min**: 0 - **Max**: 1 - **Default:** 0.5 - group_by : typing.Optional[SearchCreateRequestGroupBy] Use this parameter to group or ungroup items in a response. It can take one of the following values: - `video`: The platform will group the matching video clips in the response by video. 
@@ -448,24 +395,6 @@ async def create( **Default:** `clip` - threshold : typing.Optional[ThresholdSearch] - - sort_option : typing.Optional[SearchCreateRequestSortOption] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - - Use this parameter to specify the sort order for the response. - - When performing a search, the platform assigns a relevance ranking to each video clip that matches your search terms. By default, the search results are sorted by relevance ranking in ascending order, with 1 being the most relevant result. - - If you set this parameter to `score` and `group_by` is set to `video`, the platform will determine the highest relevance ranking (lowest number) for each video and sort the videos in the response by this ranking. For each video, the matching video clips will be sorted by relevance ranking in ascending order. - - If you set this parameter to `clip_count` and `group_by` is set to `video`, the platform will sort the videos in the response by the number of clips. For each video, the matching video clips will be sorted by relevance ranking in ascending order. You can use `clip_count` only when the matching video clips are grouped by video. - - - **Default:** `score` - operator : typing.Optional[SearchCreateRequestOperator] Combines multiple search options using `or` or `and`. Use `and` to find segments matching all search options. Use `or` to find segments matching any search option. For detailed guidance on using this parameter, see the [Combine multiple modalities](/v1.3/docs/concepts/modalities#combine-multiple-modalities) section. 
@@ -552,10 +481,7 @@ async def main() -> None: query_media_file=query_media_file, query_text=query_text, transcription_options=transcription_options, - adjust_confidence_level=adjust_confidence_level, group_by=group_by, - threshold=threshold, - sort_option=sort_option, operator=operator, page_limit=page_limit, filter=filter, diff --git a/src/twelvelabs/search/raw_client.py b/src/twelvelabs/search/raw_client.py index 4eda3d5..fe5c930 100644 --- a/src/twelvelabs/search/raw_client.py +++ b/src/twelvelabs/search/raw_client.py @@ -13,12 +13,10 @@ from ..errors.bad_request_error import BadRequestError from ..errors.too_many_requests_error import TooManyRequestsError from ..types.search_results import SearchResults -from ..types.threshold_search import ThresholdSearch from .types.search_create_request_group_by import SearchCreateRequestGroupBy from .types.search_create_request_operator import SearchCreateRequestOperator from .types.search_create_request_query_media_type import SearchCreateRequestQueryMediaType from .types.search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem -from .types.search_create_request_sort_option import SearchCreateRequestSortOption from .types.search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem from .types.search_retrieve_response import SearchRetrieveResponse @@ -40,10 +38,7 @@ def create( query_media_file: typing.Optional[core.File] = OMIT, query_text: typing.Optional[str] = OMIT, transcription_options: typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] = OMIT, - adjust_confidence_level: typing.Optional[float] = OMIT, group_by: typing.Optional[SearchCreateRequestGroupBy] = OMIT, - threshold: typing.Optional[ThresholdSearch] = OMIT, - sort_option: typing.Optional[SearchCreateRequestSortOption] = OMIT, operator: typing.Optional[SearchCreateRequestOperator] = OMIT, page_limit: typing.Optional[int] = OMIT, filter: typing.Optional[str] = OMIT, @@ -61,14 +56,12 @@ def create( - Provide up to 10 images by specifying the following parameters multiple times: - `query_media_url`: Publicly accessible URL of your media file. - `query_media_file`: Local media file. - - Marengo 2.7 supports a single image per request. - - **Composed text and media queries** (Marengo 3.0 only): + **Composed text and media queries**: - Use the `query_text` parameter for your text query. - Set `query_media_type` to `image`. - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times. - **Entity search** (Marengo 3.0 only and in beta): + **Entity search** (beta): - To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter. @@ -86,8 +79,8 @@ def create( Available options: - `visual`: Searches visual content. - - `audio`: Searches non-speech audio (Marengo 3.0) or all audio (Marengo 2.7). - - `transcription`: Spoken words (Marengo 3.0 only) + - `audio`: Searches non-speech audio. + - `transcription`: Spoken words - You can specify multiple search options in conjunction with the [`operator`](/v1.3/api-reference/any-to-video-search/make-search-request#request.body.operator.operator) parameter described below to broaden or narrow your search. 
For example, to search using both visual and non-speech audio content, include this parameter two times in the request as shown below: @@ -98,7 +91,7 @@ def create( ``` - For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. + For guidance, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. query_media_type : typing.Optional[SearchCreateRequestQueryMediaType] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. @@ -121,10 +114,10 @@ def create( If you're using the Entity Search feature to search for specific persons in your video content, you must enclose the unique identifier of your entity between the `<@` and `>` markers. For example, to search for an entity with the ID `entity123`, use `<@entity123> is walking` as your query. - The maximum query length varies by model. Marengo 3.0 supports up to 500 tokens per query, while Marengo 2.7 supports up to 77 tokens per query. + Marengo supports up to 500 tokens per query. transcription_options : typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] - Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when using Marengo 3.0 with the `search_options` parameter containing the `transcription` value. + Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when the `search_options` parameter contains the `transcription` value. Available options: - `lexical`: Exact word matching @@ -134,16 +127,6 @@ def create( **Default**: `["lexical", "semantic"]`. - adjust_confidence_level : typing.Optional[float] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - This parameter specifies the strictness of the thresholds for assigning the high, medium, or low confidence levels to search results. If you use a lower value, the thresholds become more relaxed, and more search results will be classified as having high, medium, or low confidence levels. You can use this parameter to include a broader range of potentially relevant video clips, even if some results might be less precise. - - **Min**: 0 - **Max**: 1 - **Default:** 0.5 - group_by : typing.Optional[SearchCreateRequestGroupBy] Use this parameter to group or ungroup items in a response. It can take one of the following values: - `video`: The platform will group the matching video clips in the response by video. @@ -151,24 +134,6 @@ def create( **Default:** `clip` - threshold : typing.Optional[ThresholdSearch] - - sort_option : typing.Optional[SearchCreateRequestSortOption] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - - Use this parameter to specify the sort order for the response. - - When performing a search, the platform assigns a relevance ranking to each video clip that matches your search terms. 
By default, the search results are sorted by relevance ranking in ascending order, with 1 being the most relevant result. - - If you set this parameter to `score` and `group_by` is set to `video`, the platform will determine the highest relevance ranking (lowest number) for each video and sort the videos in the response by this ranking. For each video, the matching video clips will be sorted by relevance ranking in ascending order. - - If you set this parameter to `clip_count` and `group_by` is set to `video`, the platform will sort the videos in the response by the number of clips. For each video, the matching video clips will be sorted by relevance ranking in ascending order. You can use `clip_count` only when the matching video clips are grouped by video. - - - **Default:** `score` - operator : typing.Optional[SearchCreateRequestOperator] Combines multiple search options using `or` or `and`. Use `and` to find segments matching all search options. Use `or` to find segments matching any search option. For detailed guidance on using this parameter, see the [Combine multiple modalities](/v1.3/docs/concepts/modalities#combine-multiple-modalities) section. @@ -237,10 +202,7 @@ def create( "index_id": index_id, "search_options": search_options, "transcription_options": transcription_options, - "adjust_confidence_level": adjust_confidence_level, "group_by": group_by, - "threshold": threshold, - "sort_option": sort_option, "operator": operator, "page_limit": page_limit, "filter": filter, @@ -369,10 +331,7 @@ async def create( query_media_file: typing.Optional[core.File] = OMIT, query_text: typing.Optional[str] = OMIT, transcription_options: typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] = OMIT, - adjust_confidence_level: typing.Optional[float] = OMIT, group_by: typing.Optional[SearchCreateRequestGroupBy] = OMIT, - threshold: typing.Optional[ThresholdSearch] = OMIT, - sort_option: typing.Optional[SearchCreateRequestSortOption] = OMIT, operator: typing.Optional[SearchCreateRequestOperator] = OMIT, page_limit: typing.Optional[int] = OMIT, filter: typing.Optional[str] = OMIT, @@ -390,14 +349,12 @@ async def create( - Provide up to 10 images by specifying the following parameters multiple times: - `query_media_url`: Publicly accessible URL of your media file. - `query_media_file`: Local media file. - - Marengo 2.7 supports a single image per request. - - **Composed text and media queries** (Marengo 3.0 only): + **Composed text and media queries**: - Use the `query_text` parameter for your text query. - Set `query_media_type` to `image`. - Provide up to 10 images by specifying the `query_media_url` and `query_media_file` parameters multiple times. - **Entity search** (Marengo 3.0 only and in beta): + **Entity search** (beta): - To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter. @@ -415,8 +372,8 @@ async def create( Available options: - `visual`: Searches visual content. - - `audio`: Searches non-speech audio (Marengo 3.0) or all audio (Marengo 2.7). - - `transcription`: Spoken words (Marengo 3.0 only) + - `audio`: Searches non-speech audio. + - `transcription`: Spoken words - You can specify multiple search options in conjunction with the [`operator`](/v1.3/api-reference/any-to-video-search/make-search-request#request.body.operator.operator) parameter described below to broaden or narrow your search. 
For example, to search using both visual and non-speech audio content, include this parameter two times in the request as shown below: @@ -427,7 +384,7 @@ async def create( ``` - For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. + For guidance, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. query_media_type : typing.Optional[SearchCreateRequestQueryMediaType] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. @@ -450,10 +407,10 @@ async def create( If you're using the Entity Search feature to search for specific persons in your video content, you must enclose the unique identifier of your entity between the `<@` and `>` markers. For example, to search for an entity with the ID `entity123`, use `<@entity123> is walking` as your query. - The maximum query length varies by model. Marengo 3.0 supports up to 500 tokens per query, while Marengo 2.7 supports up to 77 tokens per query. + Marengo supports up to 500 tokens per query. transcription_options : typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] - Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when using Marengo 3.0 with the `search_options` parameter containing the `transcription` value. + Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when the `search_options` parameter contains the `transcription` value. Available options: - `lexical`: Exact word matching @@ -463,16 +420,6 @@ async def create( **Default**: `["lexical", "semantic"]`. - adjust_confidence_level : typing.Optional[float] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - This parameter specifies the strictness of the thresholds for assigning the high, medium, or low confidence levels to search results. If you use a lower value, the thresholds become more relaxed, and more search results will be classified as having high, medium, or low confidence levels. You can use this parameter to include a broader range of potentially relevant video clips, even if some results might be less precise. - - **Min**: 0 - **Max**: 1 - **Default:** 0.5 - group_by : typing.Optional[SearchCreateRequestGroupBy] Use this parameter to group or ungroup items in a response. It can take one of the following values: - `video`: The platform will group the matching video clips in the response by video. @@ -480,24 +427,6 @@ async def create( **Default:** `clip` - threshold : typing.Optional[ThresholdSearch] - - sort_option : typing.Optional[SearchCreateRequestSortOption] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - - Use this parameter to specify the sort order for the response. - - When performing a search, the platform assigns a relevance ranking to each video clip that matches your search terms. 
By default, the search results are sorted by relevance ranking in ascending order, with 1 being the most relevant result. - - If you set this parameter to `score` and `group_by` is set to `video`, the platform will determine the highest relevance ranking (lowest number) for each video and sort the videos in the response by this ranking. For each video, the matching video clips will be sorted by relevance ranking in ascending order. - - If you set this parameter to `clip_count` and `group_by` is set to `video`, the platform will sort the videos in the response by the number of clips. For each video, the matching video clips will be sorted by relevance ranking in ascending order. You can use `clip_count` only when the matching video clips are grouped by video. - - - **Default:** `score` - operator : typing.Optional[SearchCreateRequestOperator] Combines multiple search options using `or` or `and`. Use `and` to find segments matching all search options. Use `or` to find segments matching any search option. For detailed guidance on using this parameter, see the [Combine multiple modalities](/v1.3/docs/concepts/modalities#combine-multiple-modalities) section. @@ -566,10 +495,7 @@ async def create( "index_id": index_id, "search_options": search_options, "transcription_options": transcription_options, - "adjust_confidence_level": adjust_confidence_level, "group_by": group_by, - "threshold": threshold, - "sort_option": sort_option, "operator": operator, "page_limit": page_limit, "filter": filter, diff --git a/src/twelvelabs/search/types/__init__.py b/src/twelvelabs/search/types/__init__.py index 6d84d9a..14e498d 100644 --- a/src/twelvelabs/search/types/__init__.py +++ b/src/twelvelabs/search/types/__init__.py @@ -6,7 +6,6 @@ from .search_create_request_operator import SearchCreateRequestOperator from .search_create_request_query_media_type import SearchCreateRequestQueryMediaType from .search_create_request_search_options_item import SearchCreateRequestSearchOptionsItem -from .search_create_request_sort_option import SearchCreateRequestSortOption from .search_create_request_transcription_options_item import SearchCreateRequestTranscriptionOptionsItem from .search_retrieve_response import SearchRetrieveResponse from .search_retrieve_response_page_info import SearchRetrieveResponsePageInfo @@ -16,7 +15,6 @@ "SearchCreateRequestOperator", "SearchCreateRequestQueryMediaType", "SearchCreateRequestSearchOptionsItem", - "SearchCreateRequestSortOption", "SearchCreateRequestTranscriptionOptionsItem", "SearchRetrieveResponse", "SearchRetrieveResponsePageInfo", diff --git a/src/twelvelabs/search/types/search_create_request_sort_option.py b/src/twelvelabs/search/types/search_create_request_sort_option.py deleted file mode 100644 index 3922b7f..0000000 --- a/src/twelvelabs/search/types/search_create_request_sort_option.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SearchCreateRequestSortOption = typing.Union[typing.Literal["score", "clip_count"], typing.Any] diff --git a/src/twelvelabs/types/__init__.py b/src/twelvelabs/types/__init__.py index 5fedbba..b62eb8a 100644 --- a/src/twelvelabs/types/__init__.py +++ b/src/twelvelabs/types/__init__.py @@ -2,6 +2,15 @@ # isort: skip_file +from .analyze_max_tokens import AnalyzeMaxTokens +from .analyze_task_error import AnalyzeTaskError +from .analyze_task_response import AnalyzeTaskResponse +from .analyze_task_result import AnalyzeTaskResult +from .analyze_task_result_usage import AnalyzeTaskResultUsage +from .analyze_task_status import AnalyzeTaskStatus +from .analyze_task_webhook_info import AnalyzeTaskWebhookInfo +from .analyze_temperature import AnalyzeTemperature +from .analyze_text_prompt import AnalyzeTextPrompt from .asset import Asset from .asset_method import AssetMethod from .asset_status import AssetStatus @@ -25,7 +34,7 @@ from .chunk_info_status import ChunkInfoStatus from .completed_chunk import CompletedChunk from .completed_chunk_proof_type import CompletedChunkProofType -from .confidence import Confidence +from .create_analyze_task_response import CreateAnalyzeTaskResponse from .create_asset_upload_response import CreateAssetUploadResponse from .created_at import CreatedAt from .embedding_audio_metadata import EmbeddingAudioMetadata @@ -65,6 +74,7 @@ from .expires_at import ExpiresAt from .finish_reason import FinishReason from .forbidden_error_body import ForbiddenErrorBody +from .generated_text_data import GeneratedTextData from .get_upload_status_response import GetUploadStatusResponse from .hls_object import HlsObject from .hls_object_status import HlsObjectStatus @@ -97,6 +107,7 @@ from .next_page_token import NextPageToken from .non_stream_analyze_response import NonStreamAnalyzeResponse from .not_found_error_body import NotFoundErrorBody +from .one import One from .page import Page from .page_info import PageInfo from .presigned_url_chunk import PresignedUrlChunk @@ -106,7 +117,6 @@ from .request_additional_presigned_ur_ls_response import RequestAdditionalPresignedUrLsResponse from .response_format import ResponseFormat from .response_format_type import ResponseFormatType -from .score_search_terms import ScoreSearchTerms from .search_item import SearchItem from .search_item_clips_item import SearchItemClipsItem from .search_pool import SearchPool @@ -136,7 +146,6 @@ from .text_embedding_result import TextEmbeddingResult from .text_image_input_request import TextImageInputRequest from .text_input_request import TextInputRequest -from .threshold_search import ThresholdSearch from .thumbnail_url import ThumbnailUrl from .token_usage import TokenUsage from .total_inner_matches import TotalInnerMatches @@ -144,8 +153,11 @@ from .total_results import TotalResults from .transcription_data import TranscriptionData from .transcription_data_item import TranscriptionDataItem +from .two import Two from .updated_at import UpdatedAt +from .url import Url from .user_metadata import UserMetadata +from .video_context import VideoContext, VideoContext_AssetId, VideoContext_Base64String, VideoContext_Url from .video_embedding_metadata import VideoEmbeddingMetadata from .video_embedding_task import VideoEmbeddingTask from .video_embedding_task_video_embedding import VideoEmbeddingTaskVideoEmbedding @@ -167,6 +179,15 @@ from .video_vector_system_metadata import VideoVectorSystemMetadata __all__ = [ + "AnalyzeMaxTokens", + "AnalyzeTaskError", + "AnalyzeTaskResponse", + 
"AnalyzeTaskResult", + "AnalyzeTaskResultUsage", + "AnalyzeTaskStatus", + "AnalyzeTaskWebhookInfo", + "AnalyzeTemperature", + "AnalyzeTextPrompt", "Asset", "AssetMethod", "AssetStatus", @@ -190,7 +211,7 @@ "ChunkInfoStatus", "CompletedChunk", "CompletedChunkProofType", - "Confidence", + "CreateAnalyzeTaskResponse", "CreateAssetUploadResponse", "CreatedAt", "EmbeddingAudioMetadata", @@ -226,6 +247,7 @@ "ExpiresAt", "FinishReason", "ForbiddenErrorBody", + "GeneratedTextData", "GetUploadStatusResponse", "HlsObject", "HlsObjectStatus", @@ -258,6 +280,7 @@ "NextPageToken", "NonStreamAnalyzeResponse", "NotFoundErrorBody", + "One", "Page", "PageInfo", "PresignedUrlChunk", @@ -267,7 +290,6 @@ "RequestAdditionalPresignedUrLsResponse", "ResponseFormat", "ResponseFormatType", - "ScoreSearchTerms", "SearchItem", "SearchItemClipsItem", "SearchPool", @@ -295,7 +317,6 @@ "TextEmbeddingResult", "TextImageInputRequest", "TextInputRequest", - "ThresholdSearch", "ThumbnailUrl", "TokenUsage", "TotalInnerMatches", @@ -303,8 +324,14 @@ "TotalResults", "TranscriptionData", "TranscriptionDataItem", + "Two", "UpdatedAt", + "Url", "UserMetadata", + "VideoContext", + "VideoContext_AssetId", + "VideoContext_Base64String", + "VideoContext_Url", "VideoEmbeddingMetadata", "VideoEmbeddingTask", "VideoEmbeddingTaskVideoEmbedding", diff --git a/src/twelvelabs/types/confidence.py b/src/twelvelabs/types/analyze_max_tokens.py similarity index 73% rename from src/twelvelabs/types/confidence.py rename to src/twelvelabs/types/analyze_max_tokens.py index 13be0e1..3996e42 100644 --- a/src/twelvelabs/types/confidence.py +++ b/src/twelvelabs/types/analyze_max_tokens.py @@ -1,3 +1,3 @@ # This file was auto-generated by Fern from our API Definition. -Confidence = str +AnalyzeMaxTokens = int diff --git a/src/twelvelabs/types/analyze_task_error.py b/src/twelvelabs/types/analyze_task_error.py new file mode 100644 index 0000000..99d1d7f --- /dev/null +++ b/src/twelvelabs/types/analyze_task_error.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel + + +class AnalyzeTaskError(UniversalBaseModel): + """ + Details about why the task failed. + """ + + message: str = pydantic.Field() + """ + A message that describes why the task failed. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/analyze_task_response.py b/src/twelvelabs/types/analyze_task_response.py new file mode 100644 index 0000000..838ef79 --- /dev/null +++ b/src/twelvelabs/types/analyze_task_response.py @@ -0,0 +1,57 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from .analyze_task_error import AnalyzeTaskError +from .analyze_task_result import AnalyzeTaskResult +from .analyze_task_status import AnalyzeTaskStatus +from .analyze_task_webhook_info import AnalyzeTaskWebhookInfo + + +class AnalyzeTaskResponse(UniversalBaseModel): + """ + Represents the status and results of an analysis task. + """ + + task_id: str = pydantic.Field() + """ + The unique identifier of the analysis task. 
+ """ + + status: AnalyzeTaskStatus + created_at: dt.datetime = pydantic.Field() + """ + A string representing the date and time, in RFC 3339 format (“YYYY-MM-DDTHH:mm:ssZ”), when the analysis task was created. + """ + + completed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + A string representing the date and time, in RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"), when the analysis task was completed or failed. The platform returns this field only if `status` is `ready` or `failed`. + """ + + result: typing.Optional[AnalyzeTaskResult] = pydantic.Field(default=None) + """ + An object that contains the generated text and additional information. The platform returns this object only when `status` is `ready`. + """ + + error: typing.Optional[AnalyzeTaskError] = pydantic.Field(default=None) + """ + Details about why the task failed. The platform returns this object only when `status` is `failed`. + """ + + webhooks: typing.Optional[typing.List[AnalyzeTaskWebhookInfo]] = pydantic.Field(default=None) + """ + The delivery status of each webhook endpoint. The platform omits this field when no webhooks are configured. You can register webhooks through the Playground. See the [Webhooks](/v1.3/docs/advanced/webhooks) page for details. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/analyze_task_result.py b/src/twelvelabs/types/analyze_task_result.py new file mode 100644 index 0000000..799e07e --- /dev/null +++ b/src/twelvelabs/types/analyze_task_result.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from .analyze_task_result_usage import AnalyzeTaskResultUsage +from .finish_reason import FinishReason +from .generated_text_data import GeneratedTextData + + +class AnalyzeTaskResult(UniversalBaseModel): + """ + The analysis results for a completed task. + """ + + generation_id: str = pydantic.Field() + """ + The unique identifier for the generation session. + """ + + data: GeneratedTextData + finish_reason: FinishReason + usage: AnalyzeTaskResultUsage = pydantic.Field() + """ + The number of tokens used in the generation. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/analyze_task_result_usage.py b/src/twelvelabs/types/analyze_task_result_usage.py new file mode 100644 index 0000000..5ed4848 --- /dev/null +++ b/src/twelvelabs/types/analyze_task_result_usage.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel + + +class AnalyzeTaskResultUsage(UniversalBaseModel): + """ + The number of tokens used in the generation. + """ + + output_tokens: int = pydantic.Field() + """ + The number of tokens in the generated text. + """ + + input_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + The number of tokens in the input prompt. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/analyze_task_status.py b/src/twelvelabs/types/analyze_task_status.py new file mode 100644 index 0000000..4e1971b --- /dev/null +++ b/src/twelvelabs/types/analyze_task_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AnalyzeTaskStatus = typing.Union[typing.Literal["queued", "pending", "processing", "ready", "failed"], typing.Any] diff --git a/src/twelvelabs/types/analyze_task_webhook_info.py b/src/twelvelabs/types/analyze_task_webhook_info.py new file mode 100644 index 0000000..bf24376 --- /dev/null +++ b/src/twelvelabs/types/analyze_task_webhook_info.py @@ -0,0 +1,41 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel + + +class AnalyzeTaskWebhookInfo(UniversalBaseModel): + """ + The delivery status of a webhook endpoint. + """ + + url: str = pydantic.Field() + """ + The URL of the webhook endpoint that received the delivery. + """ + + delivered: bool = pydantic.Field() + """ + Indicates whether the platform successfully delivered the webhook. + """ + + attempts: int = pydantic.Field() + """ + The total number of delivery attempts for this URL. + """ + + last_error: typing.Optional[str] = pydantic.Field(default=None) + """ + The error message from the last failed delivery attempt. This field appears only when `delivered` is `false`. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/analyze_temperature.py b/src/twelvelabs/types/analyze_temperature.py new file mode 100644 index 0000000..6bfd6a6 --- /dev/null +++ b/src/twelvelabs/types/analyze_temperature.py @@ -0,0 +1,3 @@ +# This file was auto-generated by Fern from our API Definition. + +AnalyzeTemperature = float diff --git a/src/twelvelabs/types/score_search_terms.py b/src/twelvelabs/types/analyze_text_prompt.py similarity index 72% rename from src/twelvelabs/types/score_search_terms.py rename to src/twelvelabs/types/analyze_text_prompt.py index f537dbd..1b58d50 100644 --- a/src/twelvelabs/types/score_search_terms.py +++ b/src/twelvelabs/types/analyze_text_prompt.py @@ -1,3 +1,3 @@ # This file was auto-generated by Fern from our API Definition. -ScoreSearchTerms = float +AnalyzeTextPrompt = str diff --git a/src/twelvelabs/types/audio_input_request.py b/src/twelvelabs/types/audio_input_request.py index 4110be3..8eba83b 100644 --- a/src/twelvelabs/types/audio_input_request.py +++ b/src/twelvelabs/types/audio_input_request.py @@ -44,7 +44,7 @@ class AudioInputRequest(UniversalBaseModel): - `audio`: Generates embeddings based on audio content (sounds, music, effects) - `transcription`: Generates embeddings based on transcribed speech - You can specify multiple options to generate different types of embeddings for the same audio. + You can specify multiple values to generate different types of embeddings for the same audio. 
""" embedding_scope: typing.Optional[typing.List[AudioInputRequestEmbeddingScopeItem]] = pydantic.Field(default=None) @@ -68,7 +68,7 @@ class AudioInputRequest(UniversalBaseModel): Specify both values to receive separate and fused embeddings in the same response. - **Default**: `separate_embedding`. + **Default**: `separate_embedding`. """ if IS_PYDANTIC_V2: diff --git a/src/twelvelabs/types/create_analyze_task_response.py b/src/twelvelabs/types/create_analyze_task_response.py new file mode 100644 index 0000000..6d96210 --- /dev/null +++ b/src/twelvelabs/types/create_analyze_task_response.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from .analyze_task_status import AnalyzeTaskStatus + + +class CreateAnalyzeTaskResponse(UniversalBaseModel): + """ + Response when creating a new analysis task. + """ + + task_id: str = pydantic.Field() + """ + The unique identifier of the analysis task. + """ + + status: AnalyzeTaskStatus + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/embedding_data.py b/src/twelvelabs/types/embedding_data.py index 6de355c..5d89a3a 100644 --- a/src/twelvelabs/types/embedding_data.py +++ b/src/twelvelabs/types/embedding_data.py @@ -20,7 +20,7 @@ class EmbeddingData(UniversalBaseModel): embedding_option: typing.Optional[EmbeddingDataEmbeddingOption] = pydantic.Field(default=None) """ - The modality used to generate this embedding. + The modality used to generate this embedding. **Values**: - `visual`: Embedding based on visual content (video only) diff --git a/src/twelvelabs/types/generated_text_data.py b/src/twelvelabs/types/generated_text_data.py new file mode 100644 index 0000000..86e22b0 --- /dev/null +++ b/src/twelvelabs/types/generated_text_data.py @@ -0,0 +1,3 @@ +# This file was auto-generated by Fern from our API Definition. + +GeneratedTextData = str diff --git a/src/twelvelabs/types/import_log.py b/src/twelvelabs/types/import_log.py index 780ae89..1f93255 100644 --- a/src/twelvelabs/types/import_log.py +++ b/src/twelvelabs/types/import_log.py @@ -36,7 +36,7 @@ class ImportLog(UniversalBaseModel): video_status: typing.Optional[ImportLogVideoStatus] = pydantic.Field(default=None) """ - Counts of files in different statuses. See the [Task object](/v1.3/api-reference/upload-content/tasks/the-task-object) page for details on each status. + Counts of files in different statuses. See the [Task object](/v1.3/api-reference/upload-content/tasks/the-task-object) page details on possible values. """ failed_files: typing.Optional[typing.List[ImportLogFailedFilesItem]] = pydantic.Field(default=None) diff --git a/src/twelvelabs/types/import_log_video_status.py b/src/twelvelabs/types/import_log_video_status.py index de8e3e5..0e131a6 100644 --- a/src/twelvelabs/types/import_log_video_status.py +++ b/src/twelvelabs/types/import_log_video_status.py @@ -8,7 +8,7 @@ class ImportLogVideoStatus(UniversalBaseModel): """ - Counts of files in different statuses. See the [Task object](/v1.3/api-reference/upload-content/tasks/the-task-object) page for details on each status. + Counts of files in different statuses. 
See the [Task object](/v1.3/api-reference/upload-content/tasks/the-task-object) page for details on possible values. """ ready: int diff --git a/src/twelvelabs/types/media_source.py b/src/twelvelabs/types/media_source.py index 0d8cdc3..f3a9dbd 100644 --- a/src/twelvelabs/types/media_source.py +++ b/src/twelvelabs/types/media_source.py @@ -29,7 +29,7 @@ class MediaSource(UniversalBaseModel): asset_id: typing.Optional[str] = pydantic.Field(default=None) """ - The unique identifier of an asset from a [direct](/v1.3/api-reference/upload-content) or [multipart](/v1.3/api-reference/upload-content/multipart-uploads) upload. + The unique identifier of an asset from a [direct](/v1.3/api-reference/upload-content/direct-uploads) or [multipart](/v1.3/api-reference/upload-content/multipart-uploads) upload. """ if IS_PYDANTIC_V2: diff --git a/src/twelvelabs/types/multi_input_media_source.py b/src/twelvelabs/types/multi_input_media_source.py index 69432ad..48fde88 100644 --- a/src/twelvelabs/types/multi_input_media_source.py +++ b/src/twelvelabs/types/multi_input_media_source.py @@ -25,12 +25,12 @@ class MultiInputMediaSource(UniversalBaseModel): """ The type of media. - **Value**: `image` + **Value**: `image` """ url: typing.Optional[str] = pydantic.Field(default=None) """ - The publicly accessible URL of the image file Use direct links to raw image files. Image hosting platforms and cloud storage sharing links are not supported. + The publicly accessible URL of the image file. Use direct links to raw image files. Image hosting platforms and cloud storage sharing links are not supported. """ base_64_string: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="base64_string")] = ( diff --git a/src/twelvelabs/types/non_stream_analyze_response.py b/src/twelvelabs/types/non_stream_analyze_response.py index b45d096..981b89c 100644 --- a/src/twelvelabs/types/non_stream_analyze_response.py +++ b/src/twelvelabs/types/non_stream_analyze_response.py @@ -5,6 +5,7 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel from .finish_reason import FinishReason +from .generated_text_data import GeneratedTextData from .token_usage import TokenUsage @@ -18,11 +19,7 @@ class NonStreamAnalyzeResponse(UniversalBaseModel): """ Unique identifier of the response. """ - data: typing.Optional[str] = pydantic.Field(default=None) - """ - The generated text based on the prompt you provided. - """ - + data: typing.Optional[GeneratedTextData] = None finish_reason: typing.Optional[FinishReason] = None usage: typing.Optional[TokenUsage] = None diff --git a/src/twelvelabs/types/one.py b/src/twelvelabs/types/one.py new file mode 100644 index 0000000..1a17499 --- /dev/null +++ b/src/twelvelabs/types/one.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel + + +class One(UniversalBaseModel): + """ + Provide the video via a unique identifier of an asset. + """ + + asset_id: str = pydantic.Field() + """ + The unique identifier of an asset from a [direct](/v1.3/api-reference/upload-content/direct-uploads) or [multipart](/v1.3/api-reference/upload-content/multipart-uploads) upload. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/response_format.py b/src/twelvelabs/types/response_format.py index 6be5a8a..48bb997 100644 --- a/src/twelvelabs/types/response_format.py +++ b/src/twelvelabs/types/response_format.py @@ -9,7 +9,7 @@ class ResponseFormat(UniversalBaseModel): """ - Use this parameter to specify the format of the response. When you omit this parameter, the platform returns unstructured text. + Specifies the format of the response. When you omit this parameter, the platform returns unstructured text. """ type: ResponseFormatType = pydantic.Field() diff --git a/src/twelvelabs/types/search_item.py b/src/twelvelabs/types/search_item.py index dc098e2..5f43b51 100644 --- a/src/twelvelabs/types/search_item.py +++ b/src/twelvelabs/types/search_item.py @@ -4,10 +4,8 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .confidence import Confidence from .end_time import EndTime from .rank import Rank -from .score_search_terms import ScoreSearchTerms from .search_item_clips_item import SearchItemClipsItem from .start_time import StartTime from .thumbnail_url import ThumbnailUrl @@ -19,7 +17,6 @@ class SearchItem(UniversalBaseModel): An object that contains the search results. """ - score: typing.Optional[ScoreSearchTerms] = None start: typing.Optional[StartTime] = None end: typing.Optional[EndTime] = None video_id: typing.Optional[str] = pydantic.Field(default=None) @@ -27,7 +24,6 @@ class SearchItem(UniversalBaseModel): A string representing the unique identifier of the video. Once the platform indexes a video, it assigns a unique identifier. Note that this is different from the identifier of the video indexing task. """ - confidence: typing.Optional[Confidence] = None rank: typing.Optional[Rank] = None thumbnail_url: typing.Optional[ThumbnailUrl] = None transcription: typing.Optional[str] = pydantic.Field(default=None) diff --git a/src/twelvelabs/types/search_item_clips_item.py b/src/twelvelabs/types/search_item_clips_item.py index 1760908..1b4e671 100644 --- a/src/twelvelabs/types/search_item_clips_item.py +++ b/src/twelvelabs/types/search_item_clips_item.py @@ -4,20 +4,16 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel -from .confidence import Confidence from .end_time import EndTime from .rank import Rank -from .score_search_terms import ScoreSearchTerms from .start_time import StartTime from .thumbnail_url import ThumbnailUrl from .user_metadata import UserMetadata class SearchItemClipsItem(UniversalBaseModel): - score: typing.Optional[ScoreSearchTerms] = None start: typing.Optional[StartTime] = None end: typing.Optional[EndTime] = None - confidence: typing.Optional[Confidence] = None rank: typing.Optional[Rank] = None thumbnail_url: typing.Optional[ThumbnailUrl] = None transcription: typing.Optional[str] = pydantic.Field(default=None) diff --git a/src/twelvelabs/types/threshold_search.py b/src/twelvelabs/types/threshold_search.py deleted file mode 100644 index b114d15..0000000 --- a/src/twelvelabs/types/threshold_search.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ThresholdSearch = typing.Union[typing.Literal["high", "medium", "low", "none"], typing.Any] diff --git a/src/twelvelabs/types/two.py b/src/twelvelabs/types/two.py new file mode 100644 index 0000000..f4395f2 --- /dev/null +++ b/src/twelvelabs/types/two.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ..core.serialization import FieldMetadata + + +class Two(UniversalBaseModel): + """ + Provide the video via base64-encoded data. + """ + + base_64_string: typing_extensions.Annotated[str, FieldMetadata(alias="base64_string")] = pydantic.Field() + """ + The base64-encoded video data. The maximum size is 30MB. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/url.py b/src/twelvelabs/types/url.py new file mode 100644 index 0000000..9f58967 --- /dev/null +++ b/src/twelvelabs/types/url.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel + + +class Url(UniversalBaseModel): + """ + Provide the video via a URL. + """ + + url: str = pydantic.Field() + """ + The publicly accessible URL of the video file. + + Use direct links to raw media files. Video hosting platforms and cloud storage sharing links are not supported. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/twelvelabs/types/video_context.py b/src/twelvelabs/types/video_context.py new file mode 100644 index 0000000..47bee0b --- /dev/null +++ b/src/twelvelabs/types/video_context.py @@ -0,0 +1,67 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel +from ..core.serialization import FieldMetadata + + +class VideoContext_Url(UniversalBaseModel): + """ + An object specifying the source of the video content. Include exactly one source. + """ + + type: typing.Literal["url"] = "url" + url: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class VideoContext_AssetId(UniversalBaseModel): + """ + An object specifying the source of the video content. Include exactly one source. + """ + + type: typing.Literal["asset_id"] = "asset_id" + asset_id: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class VideoContext_Base64String(UniversalBaseModel): + """ + An object specifying the source of the video content. Include exactly one source. 
+ """ + + type: typing.Literal["base64_string"] = "base64_string" + base_64_string: typing_extensions.Annotated[str, FieldMetadata(alias="base64_string")] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +VideoContext = typing.Union[VideoContext_Url, VideoContext_AssetId, VideoContext_Base64String] diff --git a/src/twelvelabs/types/video_input_request.py b/src/twelvelabs/types/video_input_request.py index 75156fa..aa50636 100644 --- a/src/twelvelabs/types/video_input_request.py +++ b/src/twelvelabs/types/video_input_request.py @@ -44,7 +44,7 @@ class VideoInputRequest(UniversalBaseModel): - `audio`: Generates embeddings based on audio content (sounds, music, effects) - `transcription`: Generates embeddings based on transcribed speech - You can specify multiple options to generate different types of embeddings for the same video. + You can specify multiple values to generate different types of embeddings for the same video. **Default**: `["visual", "audio", "transcription"]` """ @@ -64,7 +64,7 @@ class VideoInputRequest(UniversalBaseModel): embedding_type: typing.Optional[typing.List[VideoInputRequestEmbeddingTypeItem]] = pydantic.Field(default=None) """ - Specifies how to structure the embedding. Include this parameter only when `embedding_option` contains at least two values. + Specifies how to structure the embedding. Include this parameter only when `embedding_option` contains at least two values. **Values**: - `separate_embedding`: Returns separate embeddings per modality specified in `embedding_option` diff --git a/src/twelvelabs/wrapper/search_client_wrapper.py b/src/twelvelabs/wrapper/search_client_wrapper.py index 8a8cdae..10a8222 100644 --- a/src/twelvelabs/wrapper/search_client_wrapper.py +++ b/src/twelvelabs/wrapper/search_client_wrapper.py @@ -10,10 +10,6 @@ SearchCreateRequestSearchOptionsItem, ) from ..search.types.search_create_request_group_by import SearchCreateRequestGroupBy -from ..types.threshold_search import ThresholdSearch -from ..search.types.search_create_request_sort_option import ( - SearchCreateRequestSortOption, -) from ..search.types.search_create_request_operator import SearchCreateRequestOperator from ..types.search_results import SearchResults from ..core.request_options import RequestOptions @@ -62,10 +58,10 @@ def query( query_media_urls: typing.Optional[typing.List[str]] = None, query_media_files: typing.Optional[typing.List[core.File]] = None, query_text: typing.Optional[str] = OMIT, - adjust_confidence_level: typing.Optional[float] = OMIT, + adjust_confidence_level: typing.Optional[float] = OMIT, # Deprecated: ignored by the API. Use the `rank` field in the response instead. group_by: typing.Optional[SearchCreateRequestGroupBy] = OMIT, - threshold: typing.Optional[ThresholdSearch] = OMIT, - sort_option: typing.Optional[SearchCreateRequestSortOption] = OMIT, + threshold: typing.Optional[typing.Any] = OMIT, # Deprecated: ignored by the API. Use the `rank` field in the response instead. + sort_option: typing.Optional[str] = OMIT, # Deprecated: ignored by the API. Use the `rank` field in the response instead. 
operator: typing.Optional[SearchCreateRequestOperator] = OMIT, page_limit: typing.Optional[int] = OMIT, filter: typing.Optional[str] = OMIT, @@ -75,45 +71,45 @@ def query( ) -> SyncPager[SearchItem]: """ Use this endpoint to search for relevant matches in an index using text, media, or a combination of both as your query. - + **Text queries**: - Use the `query_text` parameter to specify your query. - + **Media queries**: - Set the `query_media_type` parameter to the corresponding media type (example: `image`). - Specify either one of the following parameters: - `query_media_url`: Publicly accessible URL of your media file. - `query_media_file`: Local media file. If both `query_media_url` and `query_media_file` are specified in the same request, `query_media_url` takes precedence. - + **Composed text and media queries** (Marengo 3.0 only): - Use the `query_text` parameter for your text query. - Set `query_media_type` to `image`. - Specify the image using either the `query_media_url` or the `query_media_file` parameter. - + Example: Provide an image of a car and include "red color" in your query to find red instances of that car model. - + When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [format requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements). - + This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - + Parameters ---------- index_id : str The unique identifier of the index to search. - + search_options : typing.List[SearchCreateRequestSearchOptionsItem] Specifies the modalities the video understanding model uses to find relevant information. - + Available options: - `visual`: Searches visual content. - `audio`: Searches non-speech audio (Marengo 3.0) or all audio (Marengo 2.7). - `transcription`: Spoken words (Marengo 3.0 only) - + - You can specify multiple search options in conjunction with the [`operator`](/v1.3/api-reference/any-to-video-search/make-search-request#request.body.operator.operator) parameter described below to broaden or narrow your search. For example, to search using both visual and non-speech audio content, include this parameter two times in the request as shown below: ```JSON @@ -122,70 +118,57 @@ def query( --form search_options=transcription \ ``` - + For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. - + query_media_type : typing.Optional[typing.Literal["image"]] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. - + query_media_url : typing.Optional[str] The publicly accessible URL of the media file you wish to use. This parameter is required for media queries if `query_media_file` is not provided. - + query_media_file : typing.Optional[core.File] See core.File for more documentation - + query_text : typing.Optional[str] The text query to search for. This parameter is required for text queries. Note that the platform supports full natural language-based search. You can use this parameter together with `query_media_type` and `query_media_url` or `query_media_file` to perform a composed image+text search. - - + + The maximum query length varies by model. 
Marengo 3.0 supports up to 500 tokens per query, while Marengo 2.7 supports up to 77 tokens per query. - + transcription_options : typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when using Marengo 3.0 with the `search_options` parameter containing the `transcription` value. - + Available options: - `lexical`: Exact word matching - `semantic`: Meaning-based matching - + For details on when to use each option, see the [Transcription options](/v1.3/docs/concepts/modalities#transcription-options) section. - + **Default**: `["lexical", "semantic"]`. - + adjust_confidence_level : typing.Optional[float] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - This parameter specifies the strictness of the thresholds for assigning the high, medium, or low confidence levels to search results. If you use a lower value, the thresholds become more relaxed, and more search results will be classified as having high, medium, or low confidence levels. You can use this parameter to include a broader range of potentially relevant video clips, even if some results might be less precise. - - **Min**: 0 - **Max**: 1 - **Default:** 0.5 - + .. deprecated:: + This parameter is deprecated and ignored by the API. + Use the `rank` field in the response instead. + group_by : typing.Optional[SearchCreateRequestGroupBy] Use this parameter to group or ungroup items in a response. It can take one of the following values: - `video`: The platform will group the matching video clips in the response by video. - `clip`: The matching video clips in the response will not be grouped. - + **Default:** `clip` - - threshold : typing.Optional[ThresholdSearch] - - sort_option : typing.Optional[SearchCreateRequestSortOption] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - - Use this parameter to specify the sort order for the response. - - When performing a search, the platform assigns a relevance ranking to each video clip that matches your search terms. By default, the search results are sorted by relevance ranking in ascending order, with 1 being the most relevant result. - - If you set this parameter to `score` and `group_by` is set to `video`, the platform will determine the highest relevance ranking (lowest number) for each video and sort the videos in the response by this ranking. For each video, the matching video clips will be sorted by relevance ranking in ascending order. - - If you set this parameter to `clip_count` and `group_by` is set to `video`, the platform will sort the videos in the response by the number of clips. For each video, the matching video clips will be sorted by relevance ranking in ascending order. You can use `clip_count` only when the matching video clips are grouped by video. - - - **Default:** `score` - + + threshold : typing.Optional[typing.Any] + .. deprecated:: + This parameter is deprecated and ignored by the API. + Use the `rank` field in the response instead. + + sort_option : typing.Optional[str] + .. 
deprecated:: + This parameter is deprecated and ignored by the API. + Use the `rank` field in the response instead. + operator : typing.Optional[SearchCreateRequestOperator] Combines multiple search options using `or` or `and`. Use `and` to find segments matching all search options. Use `or` to find segments matching any search option. For detailed guidance on using this parameter, see the [Combine multiple modalities](/v1.3/docs/concepts/modalities#combine-multiple-modalities) section. @@ -259,16 +242,14 @@ def query( _has_plural = query_media_urls is not None or query_media_files is not None if _has_plural: + # Note: adjust_confidence_level, threshold, sort_option are deprecated and not sent to the API. _data: typing.Dict[str, typing.Any] = { "index_id": index_id, "search_options": search_options, "query_media_type": query_media_type, "query_media_url": query_media_urls if query_media_urls is not None else query_media_url, "query_text": query_text, - "adjust_confidence_level": adjust_confidence_level, "group_by": group_by, - "threshold": threshold, - "sort_option": sort_option, "operator": operator, "page_limit": page_limit, "filter": filter, @@ -294,6 +275,7 @@ def query( parse_obj_as(type_=SearchResults, object_=_http_response.json()), ) else: + # Note: adjust_confidence_level, threshold, sort_option are deprecated and not sent to the API. _response = self.create( index_id=index_id, search_options=search_options, @@ -301,10 +283,7 @@ def query( query_media_url=query_media_url, query_media_file=query_media_file, query_text=query_text, - adjust_confidence_level=adjust_confidence_level, group_by=group_by, - threshold=threshold, - sort_option=sort_option, operator=operator, page_limit=page_limit, filter=filter, @@ -366,10 +345,10 @@ async def query( query_media_urls: typing.Optional[typing.List[str]] = None, query_media_files: typing.Optional[typing.List[core.File]] = None, query_text: typing.Optional[str] = OMIT, - adjust_confidence_level: typing.Optional[float] = OMIT, + adjust_confidence_level: typing.Optional[float] = OMIT, # Deprecated: ignored by the API. Use the `rank` field in the response instead. group_by: typing.Optional[SearchCreateRequestGroupBy] = OMIT, - threshold: typing.Optional[ThresholdSearch] = OMIT, - sort_option: typing.Optional[SearchCreateRequestSortOption] = OMIT, + threshold: typing.Optional[typing.Any] = OMIT, # Deprecated: ignored by the API. Use the `rank` field in the response instead. + sort_option: typing.Optional[str] = OMIT, # Deprecated: ignored by the API. Use the `rank` field in the response instead. operator: typing.Optional[SearchCreateRequestOperator] = OMIT, page_limit: typing.Optional[int] = OMIT, filter: typing.Optional[str] = OMIT, @@ -379,45 +358,45 @@ async def query( ) -> AsyncPager[SearchItem]: """ Use this endpoint to search for relevant matches in an index using text, media, or a combination of both as your query. - + **Text queries**: - Use the `query_text` parameter to specify your query. - + **Media queries**: - Set the `query_media_type` parameter to the corresponding media type (example: `image`). - Specify either one of the following parameters: - `query_media_url`: Publicly accessible URL of your media file. - `query_media_file`: Local media file. If both `query_media_url` and `query_media_file` are specified in the same request, `query_media_url` takes precedence. - + **Composed text and media queries** (Marengo 3.0 only): - Use the `query_text` parameter for your text query. - Set `query_media_type` to `image`. 
- Specify the image using either the `query_media_url` or the `query_media_file` parameter. - + Example: Provide an image of a car and include "red color" in your query to find red instances of that car model. - + When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [format requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements). - + This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page. - + Parameters ---------- index_id : str The unique identifier of the index to search. - + search_options : typing.List[SearchCreateRequestSearchOptionsItem] Specifies the modalities the video understanding model uses to find relevant information. - + Available options: - `visual`: Searches visual content. - `audio`: Searches non-speech audio (Marengo 3.0) or all audio (Marengo 2.7). - `transcription`: Spoken words (Marengo 3.0 only) - + - You can specify multiple search options in conjunction with the [`operator`](/v1.3/api-reference/any-to-video-search/make-search-request#request.body.operator.operator) parameter described below to broaden or narrow your search. For example, to search using both visual and non-speech audio content, include this parameter two times in the request as shown below: ```JSON @@ -426,99 +405,86 @@ async def query( --form search_options=transcription \ ``` - + For detailed guidance and version-specific behavior, see the [Search options](/v1.3/docs/concepts/modalities#search-options) section. - + query_media_type : typing.Optional[typing.Literal["image"]] The type of media you wish to use. This parameter is required for media queries. For example, to perform an image-based search, set this parameter to `image`. Use `query_text` together with this parameter when you want to perform a composed image+text search. - + query_media_url : typing.Optional[str] The publicly accessible URL of the media file you wish to use. This parameter is required for media queries if `query_media_file` is not provided. - + query_media_file : typing.Optional[core.File] See core.File for more documentation - + query_text : typing.Optional[str] The text query to search for. This parameter is required for text queries. Note that the platform supports full natural language-based search. You can use this parameter together with `query_media_type` and `query_media_url` or `query_media_file` to perform a composed image+text search. - - + + The maximum query length varies by model. Marengo 3.0 supports up to 500 tokens per query, while Marengo 2.7 supports up to 77 tokens per query. - + transcription_options : typing.Optional[typing.List[SearchCreateRequestTranscriptionOptionsItem]] Specifies how the platform matches your text query with the words spoken in the video. This parameter applies only when using Marengo 3.0 with the `search_options` parameter containing the `transcription` value. - + Available options: - `lexical`: Exact word matching - `semantic`: Meaning-based matching - + For details on when to use each option, see the [Transcription options](/v1.3/docs/concepts/modalities#transcription-options) section. - + **Default**: `["lexical", "semantic"]`. - + adjust_confidence_level : typing.Optional[float] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. 
- - This parameter specifies the strictness of the thresholds for assigning the high, medium, or low confidence levels to search results. If you use a lower value, the thresholds become more relaxed, and more search results will be classified as having high, medium, or low confidence levels. You can use this parameter to include a broader range of potentially relevant video clips, even if some results might be less precise. - - **Min**: 0 - **Max**: 1 - **Default:** 0.5 - + .. deprecated:: + This parameter is deprecated and ignored by the API. + Use the `rank` field in the response instead. + group_by : typing.Optional[SearchCreateRequestGroupBy] Use this parameter to group or ungroup items in a response. It can take one of the following values: - `video`: The platform will group the matching video clips in the response by video. - `clip`: The matching video clips in the response will not be grouped. - + **Default:** `clip` - - threshold : typing.Optional[ThresholdSearch] - - sort_option : typing.Optional[SearchCreateRequestSortOption] - - This parameter is deprecated in Marengo 3.0 and newer versions. Use the [`rank`](/v1.3/api-reference/any-to-video-search/make-search-request#response.body.data.rank) field in the response instead, which indicates the relevance ranking assigned by the model. - - - Use this parameter to specify the sort order for the response. - - When performing a search, the platform assigns a relevance ranking to each video clip that matches your search terms. By default, the search results are sorted by relevance ranking in ascending order, with 1 being the most relevant result. - - If you set this parameter to `score` and `group_by` is set to `video`, the platform will determine the highest relevance ranking (lowest number) for each video and sort the videos in the response by this ranking. For each video, the matching video clips will be sorted by relevance ranking in ascending order. - - If you set this parameter to `clip_count` and `group_by` is set to `video`, the platform will sort the videos in the response by the number of clips. For each video, the matching video clips will be sorted by relevance ranking in ascending order. You can use `clip_count` only when the matching video clips are grouped by video. - - - **Default:** `score` - + + threshold : typing.Optional[typing.Any] + .. deprecated:: + This parameter is deprecated and ignored by the API. + Use the `rank` field in the response instead. + + sort_option : typing.Optional[str] + .. deprecated:: + This parameter is deprecated and ignored by the API. + Use the `rank` field in the response instead. + operator : typing.Optional[SearchCreateRequestOperator] Combines multiple search options using `or` or `and`. Use `and` to find segments matching all search options. Use `or` to find segments matching any search option. For detailed guidance on using this parameter, see the [Combine multiple modalities](/v1.3/docs/concepts/modalities#combine-multiple-modalities) section. - + **Default**: `or`. - + page_limit : typing.Optional[int] The number of items to return on each page. When grouping by video, this parameter represents the number of videos per page. Otherwise, it represents the maximum number of video clips per page. - + **Max**: `50`. - + filter : typing.Optional[str] Specifies a stringified JSON object to filter your search results. Supports both system-generated metadata (example: video ID, duration) and user-defined metadata. 
- + **Syntax for filtering** - + The following table describes the supported data types, operators, and filter syntax: - + | Data type | Operator | Description | Syntax | |:----------|:---------|:------------|:-------| | String | `=` | Matches results equal to the specified value. | `{"field": "value"}` | | Array of strings | `=` | Matches results with any value in the specified array. Supported only for `id`. | `{"id": ["value1", "value2"]}` | | Numeric (integer, float) | `=`, `lte`, `gte` | Matches results equal to or within a range of the specified value. | `{"field": number}` or `{"field": { "gte": number, "lte": number }}` | | Boolean | `=` | Matches results equal to the specified boolean value. | `{"field": true}` or `{"field": false}`. | - +
**System-generated metadata** - + The table below describes the system-generated metadata available for filtering your search results: - + | Field name | Description | Type | Example | |:-----------|:------------|:-----|:--------| | `id` | Filters by specific video IDs. | Array of strings | `{"id": ["67cec9caf45d9b64a58340fc", "67cec9baf45d9b64a58340fa"]}`. | @@ -527,60 +493,58 @@ async def query( | `height` | Filters by video height (in pixels). | Number or object with `gte` and `lte`. | `{"height": 1080}` or `{"height": { "gte": 720, "lte": 1080 }}`. | | `size` | Filters by video size (in bytes) | Number or object with `gte` and `lte`. | `{"size": 1048576}` or `{"size": { "gte": 1048576, "lte": 5242880}}` | | `filename` | Filters by the exact file name. | String | `{"filename": "Animal Encounters part 1"}` | - +
**User-defined metadata** - + To filter by user-defined metadata: 1. Add metadata to your video by calling the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/:index-id/videos/:video-id` endpoint 2. Reference the custom field in your filter object. For example, to filter videos where a custom field named `needsReview` of type boolean is `true`, use `{"needs_review": true}`. - + For more details and examples, see the [Filter search results](/v1.3/docs/guides/search/filtering) page. - + include_user_metadata : typing.Optional[bool] Specifies whether to include user-defined metadata in the search results. - + request_options : typing.Optional[RequestOptions] Request-specific configuration. - + Returns ------- SearchResults Successfully performed a search request. - + Examples -------- import asyncio - + from twelvelabs import AsyncTwelveLabs - + client = AsyncTwelveLabs( api_key="YOUR_API_KEY", ) - - + + async def main() -> None: await client.search.create( index_id="index_id", search_options=["visual"], ) - - + + asyncio.run(main()) """ _has_plural = query_media_urls is not None or query_media_files is not None if _has_plural: + # Note: adjust_confidence_level, threshold, sort_option are deprecated and not sent to the API. _data: typing.Dict[str, typing.Any] = { "index_id": index_id, "search_options": search_options, "query_media_type": query_media_type, "query_media_url": query_media_urls if query_media_urls is not None else query_media_url, "query_text": query_text, - "adjust_confidence_level": adjust_confidence_level, "group_by": group_by, - "threshold": threshold, - "sort_option": sort_option, "operator": operator, "page_limit": page_limit, "filter": filter, @@ -606,6 +570,7 @@ async def main() -> None: parse_obj_as(type_=SearchResults, object_=_http_response.json()), ) else: + # Note: adjust_confidence_level, threshold, sort_option are deprecated and not sent to the API. _response = await self.create( index_id=index_id, search_options=search_options, @@ -613,10 +578,7 @@ async def main() -> None: query_media_url=query_media_url, query_media_file=query_media_file, query_text=query_text, - adjust_confidence_level=adjust_confidence_level, group_by=group_by, - threshold=threshold, - sort_option=sort_option, operator=operator, page_limit=page_limit, filter=filter,
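
The wrapper changes above keep `adjust_confidence_level`, `threshold`, and `sort_option` in the `query` signature for backward compatibility but stop sending them to the API; relevance now surfaces only through the `rank` field on each result. A minimal sketch of the updated call path, assuming the synchronous `TwelveLabs` client mirrors the `AsyncTwelveLabs` constructor shown in the docstrings and that the pager is iterable; the API key and index ID are placeholders:

```python
# A minimal migration sketch, assuming a synchronous TwelveLabs client and
# placeholder credentials. The deprecated adjust_confidence_level, threshold,
# and sort_option arguments are accepted but no longer sent to the API.
from twelvelabs import TwelveLabs

client = TwelveLabs(api_key="YOUR_API_KEY")  # placeholder key

# query() returns a SyncPager[SearchItem]; each item carries `rank` instead
# of the removed `score` and `confidence` fields.
pager = client.search.query(
    index_id="YOUR_INDEX_ID",  # placeholder index
    query_text="red car driving at night",
    search_options=["visual", "audio"],
    operator="or",
    group_by="clip",
    page_limit=10,
)

for item in pager:
    # rank is ascending: 1 is the most relevant clip.
    print(item.rank, item.video_id, item.start, item.end)
```

Since the platform already returns results in ascending rank order by default, sorting client-side by `rank` reproduces the behavior of the removed `sort_option="score"` setting.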
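The new `AnalyzeTask*` models added under `twelvelabs/types` describe the lifecycle of an asynchronous analysis task: it starts as `queued` or `pending`, moves through `processing`, and ends as `ready` (with `result` populated) or `failed` (with `error` populated). A consumption sketch using only the models defined in this diff; the call that actually retrieves an `AnalyzeTaskResponse` is not part of this diff, so it appears below only as a hypothetical comment:

```python
# A sketch of consuming the new analysis-task models. Only the models are
# defined in this diff; the retrieval call is hypothetical.
from twelvelabs.types import AnalyzeTaskResponse


def report(task: AnalyzeTaskResponse) -> None:
    # task = client.analyze_tasks.retrieve(task_id)  # hypothetical retrieval call
    if task.status == "ready" and task.result is not None:
        # result.data is the generated text (GeneratedTextData aliases str);
        # result.usage counts the tokens consumed by the generation.
        print(task.result.data)
        print(f"output tokens: {task.result.usage.output_tokens}")
    elif task.status == "failed" and task.error is not None:
        print(f"task {task.task_id} failed: {task.error.message}")
    else:
        # queued, pending, or processing: check again later.
        print(f"task {task.task_id} is still {task.status}")
```

When webhooks are registered, `task.webhooks` lists per-endpoint delivery status (`url`, `delivered`, `attempts`, and `last_error` on failed deliveries).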
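`VideoContext` is a discriminated union over three source objects, each tagged by a literal `type` field that defaults in the constructor, and each docstring requires exactly one source. A construction sketch, assuming the exports declared in `twelvelabs/types/__init__.py` above and that `UniversalBaseModel` allows population by field name (so `base_64_string` can be passed instead of its `base64_string` alias); all values are placeholders:

```python
# Constructing each VideoContext variant; all values are placeholders.
from twelvelabs.types import (
    VideoContext,
    VideoContext_AssetId,
    VideoContext_Base64String,
    VideoContext_Url,
)

by_url: VideoContext = VideoContext_Url(url="https://example.com/video.mp4")
by_asset: VideoContext = VideoContext_AssetId(asset_id="asset_123")
# Base64-encoded video data; the parallel `Two` model documents a 30MB cap.
by_data: VideoContext = VideoContext_Base64String(base_64_string="AAAA...")
```

On the wire the discriminator serializes as `type`, and the base64 field serializes under its `base64_string` alias via `FieldMetadata`.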