diff --git a/poetry.lock b/poetry.lock
index a19d7e2..3cc8b76 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -38,13 +38,13 @@ trio = ["trio (>=0.26.1)"]
[[package]]
name = "certifi"
-version = "2025.11.12"
+version = "2026.1.4"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.7"
files = [
- {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"},
- {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"},
+ {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"},
+ {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"},
]
[[package]]
@@ -222,13 +222,13 @@ files = [
[[package]]
name = "packaging"
-version = "25.0"
+version = "26.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
- {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
+ {file = "packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529"},
+ {file = "packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4"},
]
[[package]]
@@ -483,53 +483,58 @@ files = [
[[package]]
name = "tomli"
-version = "2.3.0"
+version = "2.4.0"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
- {file = "tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45"},
- {file = "tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba"},
- {file = "tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf"},
- {file = "tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441"},
- {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845"},
- {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c"},
- {file = "tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456"},
- {file = "tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be"},
- {file = "tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac"},
- {file = "tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22"},
- {file = "tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f"},
- {file = "tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52"},
- {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8"},
- {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6"},
- {file = "tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876"},
- {file = "tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878"},
- {file = "tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b"},
- {file = "tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae"},
- {file = "tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b"},
- {file = "tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf"},
- {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f"},
- {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05"},
- {file = "tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606"},
- {file = "tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999"},
- {file = "tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e"},
- {file = "tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3"},
- {file = "tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc"},
- {file = "tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0"},
- {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879"},
- {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005"},
- {file = "tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463"},
- {file = "tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8"},
- {file = "tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77"},
- {file = "tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf"},
- {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530"},
- {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b"},
- {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67"},
- {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f"},
- {file = "tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0"},
- {file = "tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba"},
- {file = "tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b"},
- {file = "tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549"},
+ {file = "tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867"},
+ {file = "tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9"},
+ {file = "tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95"},
+ {file = "tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76"},
+ {file = "tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d"},
+ {file = "tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576"},
+ {file = "tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a"},
+ {file = "tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa"},
+ {file = "tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614"},
+ {file = "tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1"},
+ {file = "tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8"},
+ {file = "tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a"},
+ {file = "tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1"},
+ {file = "tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b"},
+ {file = "tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51"},
+ {file = "tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729"},
+ {file = "tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da"},
+ {file = "tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3"},
+ {file = "tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0"},
+ {file = "tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e"},
+ {file = "tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4"},
+ {file = "tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e"},
+ {file = "tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c"},
+ {file = "tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f"},
+ {file = "tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86"},
+ {file = "tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87"},
+ {file = "tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132"},
+ {file = "tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6"},
+ {file = "tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc"},
+ {file = "tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66"},
+ {file = "tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d"},
+ {file = "tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702"},
+ {file = "tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8"},
+ {file = "tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776"},
+ {file = "tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475"},
+ {file = "tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2"},
+ {file = "tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9"},
+ {file = "tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0"},
+ {file = "tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df"},
+ {file = "tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d"},
+ {file = "tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f"},
+ {file = "tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b"},
+ {file = "tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087"},
+ {file = "tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd"},
+ {file = "tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4"},
+ {file = "tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a"},
+ {file = "tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index 9eef522..d99e091 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "twelvelabs"
[tool.poetry]
name = "twelvelabs"
-version = "1.2.0b0"
+version = "1.1.1"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 1547bf0..43487d1 100644
--- a/reference.md
+++ b/reference.md
@@ -11,6 +11,11 @@
-
+
+
+ This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+
+
This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
@@ -152,11 +157,11 @@ If you omit this parameter, the platform returns unstructured text.
-
-This endpoint analyzes videos and generates titles, topics, and hashtags.
-
-
-This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+
+This method analyzes videos and generates titles, topics, and hashtags.
@@ -224,136 +229,6 @@ Specifies the type of gist. Use one of the following values:
-
-
-
-
-client.generate(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-
-This endpoint is deprecated. Use the [`/analyze`](/v1.3/api-reference/analyze-videos/analyze) endpoint instead, which provides identical functionality.
-
-
-
-This endpoint generates open-ended texts based on your videos, including but not limited to tables of content, action items, memos, and detailed analyses.
-
-
-- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-- This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses) guide.
-
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from twelvelabs import TwelveLabs
-
-client = TwelveLabs(
- api_key="YOUR_API_KEY",
-)
-client.generate(
- video_id="6298d673f1090f1100476d4c",
- prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
- temperature=0.2,
- stream=True,
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**video_id:** `str` — The unique identifier of the video for which you wish to generate a text.
-
-
-
-
-
--
-
-**prompt:** `str`
-
-A prompt that guides the model on the desired format or content.
-
-
-- Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt.
-- Your prompts can be instructive or descriptive, or you can also phrase them as questions.
-- The maximum length of a prompt is 2,000 tokens.
-
-
-**Examples**:
-
-- Based on this video, I want to generate five keywords for SEO (Search Engine Optimization).
-- I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.
-
-
-
-
-
--
-
-**temperature:** `typing.Optional[float]`
-
-Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
-**Default:** 0.2
-**Min:** 0
-**Max:** 1
-
-
-
-
-
--
-
-**stream:** `typing.Optional[bool]`
-
-Set this parameter to `true` to enable streaming responses in the NDJSON format.
-
-**Default:** `true`
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
@@ -374,7 +249,7 @@ This endpoint analyzes your videos and creates fully customizable text based on
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-- This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+- This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
@@ -516,7 +391,7 @@ This endpoint analyzes your videos and creates fully customizable text based on
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
-- This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+- This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
@@ -1763,6 +1638,10 @@ This method creates an asset by uploading a file to the platform. Assets are med
- **Video analysis**: [Pegasus requirements](/v1.3/docs/concepts/models/pegasus#input-requirements)
- **Entity search**: [Marengo image requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements)
- **Create embeddings**: [Marengo requirements](/v1.3/docs/concepts/models/marengo#input-requirements)
+
+
+This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
@@ -2911,138 +2790,6 @@ client.entity_collections.update(
-
-
-
-
-## Manage entities
-client.manage_entities.list_all_entities(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-This method returns a list of entities from all entity collections.
-This is an internal API primarily used by the search interface.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from twelvelabs import TwelveLabs
-
-client = TwelveLabs(
- api_key="YOUR_API_KEY",
-)
-client.manage_entities.list_all_entities(
- page=1,
- page_limit=10,
- name="foo",
- status="processing",
- sort_by="created_at",
- sort_option="desc",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**page:** `typing.Optional[int]`
-
-A number that identifies the page to retrieve.
-
-**Default**: `1`.
-
-
-
-
-
--
-
-**page_limit:** `typing.Optional[int]`
-
-The number of items to return on each page.
-
-**Default**: `10`.
-**Max**: `50`.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Filter entities by name.
-
-
-
-
-
--
-
-**status:** `typing.Optional[ListAllEntitiesRequestStatus]` — Filter entities by status.
-
-
-
-
-
--
-
-**sort_by:** `typing.Optional[ListAllEntitiesRequestSortBy]` — Field to sort by.
-
-
-
-
-
--
-
-**sort_option:** `typing.Optional[str]`
-
-The sorting direction. The following options are available:
-- `asc`
-- `desc`
-
-**Default**: `desc`.
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
-
@@ -3087,7 +2834,8 @@ Parameters for embeddings:
- The Marengo video understanding model generates embeddings for all modalities in the same latent space. This shared space enables any-to-any searches across different types of content.
- You can create multiple types of embeddings in a single API call.
-- Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+- Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
@@ -3266,12 +3014,9 @@ Use this endpoint to search for relevant matches in an index using text, media,
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
-
- When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [format requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
-
-
-
-This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+- When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
+- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
@@ -3789,13 +3534,17 @@ The number of items to return on each page.
This method creates a new video embedding task that uploads a video to the platform and creates one or multiple video embeddings.
+
+This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Upload options:
- **Local file**: Use the `video_file` parameter
- **Publicly accessible URL**: Use the `video_url` parameter.
Specify at least one option. If both are provided, `video_url` takes precedence.
-Your video files must meet the [format requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
+Your video files must meet the [requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
This endpoint allows you to upload files up to 2 GB in size. To upload larger files, use the [Multipart Upload API](/v1.3/api-reference/upload-content/multipart-uploads)
@@ -4163,6 +3912,10 @@ This endpoint synchronously creates embeddings for multimodal content and return
- Video resolution: 360x360 to 5184x2160 pixels
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
+
+
+This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
@@ -4450,6 +4203,10 @@ This endpoint creates embeddings for audio and video content asynchronously.
1. Create a task using this endpoint. The platform returns a task ID.
2. Poll for the status of the task using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint. Wait until the status is `ready`.
3. Retrieve the embeddings from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint.
+
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
@@ -5707,6 +5464,10 @@ Your asset must meet the requirements based on your workflow:
- **Video analysis**: [Pegasus requirements](/v1.3/docs/concepts/models/pegasus#input-requirements).
If you want to both search and analyze your videos, the most restrictive requirements apply.
+
+
+This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
diff --git a/src/twelvelabs/__init__.py b/src/twelvelabs/__init__.py
index 5df6c68..dbfb888 100644
--- a/src/twelvelabs/__init__.py
+++ b/src/twelvelabs/__init__.py
@@ -59,7 +59,6 @@
ExpiresAt,
FinishReason,
ForbiddenErrorBody,
- GenerateResponse,
GetUploadStatusResponse,
Gist,
GistRequestTypesItem,
@@ -161,13 +160,12 @@
VideoVectorSystemMetadata,
)
from .errors import BadRequestError, ForbiddenError, InternalServerError, NotFoundError, TooManyRequestsError
-from . import assets, embed, entity_collections, indexes, manage_entities, multipart_upload, search, tasks
+from . import assets, embed, entity_collections, indexes, multipart_upload, search, tasks
from .assets import AssetsCreateRequestMethod, AssetsListRequestAssetTypesItem, AssetsListResponse
from .client import AsyncTwelveLabs, TwelveLabs
from .entity_collections import EntityCollectionsListRequestSortBy, EntityCollectionsListResponse
from .environment import TwelveLabsEnvironment
from .indexes import IndexesCreateRequestModelsItem, IndexesCreateResponse, IndexesListResponse
-from .manage_entities import ListAllEntitiesRequestSortBy, ListAllEntitiesRequestStatus, ListAllEntitiesResponse
from .multipart_upload import CreateAssetUploadRequestType
from .search import (
SearchCreateRequestGroupBy,
@@ -254,7 +252,6 @@
"FinishReason",
"ForbiddenError",
"ForbiddenErrorBody",
- "GenerateResponse",
"GetUploadStatusResponse",
"Gist",
"GistRequestTypesItem",
@@ -281,9 +278,6 @@
"InternalServerError",
"InternalServerErrorBody",
"LimitPerPageSimple",
- "ListAllEntitiesRequestSortBy",
- "ListAllEntitiesRequestStatus",
- "ListAllEntitiesResponse",
"ListIncompleteUploadsResponse",
"MediaEmbeddingTask",
"MediaEmbeddingTaskAudioEmbedding",
@@ -383,7 +377,6 @@
"embed",
"entity_collections",
"indexes",
- "manage_entities",
"multipart_upload",
"search",
"tasks",
diff --git a/src/twelvelabs/assets/client.py b/src/twelvelabs/assets/client.py
index 405c15a..1cf7670 100644
--- a/src/twelvelabs/assets/client.py
+++ b/src/twelvelabs/assets/client.py
@@ -124,6 +124,10 @@ def create(
- **Entity search**: [Marengo image requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements)
- **Create embeddings**: [Marengo requirements](/v1.3/docs/concepts/models/marengo#input-requirements)
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
method : AssetsCreateRequestMethod
@@ -344,6 +348,10 @@ async def create(
- **Entity search**: [Marengo image requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements)
- **Create embeddings**: [Marengo requirements](/v1.3/docs/concepts/models/marengo#input-requirements)
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
method : AssetsCreateRequestMethod
diff --git a/src/twelvelabs/assets/raw_client.py b/src/twelvelabs/assets/raw_client.py
index 2f78edc..9544239 100644
--- a/src/twelvelabs/assets/raw_client.py
+++ b/src/twelvelabs/assets/raw_client.py
@@ -144,6 +144,10 @@ def create(
- **Entity search**: [Marengo image requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements)
- **Create embeddings**: [Marengo requirements](/v1.3/docs/concepts/models/marengo#input-requirements)
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
method : AssetsCreateRequestMethod
@@ -426,6 +430,10 @@ async def create(
- **Entity search**: [Marengo image requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements)
- **Create embeddings**: [Marengo requirements](/v1.3/docs/concepts/models/marengo#input-requirements)
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
method : AssetsCreateRequestMethod
diff --git a/src/twelvelabs/base_client.py b/src/twelvelabs/base_client.py
index 60396a3..5bf85e3 100644
--- a/src/twelvelabs/base_client.py
+++ b/src/twelvelabs/base_client.py
@@ -10,12 +10,10 @@
from .entity_collections.client import AsyncEntityCollectionsClient, EntityCollectionsClient
from .environment import TwelveLabsEnvironment
from .indexes.client import AsyncIndexesClient, IndexesClient
-from .manage_entities.client import AsyncManageEntitiesClient, ManageEntitiesClient
from .multipart_upload.client import AsyncMultipartUploadClient, MultipartUploadClient
from .raw_base_client import AsyncRawBaseClient, RawBaseClient
from .search.client import AsyncSearchClient, SearchClient
from .tasks.client import AsyncTasksClient, TasksClient
-from .types.generate_response import GenerateResponse
from .types.gist import Gist
from .types.gist_request_types_item import GistRequestTypesItem
from .types.non_stream_analyze_response import NonStreamAnalyzeResponse
@@ -98,7 +96,6 @@ def __init__(
self.assets = AssetsClient(client_wrapper=self._client_wrapper)
self.multipart_upload = MultipartUploadClient(client_wrapper=self._client_wrapper)
self.entity_collections = EntityCollectionsClient(client_wrapper=self._client_wrapper)
- self.manage_entities = ManageEntitiesClient(client_wrapper=self._client_wrapper)
self.embed = EmbedClient(client_wrapper=self._client_wrapper)
self.search = SearchClient(client_wrapper=self._client_wrapper)
@@ -125,6 +122,11 @@ def summarize(
request_options: typing.Optional[RequestOptions] = None,
) -> SummarizeResponse:
"""
+
+
+ This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+
+
This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
@@ -208,12 +210,12 @@ def gist(
request_options: typing.Optional[RequestOptions] = None,
) -> Gist:
"""
- This endpoint analyzes videos and generates titles, topics, and hashtags.
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+ This method analyzes videos and generates titles, topics, and hashtags.
+
Parameters
----------
video_id : str
@@ -248,89 +250,6 @@ def gist(
_response = self._raw_client.gist(video_id=video_id, types=types, request_options=request_options)
return _response.data
- def generate(
- self,
- *,
- video_id: str,
- prompt: str,
- temperature: typing.Optional[float] = OMIT,
- stream: typing.Optional[bool] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> GenerateResponse:
- """
-
- This endpoint is deprecated. Use the [`/analyze`](/v1.3/api-reference/analyze-videos/analyze) endpoint instead, which provides identical functionality.
-
-
-
- This endpoint generates open-ended texts based on your videos, including but not limited to tables of content, action items, memos, and detailed analyses.
-
-
- - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses) guide.
-
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video for which you wish to generate a text.
-
- prompt : str
- A prompt that guides the model on the desired format or content.
-
-
- - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt.
- - Your prompts can be instructive or descriptive, or you can also phrase them as questions.
- - The maximum length of a prompt is 2,000 tokens.
-
-
- **Examples**:
-
- - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization).
- - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.
-
- temperature : typing.Optional[float]
- Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
- **Default:** 0.2
- **Min:** 0
- **Max:** 1
-
- stream : typing.Optional[bool]
- Set this parameter to `true` to enable streaming responses in the NDJSON format.
-
- **Default:** `true`
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- GenerateResponse
- The specified video has successfully been processed.
-
- The maximum length of the response is 4,096 tokens.
-
-
- Examples
- --------
- from twelvelabs import TwelveLabs
-
- client = TwelveLabs(
- api_key="YOUR_API_KEY",
- )
- client.generate(
- video_id="6298d673f1090f1100476d4c",
- prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
- temperature=0.2,
- stream=True,
- )
- """
- _response = self._raw_client.generate(
- video_id=video_id, prompt=prompt, temperature=temperature, stream=stream, request_options=request_options
- )
- return _response.data
-
def analyze_stream(
self,
*,
@@ -346,7 +265,7 @@ def analyze_stream(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+ - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
Parameters
@@ -440,7 +359,7 @@ def analyze(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+ - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
Parameters
@@ -589,7 +508,6 @@ def __init__(
self.assets = AsyncAssetsClient(client_wrapper=self._client_wrapper)
self.multipart_upload = AsyncMultipartUploadClient(client_wrapper=self._client_wrapper)
self.entity_collections = AsyncEntityCollectionsClient(client_wrapper=self._client_wrapper)
- self.manage_entities = AsyncManageEntitiesClient(client_wrapper=self._client_wrapper)
self.embed = AsyncEmbedClient(client_wrapper=self._client_wrapper)
self.search = AsyncSearchClient(client_wrapper=self._client_wrapper)
@@ -616,6 +534,11 @@ async def summarize(
request_options: typing.Optional[RequestOptions] = None,
) -> SummarizeResponse:
"""
+
+
+ This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+
+
This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
@@ -707,12 +630,12 @@ async def gist(
request_options: typing.Optional[RequestOptions] = None,
) -> Gist:
"""
- This endpoint analyzes videos and generates titles, topics, and hashtags.
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+ This method analyzes videos and generates titles, topics, and hashtags.
+
Parameters
----------
video_id : str
@@ -755,97 +678,6 @@ async def main() -> None:
_response = await self._raw_client.gist(video_id=video_id, types=types, request_options=request_options)
return _response.data
- async def generate(
- self,
- *,
- video_id: str,
- prompt: str,
- temperature: typing.Optional[float] = OMIT,
- stream: typing.Optional[bool] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> GenerateResponse:
- """
-
- This endpoint is deprecated. Use the [`/analyze`](/v1.3/api-reference/analyze-videos/analyze) endpoint instead, which provides identical functionality.
-
-
-
- This endpoint generates open-ended texts based on your videos, including but not limited to tables of content, action items, memos, and detailed analyses.
-
-
- - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses) guide.
-
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video for which you wish to generate a text.
-
- prompt : str
- A prompt that guides the model on the desired format or content.
-
-
- - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt.
- - Your prompts can be instructive or descriptive, or you can also phrase them as questions.
- - The maximum length of a prompt is 2,000 tokens.
-
-
- **Examples**:
-
- - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization).
- - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.
-
- temperature : typing.Optional[float]
- Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
- **Default:** 0.2
- **Min:** 0
- **Max:** 1
-
- stream : typing.Optional[bool]
- Set this parameter to `true` to enable streaming responses in the NDJSON format.
-
- **Default:** `true`
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- GenerateResponse
- The specified video has successfully been processed.
-
- The maximum length of the response is 4,096 tokens.
-
-
- Examples
- --------
- import asyncio
-
- from twelvelabs import AsyncTwelveLabs
-
- client = AsyncTwelveLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.generate(
- video_id="6298d673f1090f1100476d4c",
- prompt="I want to generate a description for my video with the following format - Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.",
- temperature=0.2,
- stream=True,
- )
-
-
- asyncio.run(main())
- """
- _response = await self._raw_client.generate(
- video_id=video_id, prompt=prompt, temperature=temperature, stream=stream, request_options=request_options
- )
- return _response.data
-
async def analyze_stream(
self,
*,
@@ -861,7 +693,7 @@ async def analyze_stream(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+ - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
Parameters
@@ -964,7 +796,7 @@ async def analyze(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+ - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
Parameters
diff --git a/src/twelvelabs/core/client_wrapper.py b/src/twelvelabs/core/client_wrapper.py
index ec30a59..16234c4 100644
--- a/src/twelvelabs/core/client_wrapper.py
+++ b/src/twelvelabs/core/client_wrapper.py
@@ -22,10 +22,10 @@ def __init__(
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "twelvelabs/1.2.0b0",
+ "User-Agent": "twelvelabs/1.1.1",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "twelvelabs",
- "X-Fern-SDK-Version": "1.2.0b0",
+ "X-Fern-SDK-Version": "1.1.1",
**(self.get_custom_headers() or {}),
}
headers["x-api-key"] = self.api_key
diff --git a/src/twelvelabs/embed/client.py b/src/twelvelabs/embed/client.py
index d463648..7aa1bf7 100644
--- a/src/twelvelabs/embed/client.py
+++ b/src/twelvelabs/embed/client.py
@@ -73,7 +73,8 @@ def create(
- The Marengo video understanding model generates embeddings for all modalities in the same latent space. This shared space enables any-to-any searches across different types of content.
- You can create multiple types of embeddings in a single API call.
- - Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+ - Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+ - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
Parameters
@@ -210,7 +211,8 @@ async def create(
- The Marengo video understanding model generates embeddings for all modalities in the same latent space. This shared space enables any-to-any searches across different types of content.
- You can create multiple types of embeddings in a single API call.
- - Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+ - Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+ - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
Parameters
diff --git a/src/twelvelabs/embed/raw_client.py b/src/twelvelabs/embed/raw_client.py
index cbffc91..b2a4994 100644
--- a/src/twelvelabs/embed/raw_client.py
+++ b/src/twelvelabs/embed/raw_client.py
@@ -61,7 +61,8 @@ def create(
- The Marengo video understanding model generates embeddings for all modalities in the same latent space. This shared space enables any-to-any searches across different types of content.
- You can create multiple types of embeddings in a single API call.
- - Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+ - Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+ - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
Parameters
@@ -205,7 +206,8 @@ async def create(
- The Marengo video understanding model generates embeddings for all modalities in the same latent space. This shared space enables any-to-any searches across different types of content.
- You can create multiple types of embeddings in a single API call.
- - Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+ - Audio embeddings combine generic sound and human speech in a single embedding. For videos with transcriptions, you can retrieve transcriptions and then [create text embeddings](/v1.3/api-reference/create-embeddings-v1/text-image-audio-embeddings/create-text-image-audio-embeddings) from these transcriptions.
+ - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
Parameters
diff --git a/src/twelvelabs/embed/tasks/client.py b/src/twelvelabs/embed/tasks/client.py
index 58bc869..abefc7d 100644
--- a/src/twelvelabs/embed/tasks/client.py
+++ b/src/twelvelabs/embed/tasks/client.py
@@ -135,13 +135,17 @@ def create(
This method creates a new video embedding task that uploads a video to the platform and creates one or multiple video embeddings.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Upload options:
- **Local file**: Use the `video_file` parameter
- **Publicly accessible URL**: Use the `video_url` parameter.
Specify at least one option. If both are provided, `video_url` takes precedence.
- Your video files must meet the [format requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
+ Your video files must meet the [requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
This endpoint allows you to upload files up to 2 GB in size. To upload larger files, use the [Multipart Upload API](/v1.3/api-reference/upload-content/multipart-uploads)
@@ -452,13 +456,17 @@ async def create(
This method creates a new video embedding task that uploads a video to the platform and creates one or multiple video embeddings.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Upload options:
- **Local file**: Use the `video_file` parameter
- **Publicly accessible URL**: Use the `video_url` parameter.
Specify at least one option. If both are provided, `video_url` takes precedence.
- Your video files must meet the [format requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
+ Your video files must meet the [requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
This endpoint allows you to upload files up to 2 GB in size. To upload larger files, use the [Multipart Upload API](/v1.3/api-reference/upload-content/multipart-uploads)
diff --git a/src/twelvelabs/embed/tasks/raw_client.py b/src/twelvelabs/embed/tasks/raw_client.py
index 6df36da..3e18801 100644
--- a/src/twelvelabs/embed/tasks/raw_client.py
+++ b/src/twelvelabs/embed/tasks/raw_client.py
@@ -153,13 +153,17 @@ def create(
This method creates a new video embedding task that uploads a video to the platform and creates one or multiple video embeddings.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Upload options:
- **Local file**: Use the `video_file` parameter
- **Publicly accessible URL**: Use the `video_url` parameter.
Specify at least one option. If both are provided, `video_url` takes precedence.
- Your video files must meet the [format requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
+ Your video files must meet the [requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
This endpoint allows you to upload files up to 2 GB in size. To upload larger files, use the [Multipart Upload API](/v1.3/api-reference/upload-content/multipart-uploads)
@@ -534,13 +538,17 @@ async def create(
This method creates a new video embedding task that uploads a video to the platform and creates one or multiple video embeddings.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Upload options:
- **Local file**: Use the `video_file` parameter
- **Publicly accessible URL**: Use the `video_url` parameter.
Specify at least one option. If both are provided, `video_url` takes precedence.
- Your video files must meet the [format requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
+ Your video files must meet the [requirements](/v1.3/docs/concepts/models/marengo#video-file-requirements).
This endpoint allows you to upload files up to 2 GB in size. To upload larger files, use the [Multipart Upload API](/v1.3/api-reference/upload-content/multipart-uploads)
diff --git a/src/twelvelabs/embed/v_2/client.py b/src/twelvelabs/embed/v_2/client.py
index 3867073..a6d8cb8 100644
--- a/src/twelvelabs/embed/v_2/client.py
+++ b/src/twelvelabs/embed/v_2/client.py
@@ -80,6 +80,10 @@ def create(
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
@@ -203,6 +207,10 @@ async def create(
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
diff --git a/src/twelvelabs/embed/v_2/raw_client.py b/src/twelvelabs/embed/v_2/raw_client.py
index 1cf647a..2772749 100644
--- a/src/twelvelabs/embed/v_2/raw_client.py
+++ b/src/twelvelabs/embed/v_2/raw_client.py
@@ -74,6 +74,10 @@ def create(
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
@@ -234,6 +238,10 @@ async def create(
- Aspect ratio: Between 1:1 and 1:2.4, or between 2.4:1 and 1:1
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
diff --git a/src/twelvelabs/embed/v_2/tasks/client.py b/src/twelvelabs/embed/v_2/tasks/client.py
index ac1a2c3..df107ce 100644
--- a/src/twelvelabs/embed/v_2/tasks/client.py
+++ b/src/twelvelabs/embed/v_2/tasks/client.py
@@ -149,6 +149,10 @@ def create(
2. Poll for the status of the task using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint. Wait until the status is `ready`.
3. Retrieve the embeddings from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
@@ -392,6 +396,10 @@ async def create(
2. Poll for the status of the task using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint. Wait until the status is `ready`.
3. Retrieve the embeddings from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
diff --git a/src/twelvelabs/embed/v_2/tasks/raw_client.py b/src/twelvelabs/embed/v_2/tasks/raw_client.py
index 881e04a..fcb5506 100644
--- a/src/twelvelabs/embed/v_2/tasks/raw_client.py
+++ b/src/twelvelabs/embed/v_2/tasks/raw_client.py
@@ -170,6 +170,10 @@ def create(
2. Poll for the status of the task using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint. Wait until the status is `ready`.
3. Retrieve the embeddings from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
@@ -454,6 +458,10 @@ async def create(
2. Poll for the status of the task using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint. Wait until the status is `ready`.
3. Retrieve the embeddings from the response when the status is `ready` using the [`GET`](/v1.3/api-reference/create-embeddings-v2/retrieve-embeddings) method of the `/embed-v2/tasks/{task_id}` endpoint.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
diff --git a/src/twelvelabs/indexes/indexed_assets/client.py b/src/twelvelabs/indexes/indexed_assets/client.py
index 2c846cd..c2bac7b 100644
--- a/src/twelvelabs/indexes/indexed_assets/client.py
+++ b/src/twelvelabs/indexes/indexed_assets/client.py
@@ -210,6 +210,10 @@ def create(
If you want to both search and analyze your videos, the most restrictive requirements apply.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
index_id : str
@@ -622,6 +626,10 @@ async def create(
If you want to both search and analyze your videos, the most restrictive requirements apply.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
index_id : str
diff --git a/src/twelvelabs/indexes/indexed_assets/raw_client.py b/src/twelvelabs/indexes/indexed_assets/raw_client.py
index d656030..1699830 100644
--- a/src/twelvelabs/indexes/indexed_assets/raw_client.py
+++ b/src/twelvelabs/indexes/indexed_assets/raw_client.py
@@ -236,6 +236,10 @@ def create(
If you want to both search and analyze your videos, the most restrictive requirements apply.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
index_id : str
@@ -744,6 +748,10 @@ async def create(
If you want to both search and analyze your videos, the most restrictive requirements apply.
+
+ This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+
Parameters
----------
index_id : str
diff --git a/src/twelvelabs/manage_entities/__init__.py b/src/twelvelabs/manage_entities/__init__.py
deleted file mode 100644
index a7dc074..0000000
--- a/src/twelvelabs/manage_entities/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-# isort: skip_file
-
-from .types import ListAllEntitiesRequestSortBy, ListAllEntitiesRequestStatus, ListAllEntitiesResponse
-
-__all__ = ["ListAllEntitiesRequestSortBy", "ListAllEntitiesRequestStatus", "ListAllEntitiesResponse"]
diff --git a/src/twelvelabs/manage_entities/client.py b/src/twelvelabs/manage_entities/client.py
deleted file mode 100644
index c4799ea..0000000
--- a/src/twelvelabs/manage_entities/client.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
-from ..core.request_options import RequestOptions
-from .raw_client import AsyncRawManageEntitiesClient, RawManageEntitiesClient
-from .types.list_all_entities_request_sort_by import ListAllEntitiesRequestSortBy
-from .types.list_all_entities_request_status import ListAllEntitiesRequestStatus
-from .types.list_all_entities_response import ListAllEntitiesResponse
-
-
-class ManageEntitiesClient:
- def __init__(self, *, client_wrapper: SyncClientWrapper):
- self._raw_client = RawManageEntitiesClient(client_wrapper=client_wrapper)
-
- @property
- def with_raw_response(self) -> RawManageEntitiesClient:
- """
- Retrieves a raw implementation of this client that returns raw responses.
-
- Returns
- -------
- RawManageEntitiesClient
- """
- return self._raw_client
-
- def list_all_entities(
- self,
- *,
- page: typing.Optional[int] = None,
- page_limit: typing.Optional[int] = None,
- name: typing.Optional[str] = None,
- status: typing.Optional[ListAllEntitiesRequestStatus] = None,
- sort_by: typing.Optional[ListAllEntitiesRequestSortBy] = None,
- sort_option: typing.Optional[str] = None,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> ListAllEntitiesResponse:
- """
- This method returns a list of entities from all entity collections.
- This is an internal API primarily used by the search interface.
-
- Parameters
- ----------
- page : typing.Optional[int]
- A number that identifies the page to retrieve.
-
- **Default**: `1`.
-
- page_limit : typing.Optional[int]
- The number of items to return on each page.
-
- **Default**: `10`.
- **Max**: `50`.
-
- name : typing.Optional[str]
- Filter entities by name.
-
- status : typing.Optional[ListAllEntitiesRequestStatus]
- Filter entities by status.
-
- sort_by : typing.Optional[ListAllEntitiesRequestSortBy]
- Field to sort by.
-
- sort_option : typing.Optional[str]
- The sorting direction. The following options are available:
- - `asc`
- - `desc`
-
- **Default**: `desc`.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- ListAllEntitiesResponse
- The entities have been successfully retrieved.
-
- Examples
- --------
- from twelvelabs import TwelveLabs
-
- client = TwelveLabs(
- api_key="YOUR_API_KEY",
- )
- client.manage_entities.list_all_entities(
- page=1,
- page_limit=10,
- name="foo",
- status="processing",
- sort_by="created_at",
- sort_option="desc",
- )
- """
- _response = self._raw_client.list_all_entities(
- page=page,
- page_limit=page_limit,
- name=name,
- status=status,
- sort_by=sort_by,
- sort_option=sort_option,
- request_options=request_options,
- )
- return _response.data
-
-
-class AsyncManageEntitiesClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._raw_client = AsyncRawManageEntitiesClient(client_wrapper=client_wrapper)
-
- @property
- def with_raw_response(self) -> AsyncRawManageEntitiesClient:
- """
- Retrieves a raw implementation of this client that returns raw responses.
-
- Returns
- -------
- AsyncRawManageEntitiesClient
- """
- return self._raw_client
-
- async def list_all_entities(
- self,
- *,
- page: typing.Optional[int] = None,
- page_limit: typing.Optional[int] = None,
- name: typing.Optional[str] = None,
- status: typing.Optional[ListAllEntitiesRequestStatus] = None,
- sort_by: typing.Optional[ListAllEntitiesRequestSortBy] = None,
- sort_option: typing.Optional[str] = None,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> ListAllEntitiesResponse:
- """
- This method returns a list of entities from all entity collections.
- This is an internal API primarily used by the search interface.
-
- Parameters
- ----------
- page : typing.Optional[int]
- A number that identifies the page to retrieve.
-
- **Default**: `1`.
-
- page_limit : typing.Optional[int]
- The number of items to return on each page.
-
- **Default**: `10`.
- **Max**: `50`.
-
- name : typing.Optional[str]
- Filter entities by name.
-
- status : typing.Optional[ListAllEntitiesRequestStatus]
- Filter entities by status.
-
- sort_by : typing.Optional[ListAllEntitiesRequestSortBy]
- Field to sort by.
-
- sort_option : typing.Optional[str]
- The sorting direction. The following options are available:
- - `asc`
- - `desc`
-
- **Default**: `desc`.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- ListAllEntitiesResponse
- The entities have been successfully retrieved.
-
- Examples
- --------
- import asyncio
-
- from twelvelabs import AsyncTwelveLabs
-
- client = AsyncTwelveLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.manage_entities.list_all_entities(
- page=1,
- page_limit=10,
- name="foo",
- status="processing",
- sort_by="created_at",
- sort_option="desc",
- )
-
-
- asyncio.run(main())
- """
- _response = await self._raw_client.list_all_entities(
- page=page,
- page_limit=page_limit,
- name=name,
- status=status,
- sort_by=sort_by,
- sort_option=sort_option,
- request_options=request_options,
- )
- return _response.data
diff --git a/src/twelvelabs/manage_entities/raw_client.py b/src/twelvelabs/manage_entities/raw_client.py
deleted file mode 100644
index 7740a87..0000000
--- a/src/twelvelabs/manage_entities/raw_client.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from json.decoder import JSONDecodeError
-
-from ..core.api_error import ApiError
-from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
-from ..core.http_response import AsyncHttpResponse, HttpResponse
-from ..core.pydantic_utilities import parse_obj_as
-from ..core.request_options import RequestOptions
-from ..errors.bad_request_error import BadRequestError
-from .types.list_all_entities_request_sort_by import ListAllEntitiesRequestSortBy
-from .types.list_all_entities_request_status import ListAllEntitiesRequestStatus
-from .types.list_all_entities_response import ListAllEntitiesResponse
-
-
-class RawManageEntitiesClient:
- def __init__(self, *, client_wrapper: SyncClientWrapper):
- self._client_wrapper = client_wrapper
-
- def list_all_entities(
- self,
- *,
- page: typing.Optional[int] = None,
- page_limit: typing.Optional[int] = None,
- name: typing.Optional[str] = None,
- status: typing.Optional[ListAllEntitiesRequestStatus] = None,
- sort_by: typing.Optional[ListAllEntitiesRequestSortBy] = None,
- sort_option: typing.Optional[str] = None,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[ListAllEntitiesResponse]:
- """
- This method returns a list of entities from all entity collections.
- This is an internal API primarily used by the search interface.
-
- Parameters
- ----------
- page : typing.Optional[int]
- A number that identifies the page to retrieve.
-
- **Default**: `1`.
-
- page_limit : typing.Optional[int]
- The number of items to return on each page.
-
- **Default**: `10`.
- **Max**: `50`.
-
- name : typing.Optional[str]
- Filter entities by name.
-
- status : typing.Optional[ListAllEntitiesRequestStatus]
- Filter entities by status.
-
- sort_by : typing.Optional[ListAllEntitiesRequestSortBy]
- Field to sort by.
-
- sort_option : typing.Optional[str]
- The sorting direction. The following options are available:
- - `asc`
- - `desc`
-
- **Default**: `desc`.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- HttpResponse[ListAllEntitiesResponse]
- The entities have been successfully retrieved.
- """
- _response = self._client_wrapper.httpx_client.request(
- "entities",
- method="GET",
- params={
- "page": page,
- "page_limit": page_limit,
- "name": name,
- "status": status,
- "sort_by": sort_by,
- "sort_option": sort_option,
- },
- request_options=request_options,
- )
- try:
- if 200 <= _response.status_code < 300:
- _data = typing.cast(
- ListAllEntitiesResponse,
- parse_obj_as(
- type_=ListAllEntitiesResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- return HttpResponse(response=_response, data=_data)
- if _response.status_code == 400:
- raise BadRequestError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
-
-class AsyncRawManageEntitiesClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._client_wrapper = client_wrapper
-
- async def list_all_entities(
- self,
- *,
- page: typing.Optional[int] = None,
- page_limit: typing.Optional[int] = None,
- name: typing.Optional[str] = None,
- status: typing.Optional[ListAllEntitiesRequestStatus] = None,
- sort_by: typing.Optional[ListAllEntitiesRequestSortBy] = None,
- sort_option: typing.Optional[str] = None,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[ListAllEntitiesResponse]:
- """
- This method returns a list of entities from all entity collections.
- This is an internal API primarily used by the search interface.
-
- Parameters
- ----------
- page : typing.Optional[int]
- A number that identifies the page to retrieve.
-
- **Default**: `1`.
-
- page_limit : typing.Optional[int]
- The number of items to return on each page.
-
- **Default**: `10`.
- **Max**: `50`.
-
- name : typing.Optional[str]
- Filter entities by name.
-
- status : typing.Optional[ListAllEntitiesRequestStatus]
- Filter entities by status.
-
- sort_by : typing.Optional[ListAllEntitiesRequestSortBy]
- Field to sort by.
-
- sort_option : typing.Optional[str]
- The sorting direction. The following options are available:
- - `asc`
- - `desc`
-
- **Default**: `desc`.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- AsyncHttpResponse[ListAllEntitiesResponse]
- The entities have been successfully retrieved.
- """
- _response = await self._client_wrapper.httpx_client.request(
- "entities",
- method="GET",
- params={
- "page": page,
- "page_limit": page_limit,
- "name": name,
- "status": status,
- "sort_by": sort_by,
- "sort_option": sort_option,
- },
- request_options=request_options,
- )
- try:
- if 200 <= _response.status_code < 300:
- _data = typing.cast(
- ListAllEntitiesResponse,
- parse_obj_as(
- type_=ListAllEntitiesResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- return AsyncHttpResponse(response=_response, data=_data)
- if _response.status_code == 400:
- raise BadRequestError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
diff --git a/src/twelvelabs/manage_entities/types/__init__.py b/src/twelvelabs/manage_entities/types/__init__.py
deleted file mode 100644
index 1188c13..0000000
--- a/src/twelvelabs/manage_entities/types/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-# isort: skip_file
-
-from .list_all_entities_request_sort_by import ListAllEntitiesRequestSortBy
-from .list_all_entities_request_status import ListAllEntitiesRequestStatus
-from .list_all_entities_response import ListAllEntitiesResponse
-
-__all__ = ["ListAllEntitiesRequestSortBy", "ListAllEntitiesRequestStatus", "ListAllEntitiesResponse"]
diff --git a/src/twelvelabs/manage_entities/types/list_all_entities_request_sort_by.py b/src/twelvelabs/manage_entities/types/list_all_entities_request_sort_by.py
deleted file mode 100644
index c00df46..0000000
--- a/src/twelvelabs/manage_entities/types/list_all_entities_request_sort_by.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ListAllEntitiesRequestSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
diff --git a/src/twelvelabs/manage_entities/types/list_all_entities_request_status.py b/src/twelvelabs/manage_entities/types/list_all_entities_request_status.py
deleted file mode 100644
index 141716c..0000000
--- a/src/twelvelabs/manage_entities/types/list_all_entities_request_status.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ListAllEntitiesRequestStatus = typing.Union[typing.Literal["processing", "ready"], typing.Any]
diff --git a/src/twelvelabs/manage_entities/types/list_all_entities_response.py b/src/twelvelabs/manage_entities/types/list_all_entities_response.py
deleted file mode 100644
index 55a1592..0000000
--- a/src/twelvelabs/manage_entities/types/list_all_entities_response.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-from ...types.internal_entity import InternalEntity
-from ...types.page_info import PageInfo
-
-
-class ListAllEntitiesResponse(UniversalBaseModel):
- data: typing.Optional[typing.List[InternalEntity]] = pydantic.Field(default=None)
- """
- An array containing the entities.
- """
-
- page_info: typing.Optional[PageInfo] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/twelvelabs/raw_base_client.py b/src/twelvelabs/raw_base_client.py
index 533001d..fd55260 100644
--- a/src/twelvelabs/raw_base_client.py
+++ b/src/twelvelabs/raw_base_client.py
@@ -13,7 +13,6 @@
from .core.serialization import convert_and_respect_annotation_metadata
from .errors.bad_request_error import BadRequestError
from .errors.too_many_requests_error import TooManyRequestsError
-from .types.generate_response import GenerateResponse
from .types.gist import Gist
from .types.gist_request_types_item import GistRequestTypesItem
from .types.non_stream_analyze_response import NonStreamAnalyzeResponse
@@ -41,6 +40,11 @@ def summarize(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[SummarizeResponse]:
"""
+
+
+ This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+
+
This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
@@ -155,12 +159,12 @@ def gist(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[Gist]:
"""
- This endpoint analyzes videos and generates titles, topics, and hashtags.
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+ This method analyzes videos and generates titles, topics, and hashtags.
+
Parameters
----------
video_id : str
@@ -230,122 +234,6 @@ def gist(
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
- def generate(
- self,
- *,
- video_id: str,
- prompt: str,
- temperature: typing.Optional[float] = OMIT,
- stream: typing.Optional[bool] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[GenerateResponse]:
- """
-
- This endpoint is deprecated. Use the [`/analyze`](/v1.3/api-reference/analyze-videos/analyze) endpoint instead, which provides identical functionality.
-
-
-
- This endpoint generates open-ended texts based on your videos, including but not limited to tables of content, action items, memos, and detailed analyses.
-
-
- - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses) guide.
-
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video for which you wish to generate a text.
-
- prompt : str
- A prompt that guides the model on the desired format or content.
-
-
- - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt.
- - Your prompts can be instructive or descriptive, or you can also phrase them as questions.
- - The maximum length of a prompt is 2,000 tokens.
-
-
- **Examples**:
-
- - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization).
- - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.
-
- temperature : typing.Optional[float]
- Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
- **Default:** 0.2
- **Min:** 0
- **Max:** 1
-
- stream : typing.Optional[bool]
- Set this parameter to `true` to enable streaming responses in the NDJSON format.
-
- **Default:** `true`
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- HttpResponse[GenerateResponse]
- The specified video has successfully been processed.
-
- The maximum length of the response is 4,096 tokens.
-
- """
- _response = self._client_wrapper.httpx_client.request(
- "generate",
- method="POST",
- json={
- "video_id": video_id,
- "prompt": prompt,
- "temperature": temperature,
- "stream": stream,
- },
- headers={
- "content-type": "application/json",
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- _data = typing.cast(
- GenerateResponse,
- parse_obj_as(
- type_=GenerateResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- return HttpResponse(response=_response, data=_data)
- if _response.status_code == 400:
- raise BadRequestError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
@contextlib.contextmanager
def analyze_stream(
self,
@@ -362,7 +250,7 @@ def analyze_stream(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+ - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
Parameters
@@ -492,7 +380,7 @@ def analyze(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+ - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
Parameters
@@ -607,6 +495,11 @@ async def summarize(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[SummarizeResponse]:
"""
+
+
+ This endpoint will be sunset and removed. Use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint. Pass the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+
+
This endpoint analyzes videos and generates summaries, chapters, or highlights. Optionally, you can provide a prompt to customize the output.
@@ -721,12 +614,12 @@ async def gist(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[Gist]:
"""
- This endpoint analyzes videos and generates titles, topics, and hashtags.
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ This endpoint will be sunset and removed on February 15, 2026. Instead, use the [`POST`](/v1.3/api-reference/analyze-videos/analyze) method of the `/analyze` endpoint, passing the [`response_format`](/v1.3/api-reference/analyze-videos/analyze#request.body.response_format) parameter to specify the format of the response as structured JSON. For migration instructions, see the [Release notes](/v1.3/docs/get-started/release-notes#predefined-formats-for-video-analysis-will-be-sunset-and-removed) page.
+ This method analyzes videos and generates titles, topics, and hashtags.
+
Parameters
----------
video_id : str
@@ -796,122 +689,6 @@ async def gist(
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
- async def generate(
- self,
- *,
- video_id: str,
- prompt: str,
- temperature: typing.Optional[float] = OMIT,
- stream: typing.Optional[bool] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[GenerateResponse]:
- """
-
- This endpoint is deprecated. Use the [`/analyze`](/v1.3/api-reference/analyze-videos/analyze) endpoint instead, which provides identical functionality.
-
-
-
- This endpoint generates open-ended texts based on your videos, including but not limited to tables of content, action items, memos, and detailed analyses.
-
-
- - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses) guide.
-
-
- Parameters
- ----------
- video_id : str
- The unique identifier of the video for which you wish to generate a text.
-
- prompt : str
- A prompt that guides the model on the desired format or content.
-
-
- - Even though the model behind this endpoint is trained to a high degree of accuracy, the preciseness of the generated text may vary based on the nature and quality of the video and the clarity of the prompt.
- - Your prompts can be instructive or descriptive, or you can also phrase them as questions.
- - The maximum length of a prompt is 2,000 tokens.
-
-
- **Examples**:
-
- - Based on this video, I want to generate five keywords for SEO (Search Engine Optimization).
- - I want to generate a description for my video with the following format: Title of the video, followed by a summary in 2-3 sentences, highlighting the main topic, key events, and concluding remarks.
-
- temperature : typing.Optional[float]
- Controls the randomness of the text output generated by the model. A higher value generates more creative text, while a lower value produces more deterministic text output.
-
- **Default:** 0.2
- **Min:** 0
- **Max:** 1
-
- stream : typing.Optional[bool]
- Set this parameter to `true` to enable streaming responses in the NDJSON format.
-
- **Default:** `true`
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- AsyncHttpResponse[GenerateResponse]
- The specified video has successfully been processed.
-
- The maximum length of the response is 4,096 tokens.
-
- """
- _response = await self._client_wrapper.httpx_client.request(
- "generate",
- method="POST",
- json={
- "video_id": video_id,
- "prompt": prompt,
- "temperature": temperature,
- "stream": stream,
- },
- headers={
- "content-type": "application/json",
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- _data = typing.cast(
- GenerateResponse,
- parse_obj_as(
- type_=GenerateResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- return AsyncHttpResponse(response=_response, data=_data)
- if _response.status_code == 400:
- raise BadRequestError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- if _response.status_code == 429:
- raise TooManyRequestsError(
- headers=dict(_response.headers),
- body=typing.cast(
- typing.Optional[typing.Any],
- parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- ),
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
- raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
@contextlib.asynccontextmanager
async def analyze_stream(
self,
@@ -928,7 +705,7 @@ async def analyze_stream(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+ - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
Parameters
@@ -1058,7 +835,7 @@ async def analyze(
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
- - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Open-ended analysis](/v1.3/docs/guides/analyze-videos/open-ended-analysis#streaming-responses).
+ - This endpoint supports streaming responses. For details on integrating this feature into your application, refer to the [Analyze videos](/v1.3/docs/guides/analyze-videos) page.
Parameters
diff --git a/src/twelvelabs/search/client.py b/src/twelvelabs/search/client.py
index fe109fd..270a4a3 100644
--- a/src/twelvelabs/search/client.py
+++ b/src/twelvelabs/search/client.py
@@ -79,12 +79,9 @@ def create(
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
-
- When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [format requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
-
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ - When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
+ - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
Parameters
@@ -371,12 +368,9 @@ async def create(
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
-
- When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [format requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
-
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ - When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
+ - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
Parameters
diff --git a/src/twelvelabs/search/raw_client.py b/src/twelvelabs/search/raw_client.py
index 15ab327..85870ff 100644
--- a/src/twelvelabs/search/raw_client.py
+++ b/src/twelvelabs/search/raw_client.py
@@ -74,12 +74,9 @@ def create(
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
-
- When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [format requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
-
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ - When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
+ - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
Parameters
@@ -403,12 +400,9 @@ async def create(
- To find a specific person in your videos, enclose the unique identifier of the entity you want to find in the `query_text` parameter.
-
- When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [format requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
-
-
-
- This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
+
+ - When using images in your search queries (either as media queries or in composed searches), ensure your image files meet the [requirements](/v1.3/docs/concepts/models/marengo#image-file-requirements).
+ - This endpoint is rate-limited. For details, see the [Rate limits](/v1.3/docs/get-started/rate-limits) page.
Parameters
diff --git a/src/twelvelabs/types/__init__.py b/src/twelvelabs/types/__init__.py
index 64dd12d..1880b3e 100644
--- a/src/twelvelabs/types/__init__.py
+++ b/src/twelvelabs/types/__init__.py
@@ -62,7 +62,6 @@
from .expires_at import ExpiresAt
from .finish_reason import FinishReason
from .forbidden_error_body import ForbiddenErrorBody
-from .generate_response import GenerateResponse
from .get_upload_status_response import GetUploadStatusResponse
from .gist import Gist
from .gist_request_types_item import GistRequestTypesItem
@@ -222,7 +221,6 @@
"ExpiresAt",
"FinishReason",
"ForbiddenErrorBody",
- "GenerateResponse",
"GetUploadStatusResponse",
"Gist",
"GistRequestTypesItem",
diff --git a/src/twelvelabs/types/generate_response.py b/src/twelvelabs/types/generate_response.py
deleted file mode 100644
index 0c458c3..0000000
--- a/src/twelvelabs/types/generate_response.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from .non_stream_analyze_response import NonStreamAnalyzeResponse
-from .stream_analyze_response import StreamAnalyzeResponse
-
-GenerateResponse = typing.Union[StreamAnalyzeResponse, NonStreamAnalyzeResponse]
diff --git a/src/twelvelabs/types/media_source.py b/src/twelvelabs/types/media_source.py
index 8ec873e..0d8cdc3 100644
--- a/src/twelvelabs/types/media_source.py
+++ b/src/twelvelabs/types/media_source.py
@@ -10,7 +10,7 @@
class MediaSource(UniversalBaseModel):
"""
- An object specifying the source of the media file.
+ An object specifying the source of the media file. You must provide exactly one of `url`, `base64_string`, or `asset_id`.
"""
base_64_string: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="base64_string")] = (