diff --git a/poetry.lock b/poetry.lock
index 794340b..a19d7e2 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -60,13 +60,13 @@ files = [
[[package]]
name = "exceptiongroup"
-version = "1.3.0"
+version = "1.3.1"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
- {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
- {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
+ {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"},
+ {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index e7c505d..d99e091 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "twelvelabs"
[tool.poetry]
name = "twelvelabs"
-version = "1.1.0"
+version = "1.1.1"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index e023b9e..d246595 100644
--- a/reference.md
+++ b/reference.md
@@ -1632,10 +1632,7 @@ client.indexes.delete(
This method returns a list of assets in your account.
-
-- The platform returns your assets sorted by creation date, with the newest at the top of the list.
-- The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+The platform returns your assets sorted by creation date, with the newest at the top of the list.
@@ -1749,7 +1746,7 @@ The number of items to return on each page.
-
-This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -1823,9 +1820,7 @@ typing.Optional[core.File]` — See core.File for more documentation
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+URL uploads have a maximum limit of 4GB.
@@ -2147,7 +2142,7 @@ client.multipart_upload.create(
-
-**filename:** `str` — Original filename of the asset
+**filename:** `str` — The original file name of the asset.
@@ -2194,14 +2189,14 @@ The total size of the file in bytes. The platform uses this value to:
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
-Use this endpoint to:
+Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
@@ -2303,11 +2298,10 @@ The number of items to return on each page.
-
-This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
@@ -4178,7 +4172,6 @@ client = TwelveLabs(
)
client.embed.v_2.create(
input_type="text",
- model_name="marengo3.0",
text=TextInputRequest(
input_text="man walking a dog",
),
@@ -4198,15 +4191,17 @@ client.embed.v_2.create(
-
-**input_type:** `CreateEmbeddingsRequestInputType` — The type of content for which you wish to create embeddings.
-
-
-
+**input_type:** `CreateEmbeddingsRequestInputType`
+
+The type of content for the embeddings.
-
--
-**model_name:** `str` — The video understanding model you wish to use.
+**Values**:
+- `audio`: Creates embeddings for an audio file
+- `video`: Creates embeddings for a video file
+- `image`: Creates embeddings for an image file
+- `text`: Creates embeddings for text input
+- `text_image`: Creates embeddings for text and an image
@@ -4461,7 +4456,6 @@ client = TwelveLabs(
)
client.embed.v_2.tasks.create(
input_type="video",
- model_name="marengo3.0",
video=VideoInputRequest(
media_source=MediaSource(
url="https://user-bucket.com/video/long-video.mp4",
@@ -4494,7 +4488,7 @@ client.embed.v_2.tasks.create(
**input_type:** `CreateAsyncEmbeddingRequestInputType`
-The type of content for which you wish to create embeddings.
+The type of content for the embeddings.
**Values**:
- `audio`: Audio files
@@ -4506,14 +4500,6 @@ The type of content for which you wish to create embeddings.
-
-**model_name:** `str` — The model you wish to use.
-
-
-
-
-
--
-
**audio:** `typing.Optional[AudioInputRequest]`
@@ -5624,7 +5610,7 @@ status=ready&status=validating
-
-**created_at:** `typing.Optional[str]` — Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+**created_at:** `typing.Optional[str]` — Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
@@ -5779,20 +5765,20 @@ client.indexes.indexed_assets.create(
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
-**Common use cases**:
+Use this method to:
-- Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+- Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
-- Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+- Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
-- Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+- Retrieve the embeddings:
+ - Include the `embeddingOption` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -5874,7 +5860,7 @@ To retrieve embeddings for a video, it must be indexed using the Marengo video u
-
-**transcription:** `typing.Optional[bool]` — The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+**transcription:** `typing.Optional[bool]` — Specifies whether to retrieve a transcription of the spoken words.
@@ -5985,7 +5971,7 @@ client.indexes.indexed_assets.delete(
-
-Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
@@ -6499,7 +6485,7 @@ client.indexes.videos.delete(
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
-Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
diff --git a/src/twelvelabs/assets/client.py b/src/twelvelabs/assets/client.py
index c98547d..405c15a 100644
--- a/src/twelvelabs/assets/client.py
+++ b/src/twelvelabs/assets/client.py
@@ -44,10 +44,7 @@ def list(
"""
This method returns a list of assets in your account.
-
- - The platform returns your assets sorted by creation date, with the newest at the top of the list.
- - The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+ The platform returns your assets sorted by creation date, with the newest at the top of the list.
Parameters
----------
@@ -111,7 +108,7 @@ def create(
request_options: typing.Optional[RequestOptions] = None,
) -> Asset:
"""
- This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+ This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -138,9 +135,7 @@ def create(
url : typing.Optional[str]
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+ URL uploads have a maximum limit of 4GB.
filename : typing.Optional[str]
The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL.
@@ -260,10 +255,7 @@ async def list(
"""
This method returns a list of assets in your account.
-
- - The platform returns your assets sorted by creation date, with the newest at the top of the list.
- - The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+ The platform returns your assets sorted by creation date, with the newest at the top of the list.
Parameters
----------
@@ -336,7 +328,7 @@ async def create(
request_options: typing.Optional[RequestOptions] = None,
) -> Asset:
"""
- This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+ This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -363,9 +355,7 @@ async def create(
url : typing.Optional[str]
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+ URL uploads have a maximum limit of 4GB.
filename : typing.Optional[str]
The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL.
diff --git a/src/twelvelabs/assets/raw_client.py b/src/twelvelabs/assets/raw_client.py
index fca924d..2f78edc 100644
--- a/src/twelvelabs/assets/raw_client.py
+++ b/src/twelvelabs/assets/raw_client.py
@@ -39,10 +39,7 @@ def list(
"""
This method returns a list of assets in your account.
-
- - The platform returns your assets sorted by creation date, with the newest at the top of the list.
- - The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+ The platform returns your assets sorted by creation date, with the newest at the top of the list.
Parameters
----------
@@ -131,7 +128,7 @@ def create(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[Asset]:
"""
- This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+ This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -158,9 +155,7 @@ def create(
url : typing.Optional[str]
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+ URL uploads have a maximum limit of 4GB.
filename : typing.Optional[str]
The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL.
@@ -323,10 +318,7 @@ async def list(
"""
This method returns a list of assets in your account.
-
- - The platform returns your assets sorted by creation date, with the newest at the top of the list.
- - The platform automatically deletes assets that are not associated with any entity after 72 hours.
-
+ The platform returns your assets sorted by creation date, with the newest at the top of the list.
Parameters
----------
@@ -418,7 +410,7 @@ async def create(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[Asset]:
"""
- This method creates an asset by uploading a file to the platform. Assets are files (such as images, audio, or video) that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
+ This method creates an asset by uploading a file to the platform. Assets are media files that you can use in downstream workflows, including indexing, analyzing video content, and creating entities.
**Supported content**: Video, audio, and images.
@@ -445,9 +437,7 @@ async def create(
url : typing.Optional[str]
Specify this parameter to upload a file from a publicly accessible URL. This parameter is required when `method` is set to `url`.
-
- URL uploads are limited to 4GB.
-
+ URL uploads have a maximum limit of 4GB.
filename : typing.Optional[str]
The optional filename of the asset. If not provided, the platform will determine the filename from the file or URL.
diff --git a/src/twelvelabs/core/client_wrapper.py b/src/twelvelabs/core/client_wrapper.py
index 434f508..16234c4 100644
--- a/src/twelvelabs/core/client_wrapper.py
+++ b/src/twelvelabs/core/client_wrapper.py
@@ -22,10 +22,10 @@ def __init__(
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "twelvelabs/1.1.0",
+ "User-Agent": "twelvelabs/1.1.1",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "twelvelabs",
- "X-Fern-SDK-Version": "1.1.0",
+ "X-Fern-SDK-Version": "1.1.1",
**(self.get_custom_headers() or {}),
}
headers["x-api-key"] = self.api_key
diff --git a/src/twelvelabs/embed/v_2/client.py b/src/twelvelabs/embed/v_2/client.py
index 286e0dd..9adaa2c 100644
--- a/src/twelvelabs/embed/v_2/client.py
+++ b/src/twelvelabs/embed/v_2/client.py
@@ -38,7 +38,6 @@ def create(
self,
*,
input_type: CreateEmbeddingsRequestInputType,
- model_name: str,
text: typing.Optional[TextInputRequest] = OMIT,
image: typing.Optional[ImageInputRequest] = OMIT,
text_image: typing.Optional[TextImageInputRequest] = OMIT,
@@ -82,10 +81,15 @@ def create(
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
- model_name : str
- The video understanding model you wish to use.
+
+ **Values**:
+ - `audio`: Creates embeddings for an audio file
+ - `video`: Creates embeddings for a video file
+ - `image`: Creates embeddings for an image file
+ - `text`: Creates embeddings for text input
+ - `text_image`: Creates embeddings for text and an image.
text : typing.Optional[TextInputRequest]
@@ -114,7 +118,6 @@ def create(
)
client.embed.v_2.create(
input_type="text",
- model_name="marengo3.0",
text=TextInputRequest(
input_text="man walking a dog",
),
@@ -122,7 +125,6 @@ def create(
"""
_response = self._raw_client.create(
input_type=input_type,
- model_name=model_name,
text=text,
image=image,
text_image=text_image,
@@ -153,7 +155,6 @@ async def create(
self,
*,
input_type: CreateEmbeddingsRequestInputType,
- model_name: str,
text: typing.Optional[TextInputRequest] = OMIT,
image: typing.Optional[ImageInputRequest] = OMIT,
text_image: typing.Optional[TextImageInputRequest] = OMIT,
@@ -197,10 +198,15 @@ async def create(
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
+
- model_name : str
- The video understanding model you wish to use.
+ **Values**:
+ - `audio`: Creates embeddings for an audio file
+ - `video`: Creates embeddings for a video file
+ - `image`: Creates embeddings for an image file
+ - `text`: Creates embeddings for text input
+ - `text_image`: Creates embeddings for text and an image.
text : typing.Optional[TextInputRequest]
@@ -234,7 +240,6 @@ async def create(
async def main() -> None:
await client.embed.v_2.create(
input_type="text",
- model_name="marengo3.0",
text=TextInputRequest(
input_text="man walking a dog",
),
@@ -245,7 +250,6 @@ async def main() -> None:
"""
_response = await self._raw_client.create(
input_type=input_type,
- model_name=model_name,
text=text,
image=image,
text_image=text_image,
diff --git a/src/twelvelabs/embed/v_2/raw_client.py b/src/twelvelabs/embed/v_2/raw_client.py
index 4cd1b47..4f4756b 100644
--- a/src/twelvelabs/embed/v_2/raw_client.py
+++ b/src/twelvelabs/embed/v_2/raw_client.py
@@ -32,7 +32,6 @@ def create(
self,
*,
input_type: CreateEmbeddingsRequestInputType,
- model_name: str,
text: typing.Optional[TextInputRequest] = OMIT,
image: typing.Optional[ImageInputRequest] = OMIT,
text_image: typing.Optional[TextImageInputRequest] = OMIT,
@@ -76,10 +75,15 @@ def create(
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
- model_name : str
- The video understanding model you wish to use.
+
+ **Values**:
+ - `audio`: Creates embeddings for an audio file
+ - `video`: Creates embeddings for a video file
+ - `image`: Creates embeddings for an image file
+ - `text`: Creates embeddings for text input
+ - `text_image`: Creates embeddings for text and an image.
text : typing.Optional[TextInputRequest]
@@ -104,7 +108,6 @@ def create(
method="POST",
json={
"input_type": input_type,
- "model_name": model_name,
"text": convert_and_respect_annotation_metadata(
object_=text, annotation=TextInputRequest, direction="write"
),
@@ -120,6 +123,7 @@ def create(
"video": convert_and_respect_annotation_metadata(
object_=video, annotation=VideoInputRequest, direction="write"
),
+ "model_name": "marengo3.0",
},
headers={
"content-type": "application/json",
@@ -184,7 +188,6 @@ async def create(
self,
*,
input_type: CreateEmbeddingsRequestInputType,
- model_name: str,
text: typing.Optional[TextInputRequest] = OMIT,
image: typing.Optional[ImageInputRequest] = OMIT,
text_image: typing.Optional[TextImageInputRequest] = OMIT,
@@ -228,10 +231,15 @@ async def create(
Parameters
----------
input_type : CreateEmbeddingsRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
+
- model_name : str
- The video understanding model you wish to use.
+ **Values**:
+ - `audio`: Creates embeddings for an audio file
+ - `video`: Creates embeddings for a video file
+ - `image`: Creates embeddings for an image file
+ - `text`: Creates embeddings for text input
+ - `text_image`: Creates embeddings for text and an image.
text : typing.Optional[TextInputRequest]
@@ -256,7 +264,6 @@ async def create(
method="POST",
json={
"input_type": input_type,
- "model_name": model_name,
"text": convert_and_respect_annotation_metadata(
object_=text, annotation=TextInputRequest, direction="write"
),
@@ -272,6 +279,7 @@ async def create(
"video": convert_and_respect_annotation_metadata(
object_=video, annotation=VideoInputRequest, direction="write"
),
+ "model_name": "marengo3.0",
},
headers={
"content-type": "application/json",
diff --git a/src/twelvelabs/embed/v_2/tasks/client.py b/src/twelvelabs/embed/v_2/tasks/client.py
index e8e681a..ea9b345 100644
--- a/src/twelvelabs/embed/v_2/tasks/client.py
+++ b/src/twelvelabs/embed/v_2/tasks/client.py
@@ -110,7 +110,6 @@ def create(
self,
*,
input_type: CreateAsyncEmbeddingRequestInputType,
- model_name: str,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
@@ -151,15 +150,12 @@ def create(
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
**Values**:
- `audio`: Audio files
- `video`: Video content
- model_name : str
- The model you wish to use.
-
audio : typing.Optional[AudioInputRequest]
video : typing.Optional[VideoInputRequest]
@@ -187,7 +183,6 @@ def create(
)
client.embed.v_2.tasks.create(
input_type="audio",
- model_name="marengo3.0",
audio=AudioInputRequest(
media_source=MediaSource(
url="https://user-bucket.com/audio/long-audio.wav",
@@ -205,7 +200,7 @@ def create(
)
"""
_response = self._raw_client.create(
- input_type=input_type, model_name=model_name, audio=audio, video=video, request_options=request_options
+ input_type=input_type, audio=audio, video=video, request_options=request_options
)
return _response.data
@@ -352,7 +347,6 @@ async def create(
self,
*,
input_type: CreateAsyncEmbeddingRequestInputType,
- model_name: str,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
@@ -393,15 +387,12 @@ async def create(
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
**Values**:
- `audio`: Audio files
- `video`: Video content
- model_name : str
- The model you wish to use.
-
audio : typing.Optional[AudioInputRequest]
video : typing.Optional[VideoInputRequest]
@@ -434,7 +425,6 @@ async def create(
async def main() -> None:
await client.embed.v_2.tasks.create(
input_type="audio",
- model_name="marengo3.0",
audio=AudioInputRequest(
media_source=MediaSource(
url="https://user-bucket.com/audio/long-audio.wav",
@@ -455,7 +445,7 @@ async def main() -> None:
asyncio.run(main())
"""
_response = await self._raw_client.create(
- input_type=input_type, model_name=model_name, audio=audio, video=video, request_options=request_options
+ input_type=input_type, audio=audio, video=video, request_options=request_options
)
return _response.data
diff --git a/src/twelvelabs/embed/v_2/tasks/raw_client.py b/src/twelvelabs/embed/v_2/tasks/raw_client.py
index c559a01..9bc150e 100644
--- a/src/twelvelabs/embed/v_2/tasks/raw_client.py
+++ b/src/twelvelabs/embed/v_2/tasks/raw_client.py
@@ -131,7 +131,6 @@ def create(
self,
*,
input_type: CreateAsyncEmbeddingRequestInputType,
- model_name: str,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
@@ -172,15 +171,12 @@ def create(
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
**Values**:
- `audio`: Audio files
- `video`: Video content
- model_name : str
- The model you wish to use.
-
audio : typing.Optional[AudioInputRequest]
video : typing.Optional[VideoInputRequest]
@@ -198,13 +194,13 @@ def create(
method="POST",
json={
"input_type": input_type,
- "model_name": model_name,
"audio": convert_and_respect_annotation_metadata(
object_=audio, annotation=AudioInputRequest, direction="write"
),
"video": convert_and_respect_annotation_metadata(
object_=video, annotation=VideoInputRequest, direction="write"
),
+ "model_name": "marengo3.0",
},
headers={
"content-type": "application/json",
@@ -415,7 +411,6 @@ async def create(
self,
*,
input_type: CreateAsyncEmbeddingRequestInputType,
- model_name: str,
audio: typing.Optional[AudioInputRequest] = OMIT,
video: typing.Optional[VideoInputRequest] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
@@ -456,15 +451,12 @@ async def create(
Parameters
----------
input_type : CreateAsyncEmbeddingRequestInputType
- The type of content for which you wish to create embeddings.
+ The type of content for the embeddings.
**Values**:
- `audio`: Audio files
- `video`: Video content
- model_name : str
- The model you wish to use.
-
audio : typing.Optional[AudioInputRequest]
video : typing.Optional[VideoInputRequest]
@@ -482,13 +474,13 @@ async def create(
method="POST",
json={
"input_type": input_type,
- "model_name": model_name,
"audio": convert_and_respect_annotation_metadata(
object_=audio, annotation=AudioInputRequest, direction="write"
),
"video": convert_and_respect_annotation_metadata(
object_=video, annotation=VideoInputRequest, direction="write"
),
+ "model_name": "marengo3.0",
},
headers={
"content-type": "application/json",
diff --git a/src/twelvelabs/indexes/indexed_assets/client.py b/src/twelvelabs/indexes/indexed_assets/client.py
index 34e00c8..2c846cd 100644
--- a/src/twelvelabs/indexes/indexed_assets/client.py
+++ b/src/twelvelabs/indexes/indexed_assets/client.py
@@ -122,7 +122,7 @@ def list(
Filter by size. Expressed in bytes.
created_at : typing.Optional[str]
- Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+ Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
updated_at : typing.Optional[str]
This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time.
@@ -263,20 +263,20 @@ def retrieve(
"""
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
- **Common use cases**:
+ Use this method to:
- - Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+ - Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
- - Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+ - Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
- - Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+ - Retrieve the embeddings:
+ - Include the `embeddingOption` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -301,7 +301,7 @@ def retrieve(
transcription : typing.Optional[bool]
- The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+ Specifies whether to retrieve a transcription of the spoken words.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -378,7 +378,7 @@ def update(
request_options: typing.Optional[RequestOptions] = None,
) -> None:
"""
- Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to `null`.
Parameters
----------
@@ -525,7 +525,7 @@ async def list(
Filter by size. Expressed in bytes.
created_at : typing.Optional[str]
- Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+ Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
updated_at : typing.Optional[str]
This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time.
@@ -683,20 +683,20 @@ async def retrieve(
"""
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
- **Common use cases**:
+ Use this method to:
- - Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+ - Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
- - Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+ - Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
- - Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+ - Retrieve the embeddings:
+ - Include the `embeddingOption` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -721,7 +721,7 @@ async def retrieve(
transcription : typing.Optional[bool]
- The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+ Specifies whether to retrieve a transcription of the spoken words.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -814,7 +814,7 @@ async def update(
request_options: typing.Optional[RequestOptions] = None,
) -> None:
"""
- Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
Parameters
----------
diff --git a/src/twelvelabs/indexes/indexed_assets/raw_client.py b/src/twelvelabs/indexes/indexed_assets/raw_client.py
index f82e1b6..d656030 100644
--- a/src/twelvelabs/indexes/indexed_assets/raw_client.py
+++ b/src/twelvelabs/indexes/indexed_assets/raw_client.py
@@ -120,7 +120,7 @@ def list(
Filter by size. Expressed in bytes.
created_at : typing.Optional[str]
- Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+ Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
updated_at : typing.Optional[str]
This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time.
@@ -333,20 +333,20 @@ def retrieve(
"""
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
- **Common use cases**:
+ Use this method to:
- - Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+ - Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
- - Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+ - Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
- - Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+ - Retrieve the embeddings:
+ - Include the `embedding_option` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -371,7 +371,7 @@ def retrieve(
transcription : typing.Optional[bool]
- The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+ Specifies whether to retrieve a transcription of the spoken words.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -481,7 +481,7 @@ def update(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[None]:
"""
- Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
Parameters
----------
@@ -625,7 +625,7 @@ async def list(
Filter by size. Expressed in bytes.
created_at : typing.Optional[str]
- Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets whose indexing tasks were created on the specified date at or after the given time.
+ Filter indexed assets by the creation date and time of their associated indexing tasks, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns indexed assets created on or after the specified date and time.
updated_at : typing.Optional[str]
This filter applies only to indexed assets updated using the [`PUT`](/v1.3/api-reference/videos/update) method of the `/indexes/{index-id}/indexed-assets/{indexed-asset-id}` endpoint. It filters indexed assets by the last update date and time, in the RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"). The platform returns the indexed assets that were last updated on the specified date at or after the given time.
@@ -841,20 +841,20 @@ async def retrieve(
"""
This method retrieves information about an indexed asset, including its status, metadata, and optional embeddings or transcription.
- **Common use cases**:
+ Use this method to:
- - Monitor indexing progress:
- - Call this endpoint after creating an indexed asset
- - Check the `status` field until it shows `ready`
- - Once ready, your content is available for search and analysis
+ - Monitor the indexing progress:
+ - Call this endpoint after creating an indexed asset
+ - Check the `status` field until it shows `ready`
+ - Once ready, your content is available for search and analysis
- - Retrieve asset metadata:
- - Retrieve system metadata (duration, resolution, filename)
- - Access user-defined metadata
+ - Retrieve the asset metadata:
+ - Retrieve system metadata (duration, resolution, filename)
+ - Access user-defined metadata
- - Retrieve embeddings:
- - Include the `embedding_option` parameter to retrieve video embeddings
- - Requires the Marengo video understanding model to be enabled in your index
+ - Retrieve the embeddings:
+ - Include the `embedding_option` parameter to retrieve video embeddings
+ - Requires the Marengo video understanding model to be enabled in your index
- Retrieve transcriptions:
- Set the `transcription` parameter to `true` to retrieve spoken words from your video
@@ -879,7 +879,7 @@ async def retrieve(
transcription : typing.Optional[bool]
- The parameter indicates whether to retrieve a transcription of the spoken words for the indexed asset.
+ Specifies whether to retrieve a transcription of the spoken words.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -989,7 +989,7 @@ async def update(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[None]:
"""
- Use this method to update one or more fields of the metadata of an indexed asset. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of an indexed asset. You can also delete a field by setting it to `null`.
Parameters
----------
diff --git a/src/twelvelabs/indexes/videos/client.py b/src/twelvelabs/indexes/videos/client.py
index c2273e2..3ef476e 100644
--- a/src/twelvelabs/indexes/videos/client.py
+++ b/src/twelvelabs/indexes/videos/client.py
@@ -289,7 +289,7 @@ def update(
"""
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
- Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
Parameters
----------
@@ -632,7 +632,7 @@ async def update(
"""
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
- Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
Parameters
----------
diff --git a/src/twelvelabs/indexes/videos/raw_client.py b/src/twelvelabs/indexes/videos/raw_client.py
index 455986f..6f8c38b 100644
--- a/src/twelvelabs/indexes/videos/raw_client.py
+++ b/src/twelvelabs/indexes/videos/raw_client.py
@@ -348,7 +348,7 @@ def update(
"""
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
- Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
Parameters
----------
@@ -726,7 +726,7 @@ async def update(
"""
This method will be deprecated in a future version. New implementations should use the [Partial update indexed asset](/v1.3/api-reference/index-content/update) method.
- Use this method to update one or more fields of the metadata of a video. Also, can delete a field by setting it to null.
+ This method updates one or more fields of the metadata of a video. You can also delete a field by setting it to `null`.
Parameters
----------
diff --git a/src/twelvelabs/multipart_upload/client.py b/src/twelvelabs/multipart_upload/client.py
index 569e97d..3c74333 100644
--- a/src/twelvelabs/multipart_upload/client.py
+++ b/src/twelvelabs/multipart_upload/client.py
@@ -102,7 +102,7 @@ def create(
Parameters
----------
filename : str
- Original filename of the asset
+ The original file name of the asset.
total_size : int
The total size of the file in bytes. The platform uses this value to:
@@ -144,14 +144,14 @@ def get_status(
"""
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
- Use this endpoint to:
+ Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+ You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
Parameters
----------
@@ -205,11 +205,10 @@ def report_chunk_batch(
request_options: typing.Optional[RequestOptions] = None,
) -> ReportChunkBatchResponse:
"""
- This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+ This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
Parameters
----------
@@ -393,7 +392,7 @@ async def create(
Parameters
----------
filename : str
- Original filename of the asset
+ The original file name of the asset.
total_size : int
The total size of the file in bytes. The platform uses this value to:
@@ -445,14 +444,14 @@ async def get_status(
"""
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
- Use this endpoint to:
+ Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+ You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
Parameters
----------
@@ -517,11 +516,10 @@ async def report_chunk_batch(
request_options: typing.Optional[RequestOptions] = None,
) -> ReportChunkBatchResponse:
"""
- This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+ This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
Parameters
----------
diff --git a/src/twelvelabs/multipart_upload/raw_client.py b/src/twelvelabs/multipart_upload/raw_client.py
index 35e4a10..37df0b5 100644
--- a/src/twelvelabs/multipart_upload/raw_client.py
+++ b/src/twelvelabs/multipart_upload/raw_client.py
@@ -149,7 +149,7 @@ def create(
Parameters
----------
filename : str
- Original filename of the asset
+ The original file name of the asset.
total_size : int
The total size of the file in bytes. The platform uses this value to:
@@ -238,14 +238,14 @@ def get_status(
"""
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
- Use this endpoint to:
+ Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+ You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
Parameters
----------
@@ -359,11 +359,10 @@ def report_chunk_batch(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[ReportChunkBatchResponse]:
"""
- This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+ This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
Parameters
----------
@@ -670,7 +669,7 @@ async def create(
Parameters
----------
filename : str
- Original filename of the asset
+ The original file name of the asset.
total_size : int
The total size of the file in bytes. The platform uses this value to:
@@ -759,14 +758,14 @@ async def get_status(
"""
This method provides information about an upload session, including its current status, chunk-level progress, and completion state.
- Use this endpoint to:
+ Use this method to:
- Verify upload completion (`status` = `completed`)
- Identify any failed chunks that require a retry
- Monitor the upload progress by comparing `uploaded_size` with `total_size`
- Determine if the session has expired
- Retrieve the status information for each chunk
- You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
+ You must call this method after reporting chunk completion to confirm the upload has transitioned to the `completed` status before using the asset.
Parameters
----------
@@ -883,11 +882,10 @@ async def report_chunk_batch(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[ReportChunkBatchResponse]:
"""
- This method notifies the platform which chunks have been successfully uploaded. When all chunks are reported, the platform finalizes the upload.
+ This method reports successfully uploaded chunks to the platform. The platform finalizes the upload after you report all chunks.
+
-
For optimal performance, report chunks in batches and in any order.
-
Parameters
----------
diff --git a/src/twelvelabs/types/asset.py b/src/twelvelabs/types/asset.py
index 97e6e26..816784c 100644
--- a/src/twelvelabs/types/asset.py
+++ b/src/twelvelabs/types/asset.py
@@ -32,10 +32,10 @@ class Asset(UniversalBaseModel):
status: typing.Optional[AssetStatus] = pydantic.Field(default=None)
"""
- Indicates the current state of the asset.
+ Indicates the current status of the asset.
**Values**:
- - `waiting`: The platform is preparing to process the upload
+ - `failed`: The platform failed to process the upload
- `processing`: The platform is processing the uploaded file
- `ready`: The asset is ready to use
"""
@@ -50,20 +50,6 @@ class Asset(UniversalBaseModel):
The MIME type of the asset file.
"""
- url: typing.Optional[str] = pydantic.Field(default=None)
- """
- The URL where you can access the asset file. Use this URL to preview or download the asset.
-
-
- This URL expires after the time specified in the `url_expires_at` field. After expiration, you must retrieve the asset again to obtain a new URL.
-
- """
-
- url_expires_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- The date and time, in RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"), when the URL expires. After this time, the URL in the `url` field becomes invalid. Retrieve the asset again to obtain a new URL.
- """
-
created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
"""
The date and time, in RFC 3339 format ("YYYY-MM-DDTHH:mm:ssZ"), when the asset was created.
diff --git a/src/twelvelabs/types/asset_status.py b/src/twelvelabs/types/asset_status.py
index afbbc51..80a1487 100644
--- a/src/twelvelabs/types/asset_status.py
+++ b/src/twelvelabs/types/asset_status.py
@@ -2,4 +2,4 @@
import typing
-AssetStatus = typing.Union[typing.Literal["waiting", "processing", "ready"], typing.Any]
+AssetStatus = typing.Union[typing.Literal["failed", "processing", "ready"], typing.Any]
diff --git a/src/twelvelabs/types/create_asset_upload_response.py b/src/twelvelabs/types/create_asset_upload_response.py
index ba9364c..43bbc80 100644
--- a/src/twelvelabs/types/create_asset_upload_response.py
+++ b/src/twelvelabs/types/create_asset_upload_response.py
@@ -26,12 +26,11 @@ class CreateAssetUploadResponse(UniversalBaseModel):
upload_urls: typing.Optional[typing.List[PresignedUrlChunk]] = pydantic.Field(default=None)
"""
- The initial set of presigned URLs for uploading chunks. Each URL corresponds to a specific chunk.
+ An array containing the initial set of presigned URLs for uploading chunks. Each URL corresponds to a specific chunk.
-