From 521192bfff0186993ee141f7e9e71e66c9fc2884 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Fri, 29 Nov 2024 11:20:32 +0000
Subject: [PATCH] SDK regeneration
---
poetry.lock | 261 +-
pyproject.toml | 2 +-
reference.md | 2937 ++++++++++-------
src/elevenlabs/__init__.py | 211 +-
src/elevenlabs/audio_isolation/client.py | 52 -
src/elevenlabs/base_client.py | 12 +-
src/elevenlabs/conversational_ai/__init__.py | 13 +
src/elevenlabs/conversational_ai/client.py | 2209 +++++++++++++
.../conversational_ai/types/__init__.py | 13 +
...nvai_agents_agent_id_patch_secrets_item.py | 48 +
src/elevenlabs/core/client_wrapper.py | 2 +-
src/elevenlabs/dubbing/client.py | 32 -
src/elevenlabs/history/__init__.py | 3 +
src/elevenlabs/history/client.py | 21 +
src/elevenlabs/history/types/__init__.py | 5 +
.../types/history_get_all_request_source.py | 5 +
src/elevenlabs/projects/__init__.py | 4 +-
src/elevenlabs/projects/client.py | 65 +-
src/elevenlabs/projects/types/__init__.py | 3 +-
.../types/projects_add_request_fiction.py | 5 +
src/elevenlabs/speech_to_speech/client.py | 80 +-
.../text_to_sound_effects/client.py | 34 -
src/elevenlabs/text_to_speech/client.py | 16 +-
src/elevenlabs/text_to_voice/__init__.py | 3 +
src/elevenlabs/text_to_voice/client.py | 79 +-
.../text_to_voice/types/__init__.py | 5 +
...e_create_previews_request_output_format.py | 20 +
src/elevenlabs/types/__init__.py | 196 +-
....py => add_agent_secret_response_model.py} | 10 +-
.../add_knowledge_base_response_model.py | 19 +
src/elevenlabs/types/agent_ban.py | 22 +
src/elevenlabs/types/agent_config.py | 30 +
src/elevenlabs/types/agent_config_override.py | 22 +
.../types/agent_metadata_response_model.py | 19 +
.../types/agent_platform_settings.py | 28 +
.../types/agent_summary_response_model.py | 21 +
src/elevenlabs/types/allowlist_item.py | 19 +
.../types/array_json_schema_property.py | 30 +
.../types/array_json_schema_property_items.py | 13 +
.../types/asr_conversational_config.py | 25 +
src/elevenlabs/types/asr_input_format.py | 7 +
src/elevenlabs/types/asr_provider.py | 5 +
src/elevenlabs/types/asr_quality.py | 5 +
src/elevenlabs/types/auth_settings.py | 22 +
src/elevenlabs/types/authorization_method.py | 7 +
src/elevenlabs/types/ban_reason_type.py | 5 +
src/elevenlabs/types/breakdown_types.py | 2 +-
src/elevenlabs/types/chapter_response.py | 1 +
src/elevenlabs/types/client_event.py | 21 +
src/elevenlabs/types/client_tool_config.py | 35 +
.../types/conv_ai_new_secret_config.py | 20 +
.../types/conv_ai_secret_locator.py | 23 +
.../types/conv_ai_stored_secret_config.py | 20 +
.../conversation_charging_common_model.py | 19 +
src/elevenlabs/types/conversation_config.py | 21 +
.../conversation_config_client_override.py | 22 +
...versation_history_analysis_common_model.py | 29 +
...evaluation_criteria_result_common_model.py | 22 +
...versation_history_metadata_common_model.py | 25 +
...rsation_history_transcript_common_model.py | 26 +
...on_history_transcript_common_model_role.py | 5 +
...story_transcript_tool_call_common_model.py | 22 +
...ory_transcript_tool_result_common_model.py | 23 +
.../conversation_initiation_client_data.py | 21 +
.../conversation_signed_url_response_model.py | 19 +
.../conversation_summary_response_model.py | 28 +
...versation_summary_response_model_status.py | 5 +
.../types/conversation_token_db_model.py | 23 +
.../types/conversation_token_purpose.py | 5 +
src/elevenlabs/types/conversational_config.py | 36 +
.../types/create_agent_response_model.py | 19 +
src/elevenlabs/types/custom_llm.py | 22 +
.../data_collection_result_common_model.py | 23 +
src/elevenlabs/types/embed_config.py | 38 +
src/elevenlabs/types/embed_config_avatar.py | 58 +
src/elevenlabs/types/embed_variant.py | 5 +
src/elevenlabs/types/evaluation_settings.py | 25 +
.../types/evaluation_success_result.py | 5 +
.../types/get_agent_embed_response_model.py | 21 +
.../types/get_agent_link_response_model.py | 21 +
.../types/get_agent_response_model.py | 36 +
.../types/get_agents_page_response_model.py | 22 +
.../types/get_conversation_response_model.py | 30 +
.../get_conversation_response_model_status.py | 5 +
.../get_conversations_page_response_model.py | 22 +
.../types/get_knowledge_base_reponse_model.py | 22 +
.../get_knowledge_base_reponse_model_type.py | 5 +
src/elevenlabs/types/image_avatar.py | 19 +
.../types/knowledge_base_locator.py | 22 +
.../types/knowledge_base_locator_type.py | 5 +
.../types/literal_json_schema_property.py | 21 +
.../literal_json_schema_property_type.py | 5 +
src/elevenlabs/types/llm.py | 21 +
.../types/object_json_schema_property.py | 29 +
...t_json_schema_property_properties_value.py | 13 +
src/elevenlabs/types/orb_avatar.py | 20 +
.../types/post_agent_avatar_response_model.py | 20 +
.../types/project_extended_response_model.py | 6 +
...response_model_apply_text_normalization.py | 7 +
...project_extended_response_model_fiction.py | 5 +
src/elevenlabs/types/project_response.py | 4 +
.../types/project_response_model_fiction.py | 5 +
src/elevenlabs/types/prompt_agent.py | 37 +
src/elevenlabs/types/prompt_agent_override.py | 19 +
.../types/prompt_agent_tools_item.py | 56 +
.../types/prompt_evaluation_criteria.py | 26 +
...ronunciation_dictionary_version_locator.py | 26 +
.../types/query_params_json_schema.py | 21 +
.../types/reader_resource_response_model.py | 21 +
...r_resource_response_model_resource_type.py | 5 +
...o_provider_response_model_provider_type.py | 5 -
.../types/tts_conversational_config.py | 31 +
.../tts_conversational_config_override.py | 19 +
.../types/tts_conversational_model.py | 5 +
.../types/tts_optimize_streaming_latency.py | 3 +
src/elevenlabs/types/tts_output_format.py | 7 +
src/elevenlabs/types/turn_config.py | 21 +
src/elevenlabs/types/turn_mode.py | 5 +
src/elevenlabs/types/url_avatar.py | 19 +
src/elevenlabs/types/user.py | 2 +
src/elevenlabs/types/voice.py | 1 +
.../types/voice_preview_response_model.py | 5 +-
.../types/voice_previews_response_model.py | 1 +
.../types/voice_sharing_response.py | 2 +
.../types/webhook_tool_api_schema_config.py | 40 +
.../webhook_tool_api_schema_config_method.py | 5 +
...api_schema_config_request_headers_value.py | 6 +
src/elevenlabs/types/webhook_tool_config.py | 34 +
src/elevenlabs/voice_generation/client.py | 4 +-
src/elevenlabs/workspace/client.py | 127 -
130 files changed, 6529 insertions(+), 1723 deletions(-)
create mode 100644 src/elevenlabs/conversational_ai/__init__.py
create mode 100644 src/elevenlabs/conversational_ai/client.py
create mode 100644 src/elevenlabs/conversational_ai/types/__init__.py
create mode 100644 src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py
create mode 100644 src/elevenlabs/history/types/__init__.py
create mode 100644 src/elevenlabs/history/types/history_get_all_request_source.py
create mode 100644 src/elevenlabs/projects/types/projects_add_request_fiction.py
create mode 100644 src/elevenlabs/text_to_voice/types/__init__.py
create mode 100644 src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py
rename src/elevenlabs/types/{sso_provider_response_model.py => add_agent_secret_response_model.py} (66%)
create mode 100644 src/elevenlabs/types/add_knowledge_base_response_model.py
create mode 100644 src/elevenlabs/types/agent_ban.py
create mode 100644 src/elevenlabs/types/agent_config.py
create mode 100644 src/elevenlabs/types/agent_config_override.py
create mode 100644 src/elevenlabs/types/agent_metadata_response_model.py
create mode 100644 src/elevenlabs/types/agent_platform_settings.py
create mode 100644 src/elevenlabs/types/agent_summary_response_model.py
create mode 100644 src/elevenlabs/types/allowlist_item.py
create mode 100644 src/elevenlabs/types/array_json_schema_property.py
create mode 100644 src/elevenlabs/types/array_json_schema_property_items.py
create mode 100644 src/elevenlabs/types/asr_conversational_config.py
create mode 100644 src/elevenlabs/types/asr_input_format.py
create mode 100644 src/elevenlabs/types/asr_provider.py
create mode 100644 src/elevenlabs/types/asr_quality.py
create mode 100644 src/elevenlabs/types/auth_settings.py
create mode 100644 src/elevenlabs/types/authorization_method.py
create mode 100644 src/elevenlabs/types/ban_reason_type.py
create mode 100644 src/elevenlabs/types/client_event.py
create mode 100644 src/elevenlabs/types/client_tool_config.py
create mode 100644 src/elevenlabs/types/conv_ai_new_secret_config.py
create mode 100644 src/elevenlabs/types/conv_ai_secret_locator.py
create mode 100644 src/elevenlabs/types/conv_ai_stored_secret_config.py
create mode 100644 src/elevenlabs/types/conversation_charging_common_model.py
create mode 100644 src/elevenlabs/types/conversation_config.py
create mode 100644 src/elevenlabs/types/conversation_config_client_override.py
create mode 100644 src/elevenlabs/types/conversation_history_analysis_common_model.py
create mode 100644 src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py
create mode 100644 src/elevenlabs/types/conversation_history_metadata_common_model.py
create mode 100644 src/elevenlabs/types/conversation_history_transcript_common_model.py
create mode 100644 src/elevenlabs/types/conversation_history_transcript_common_model_role.py
create mode 100644 src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py
create mode 100644 src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py
create mode 100644 src/elevenlabs/types/conversation_initiation_client_data.py
create mode 100644 src/elevenlabs/types/conversation_signed_url_response_model.py
create mode 100644 src/elevenlabs/types/conversation_summary_response_model.py
create mode 100644 src/elevenlabs/types/conversation_summary_response_model_status.py
create mode 100644 src/elevenlabs/types/conversation_token_db_model.py
create mode 100644 src/elevenlabs/types/conversation_token_purpose.py
create mode 100644 src/elevenlabs/types/conversational_config.py
create mode 100644 src/elevenlabs/types/create_agent_response_model.py
create mode 100644 src/elevenlabs/types/custom_llm.py
create mode 100644 src/elevenlabs/types/data_collection_result_common_model.py
create mode 100644 src/elevenlabs/types/embed_config.py
create mode 100644 src/elevenlabs/types/embed_config_avatar.py
create mode 100644 src/elevenlabs/types/embed_variant.py
create mode 100644 src/elevenlabs/types/evaluation_settings.py
create mode 100644 src/elevenlabs/types/evaluation_success_result.py
create mode 100644 src/elevenlabs/types/get_agent_embed_response_model.py
create mode 100644 src/elevenlabs/types/get_agent_link_response_model.py
create mode 100644 src/elevenlabs/types/get_agent_response_model.py
create mode 100644 src/elevenlabs/types/get_agents_page_response_model.py
create mode 100644 src/elevenlabs/types/get_conversation_response_model.py
create mode 100644 src/elevenlabs/types/get_conversation_response_model_status.py
create mode 100644 src/elevenlabs/types/get_conversations_page_response_model.py
create mode 100644 src/elevenlabs/types/get_knowledge_base_reponse_model.py
create mode 100644 src/elevenlabs/types/get_knowledge_base_reponse_model_type.py
create mode 100644 src/elevenlabs/types/image_avatar.py
create mode 100644 src/elevenlabs/types/knowledge_base_locator.py
create mode 100644 src/elevenlabs/types/knowledge_base_locator_type.py
create mode 100644 src/elevenlabs/types/literal_json_schema_property.py
create mode 100644 src/elevenlabs/types/literal_json_schema_property_type.py
create mode 100644 src/elevenlabs/types/llm.py
create mode 100644 src/elevenlabs/types/object_json_schema_property.py
create mode 100644 src/elevenlabs/types/object_json_schema_property_properties_value.py
create mode 100644 src/elevenlabs/types/orb_avatar.py
create mode 100644 src/elevenlabs/types/post_agent_avatar_response_model.py
create mode 100644 src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py
create mode 100644 src/elevenlabs/types/project_extended_response_model_fiction.py
create mode 100644 src/elevenlabs/types/project_response_model_fiction.py
create mode 100644 src/elevenlabs/types/prompt_agent.py
create mode 100644 src/elevenlabs/types/prompt_agent_override.py
create mode 100644 src/elevenlabs/types/prompt_agent_tools_item.py
create mode 100644 src/elevenlabs/types/prompt_evaluation_criteria.py
create mode 100644 src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py
create mode 100644 src/elevenlabs/types/query_params_json_schema.py
create mode 100644 src/elevenlabs/types/reader_resource_response_model.py
create mode 100644 src/elevenlabs/types/reader_resource_response_model_resource_type.py
delete mode 100644 src/elevenlabs/types/sso_provider_response_model_provider_type.py
create mode 100644 src/elevenlabs/types/tts_conversational_config.py
create mode 100644 src/elevenlabs/types/tts_conversational_config_override.py
create mode 100644 src/elevenlabs/types/tts_conversational_model.py
create mode 100644 src/elevenlabs/types/tts_optimize_streaming_latency.py
create mode 100644 src/elevenlabs/types/tts_output_format.py
create mode 100644 src/elevenlabs/types/turn_config.py
create mode 100644 src/elevenlabs/types/turn_mode.py
create mode 100644 src/elevenlabs/types/url_avatar.py
create mode 100644 src/elevenlabs/types/webhook_tool_api_schema_config.py
create mode 100644 src/elevenlabs/types/webhook_tool_api_schema_config_method.py
create mode 100644 src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py
create mode 100644 src/elevenlabs/types/webhook_tool_config.py
diff --git a/poetry.lock b/poetry.lock
index a4c918e..31db4ba 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -199,13 +199,13 @@ files = [
[[package]]
name = "httpcore"
-version = "1.0.6"
+version = "1.0.7"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"},
- {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"},
+ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
+ {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
]
[package.dependencies]
@@ -220,13 +220,13 @@ trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
-version = "0.27.2"
+version = "0.28.0"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
- {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
+ {file = "httpx-0.28.0-py3-none-any.whl", hash = "sha256:dc0b419a0cfeb6e8b34e85167c0da2671206f5095f1baa9663d23bcfd6b535fc"},
+ {file = "httpx-0.28.0.tar.gz", hash = "sha256:0858d3bab51ba7e386637f22a61d8ccddaeec5f3fe4209da3a6168dbb91573e0"},
]
[package.dependencies]
@@ -234,7 +234,6 @@ anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
-sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
@@ -327,13 +326,13 @@ files = [
[[package]]
name = "packaging"
-version = "24.1"
+version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
- {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
+ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
+ {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
[[package]]
@@ -364,6 +363,8 @@ files = [
{file = "PyAudio-0.2.14-cp311-cp311-win_amd64.whl", hash = "sha256:bbeb01d36a2f472ae5ee5e1451cacc42112986abe622f735bb870a5db77cf903"},
{file = "PyAudio-0.2.14-cp312-cp312-win32.whl", hash = "sha256:5fce4bcdd2e0e8c063d835dbe2860dac46437506af509353c7f8114d4bacbd5b"},
{file = "PyAudio-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:12f2f1ba04e06ff95d80700a78967897a489c05e093e3bffa05a84ed9c0a7fa3"},
+ {file = "PyAudio-0.2.14-cp313-cp313-win32.whl", hash = "sha256:95328285b4dab57ea8c52a4a996cb52be6d629353315be5bfda403d15932a497"},
+ {file = "PyAudio-0.2.14-cp313-cp313-win_amd64.whl", hash = "sha256:692d8c1446f52ed2662120bcd9ddcb5aa2b71f38bda31e58b19fb4672fffba69"},
{file = "PyAudio-0.2.14-cp38-cp38-win32.whl", hash = "sha256:858caf35b05c26d8fc62f1efa2e8f53d5fa1a01164842bd622f70ddc41f55000"},
{file = "PyAudio-0.2.14-cp38-cp38-win_amd64.whl", hash = "sha256:2dac0d6d675fe7e181ba88f2de88d321059b69abd52e3f4934a8878e03a7a074"},
{file = "PyAudio-0.2.14-cp39-cp39-win32.whl", hash = "sha256:f745109634a7c19fa4d6b8b7d6967c3123d988c9ade0cd35d4295ee1acdb53e9"},
@@ -376,22 +377,19 @@ test = ["numpy"]
[[package]]
name = "pydantic"
-version = "2.9.2"
+version = "2.10.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
- {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
+ {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"},
+ {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
-pydantic-core = "2.23.4"
-typing-extensions = [
- {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
- {version = ">=4.6.1", markers = "python_version < \"3.13\""},
-]
+pydantic-core = "2.27.1"
+typing-extensions = ">=4.12.2"
[package.extras]
email = ["email-validator (>=2.0.0)"]
@@ -399,100 +397,111 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.23.4"
+version = "2.27.1"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
- {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
- {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
- {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
- {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
- {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
- {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
- {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
- {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
- {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
- {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
- {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
- {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
- {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
- {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
- {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
- {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
- {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
- {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
- {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
- {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
- {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
- {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
- {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
- {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
- {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
- {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
- {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
- {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
- {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
- {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
- {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
- {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
- {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
- {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
- {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
- {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
- {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
- {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
- {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
- {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
- {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
- {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
- {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
- {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"},
+ {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"},
+ {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"},
+ {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"},
+ {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"},
+ {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"},
+ {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"},
+ {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"},
+ {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"},
+ {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"},
+ {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"},
+ {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"},
+ {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"},
+ {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"},
+ {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"},
+ {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"},
+ {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"},
+ {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"},
+ {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"},
+ {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"},
+ {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"},
+ {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"},
+ {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"},
+ {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"},
+ {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"},
]
[package.dependencies]
@@ -624,13 +633,43 @@ files = [
[[package]]
name = "tomli"
-version = "2.0.2"
+version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
- {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
- {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
+ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
+ {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
+ {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
+ {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
+ {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
+ {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
+ {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
+ {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
+ {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
+ {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index 287f8ae..3a07be8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "elevenlabs"
-version = "1.13.0"
+version = "1.13.1"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 3fa8ba0..5ab4d72 100644
--- a/reference.md
+++ b/reference.md
@@ -75,6 +75,22 @@ client.history.get_all(
-
+**search:** `typing.Optional[str]` — search term used for filtering
+
+
+
+
+
+-
+
+**source:** `typing.Optional[HistoryGetAllRequestSource]` — Source of the generated history item
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -376,235 +392,7 @@ client.history.download(
## TextToSoundEffects
-client.text_to_sound_effects.convert(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Converts a text of your choice into sound
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.text_to_sound_effects.convert(
- text="string",
- duration_seconds=1.1,
- prompt_influence=1.1,
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**text:** `str` — The text that will get converted into a sound effect.
-
-
-
-
-
--
-
-**duration_seconds:** `typing.Optional[float]` — The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None.
-
-
-
-
-
--
-
-**prompt_influence:** `typing.Optional[float]` — A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3.
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
-
-
-
-
-
-
-
-
-
-
-
## AudioIsolation
-client.audio_isolation.audio_isolation(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Removes background noise from audio
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.audio_isolation.audio_isolation()
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**audio:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
-
-
-
-
-
-
-
-
-
-
-
-client.audio_isolation.audio_isolation_stream(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Removes background noise from audio and streams the result
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.audio_isolation.audio_isolation_stream()
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**audio:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
-
-
-
-
-
-
-
-
-
-
-
## Samples
client.samples.delete(...)
@@ -895,7 +683,7 @@ client.text_to_speech.convert(
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
@@ -1086,7 +874,7 @@ client.text_to_speech.convert_with_timestamps(
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
@@ -1286,7 +1074,7 @@ client.text_to_speech.convert_as_stream(
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
@@ -1479,7 +1267,7 @@ client.text_to_speech.stream_with_timestamps(
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
@@ -1550,7 +1338,8 @@ client.text_to_speech.stream_with_timestamps(
## SpeechToSpeech
-client.speech_to_speech.convert(...)
+## VoiceGeneration
+client.voice_generation.generate_parameters()
-
@@ -1562,7 +1351,7 @@ client.text_to_speech.stream_with_timestamps(
-
-Create speech by combining the content and emotion of the uploaded audio with a voice of your choice.
+Get possible parameters for the /v1/voice-generation/generate-voice endpoint.
@@ -1582,12 +1371,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.speech_to_speech.convert(
- voice_id="string",
- enable_logging=True,
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
-)
+client.voice_generation.generate_parameters()
```
@@ -1603,49 +1387,81 @@ client.speech_to_speech.convert(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-
-**audio:** `from __future__ import annotations
-core.File` — See core.File for more documentation
-
+
+client.voice_generation.generate(...)
-
-**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
-
-
-
+#### 📝 Description
-
-**optimize_streaming_latency:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
-
-
-
-
-
-**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio.
-
+Generate a random voice based on parameters. This method returns a generated_voice_id in the response header, and a sample of the voice in the body. If you like the generated voice call /v1/voice-generation/create-voice with the generated_voice_id to create the voice.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.voice_generation.generate(
+ gender="female",
+ accent="american",
+ age="middle_aged",
+ accent_strength=2.0,
+ text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**gender:** `Gender` — Category code corresponding to the gender of the generated voice. Possible values: female, male.
+
-
-**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.
+**accent:** `str` — Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian.
@@ -1653,7 +1469,7 @@ core.File` — See core.File for more documentation
-
-**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
+**age:** `Age` — Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old.
@@ -1661,7 +1477,7 @@ core.File` — See core.File for more documentation
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**accent_strength:** `float` — The strength of the accent of the generated voice. Has to be between 0.3 and 2.0.
@@ -1669,7 +1485,7 @@ core.File` — See core.File for more documentation
-
-**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+**text:** `str` — Text to generate, text length has to be between 100 and 1000.
@@ -1689,7 +1505,7 @@ core.File` — See core.File for more documentation
-client.speech_to_speech.convert_as_stream(...)
+client.voice_generation.create_a_previously_generated_voice(...)
-
@@ -1701,7 +1517,7 @@ core.File` — See core.File for more documentation
-
-Create speech by combining the content and emotion of the uploaded audio with a voice of your choice and returns an audio stream.
+Create a previously generated voice. This endpoint should be called after you fetched a generated_voice_id using /v1/voice-generation/generate-voice.
@@ -1721,11 +1537,10 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.speech_to_speech.convert_as_stream(
- voice_id="string",
- enable_logging="0",
- optimize_streaming_latency="mp3_22050_32",
- output_format="string",
+client.voice_generation.create_a_previously_generated_voice(
+ voice_name="Alex",
+ voice_description="Middle-aged American woman",
+ generated_voice_id="rbVJFu6SGRD1dbWpKnWl",
)
```
@@ -1742,54 +1557,7 @@ client.speech_to_speech.convert_as_stream(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
-
-
-
-
--
-
-**audio:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**enable_logging:** `typing.Optional[OptimizeStreamingLatency]` — You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
-
-
-
-
-
--
-
-**optimize_streaming_latency:** `typing.Optional[OutputFormat]` — The output format of the generated audio.
-
-
-
-
-
--
-
-**output_format:** `typing.Optional[str]`
-
-Output format of the generated audio. Must be one of:
-mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
-mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
-mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
-mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
-mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
-mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
-pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
-pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
-pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
-pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
-ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+**voice_name:** `str` — Name to use for the created voice.
@@ -1797,7 +1565,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-
-**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.
+**voice_description:** `str` — Description to use for the created voice.
@@ -1805,7 +1573,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-
-**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
+**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
@@ -1813,7 +1581,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF.
@@ -1821,7 +1589,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-
-**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
@@ -1829,7 +1597,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -1841,8 +1609,8 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-## VoiceGeneration
-client.voice_generation.generate_parameters()
+## TextToVoice
+client.text_to_voice.create_previews(...)
-
@@ -1854,7 +1622,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-
-Get possible parameters for the /v1/voice-generation/generate-voice endpoint.
+Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like a voice preview and want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
@@ -1874,7 +1642,10 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voice_generation.generate_parameters()
+client.text_to_voice.create_previews(
+ voice_description="A sassy little squeaky mouse",
+ text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.",
+)
```
@@ -1890,6 +1661,51 @@ client.voice_generation.generate_parameters()
-
+**voice_description:** `str` — Description to use for the created voice.
+
+
+
+
+
+-
+
+**text:** `str` — Text to generate, text length has to be between 100 and 1000.
+
+
+
+
+
+-
+
+**output_format:** `typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]`
+
+Output format of the generated audio. Must be one of:
+mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
+mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
+mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
+mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
+mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
+mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
+pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
+pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
+pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
+pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
+ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+
+
+
+
+
+-
+
+**auto_generate_text:** `typing.Optional[bool]` — Whether to automatically generate a text suitable for the voice description.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -1902,7 +1718,7 @@ client.voice_generation.generate_parameters()
-client.voice_generation.generate(...)
+client.text_to_voice.create_voice_from_preview(...)
-
@@ -1914,7 +1730,7 @@ client.voice_generation.generate_parameters()
-
-Generate a random voice based on parameters. This method returns a generated_voice_id in the response header, and a sample of the voice in the body. If you like the generated voice call /v1/voice-generation/create-voice with the generated_voice_id to create the voice.
+Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews.
@@ -1934,12 +1750,10 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voice_generation.generate(
- gender="female",
- accent="american",
- age="middle_aged",
- accent_strength=2.0,
- text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
+client.text_to_voice.create_voice_from_preview(
+ voice_name="Little squeaky mouse",
+ voice_description="A sassy little squeaky mouse",
+ generated_voice_id="37HceQefKmEi3bGovXjL",
)
```
@@ -1956,7 +1770,7 @@ client.voice_generation.generate(
-
-**gender:** `Gender` — Category code corresponding to the gender of the generated voice. Possible values: female, male.
+**voice_name:** `str` — Name to use for the created voice.
@@ -1964,7 +1778,7 @@ client.voice_generation.generate(
-
-**accent:** `str` — Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian.
+**voice_description:** `str` — Description to use for the created voice.
@@ -1972,7 +1786,7 @@ client.voice_generation.generate(
-
-**age:** `Age` — Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old.
+**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
@@ -1980,7 +1794,7 @@ client.voice_generation.generate(
-
-**accent_strength:** `float` — The strength of the accent of the generated voice. Has to be between 0.3 and 2.0.
+**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
@@ -1988,7 +1802,7 @@ client.voice_generation.generate(
-
-**text:** `str` — Text to generate, text length has to be between 100 and 1000.
+**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF.
@@ -1996,7 +1810,7 @@ client.voice_generation.generate(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2008,7 +1822,8 @@ client.voice_generation.generate(
-client.voice_generation.create_a_previously_generated_voice(...)
+## User
+client.user.get_subscription()
-
@@ -2020,7 +1835,7 @@ client.voice_generation.generate(
-
-Create a previously generated voice. This endpoint should be called after you fetched a generated_voice_id using /v1/voice-generation/generate-voice.
+Gets extended information about the users subscription
@@ -2040,11 +1855,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voice_generation.create_a_previously_generated_voice(
- voice_name="Alex",
- voice_description="Middle-aged American woman",
- generated_voice_id="rbVJFu6SGRD1dbWpKnWl",
-)
+client.user.get_subscription()
```
@@ -2060,42 +1871,62 @@ client.voice_generation.create_a_previously_generated_voice(
-
-**voice_name:** `str` — Name to use for the created voice.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**voice_description:** `str` — Description to use for the created voice.
-
+
+client.user.get()
-
-**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet.
-
-
-
+#### 📝 Description
-
-**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF.
-
+
+-
+
+Gets information about the user
+
+
+#### 🔌 Usage
+
-
-**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
-
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.user.get()
+
+```
+
+
+
+#### ⚙️ Parameters
+
+
+-
-
@@ -2112,8 +1943,8 @@ client.voice_generation.create_a_previously_generated_voice(
-## TextToVoice
-client.text_to_voice.create_previews(...)
+## voices
+client.voices.get_all(...)
-
@@ -2125,7 +1956,7 @@ client.voice_generation.create_a_previously_generated_voice(
-
-Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
+Gets a list of all available voices for a user.
@@ -2145,10 +1976,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.text_to_voice.create_previews(
- voice_description="voice_description",
- text="text",
-)
+client.voices.get_all()
```
@@ -2164,15 +1992,7 @@ client.text_to_voice.create_previews(
-
-**voice_description:** `str` — Description to use for the created voice.
-
-
-
-
-
--
-
-**text:** `str` — Text to generate, text length has to be between 100 and 1000.
+**show_legacy:** `typing.Optional[bool]` — If set to true, legacy premade voices will be included in responses from /v1/voices
@@ -2192,7 +2012,7 @@ client.text_to_voice.create_previews(
-client.text_to_voice.create_voice_from_preview(...)
+client.voices.get_default_settings()
-
@@ -2204,7 +2024,7 @@ client.text_to_voice.create_previews(
-
-Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews.
+Gets the default settings for voices. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
@@ -2224,11 +2044,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.text_to_voice.create_voice_from_preview(
- voice_name="voice_name",
- voice_description="voice_description",
- generated_voice_id="generated_voice_id",
-)
+client.voices.get_default_settings()
```
@@ -2244,46 +2060,6 @@ client.text_to_voice.create_voice_from_preview(
-
-**voice_name:** `str` — Name to use for the created voice.
-
-
-
-
-
--
-
-**voice_description:** `str` — Description to use for the created voice.
-
-
-
-
-
--
-
-**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet.
-
-
-
-
-
--
-
-**labels:** `typing.Optional[typing.Dict[str, str]]` — Optional, metadata to add to the created voice. Defaults to None.
-
-
-
-
-
--
-
-**played_not_selected_voice_ids:** `typing.Optional[typing.Sequence[str]]` — List of voice ids that the user has played but not selected. Used for RLHF.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2296,8 +2072,7 @@ client.text_to_voice.create_voice_from_preview(
-## User
-client.user.get_subscription()
+client.voices.get_settings(...)
-
@@ -2309,7 +2084,7 @@ client.text_to_voice.create_voice_from_preview(
-
-Gets extended information about the users subscription
+Returns the settings for a specific voice. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
@@ -2329,7 +2104,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.user.get_subscription()
+client.voices.get_settings(
+ voice_id="2EiwWnXFnvU5JabPnv8n",
+)
```
@@ -2345,6 +2122,14 @@ client.user.get_subscription()
-
+**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2357,7 +2142,7 @@ client.user.get_subscription()
-client.user.get()
+client.voices.get(...)
-
@@ -2369,7 +2154,7 @@ client.user.get_subscription()
-
-Gets information about the user
+Returns metadata about a specific voice.
@@ -2389,7 +2174,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.user.get()
+client.voices.get(
+ voice_id="29vD33N1CtxCmqQRPOHJ",
+)
```
@@ -2405,6 +2192,22 @@ client.user.get()
-
+**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+
+
+
+
+
+-
+
+**with_settings:** `typing.Optional[bool]` — If set will return settings information corresponding to the voice, requires authorization.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2417,8 +2220,7 @@ client.user.get()
-## voices
-client.voices.get_all(...)
+client.voices.delete(...)
-
@@ -2430,7 +2232,7 @@ client.user.get()
-
-Gets a list of all available voices for a user.
+Deletes a voice by its ID.
@@ -2450,7 +2252,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.get_all()
+client.voices.delete(
+ voice_id="29vD33N1CtxCmqQRPOHJ",
+)
```
@@ -2466,7 +2270,7 @@ client.voices.get_all()
-
-**show_legacy:** `typing.Optional[bool]` — If set to true, legacy premade voices will be included in responses from /v1/voices
+**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
@@ -2486,7 +2290,7 @@ client.voices.get_all()
-client.voices.get_default_settings()
+client.voices.edit_settings(...)
-
@@ -2498,7 +2302,7 @@ client.voices.get_all()
-
-Gets the default settings for voices. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
+Edit your settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
@@ -2513,12 +2317,19 @@ Gets the default settings for voices. "similarity_boost" corresponds to"Clarity
-
```python
-from elevenlabs import ElevenLabs
+from elevenlabs import ElevenLabs, VoiceSettings
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.get_default_settings()
+client.voices.edit_settings(
+ voice_id="29vD33N1CtxCmqQRPOHJ",
+ request=VoiceSettings(
+ stability=0.1,
+ similarity_boost=0.3,
+ style=0.2,
+ ),
+)
```
@@ -2534,6 +2345,22 @@ client.voices.get_default_settings()
-
+**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+
+
+
+
+
+-
+
+**request:** `VoiceSettings`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2546,7 +2373,7 @@ client.voices.get_default_settings()
-client.voices.get_settings(...)
+client.voices.add(...)
-
@@ -2558,7 +2385,7 @@ client.voices.get_default_settings()
-
-Returns the settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
+Add a new voice to your collection of voices in VoiceLab.
@@ -2578,8 +2405,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.get_settings(
- voice_id="2EiwWnXFnvU5JabPnv8n",
+client.voices.add(
+ name="Alex",
)
```
@@ -2596,7 +2423,41 @@ client.voices.get_settings(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website.
+
+
+
+
+
+-
+
+**files:** `from __future__ import annotations
+
+typing.List[core.File]` — See core.File for more documentation
+
+
+
+
+
+-
+
+**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — How would you describe the voice?
+
+
+
+
+
+-
+
+**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice.
@@ -2616,7 +2477,7 @@ client.voices.get_settings(
-client.voices.get(...)
+client.voices.edit(...)
-
@@ -2628,7 +2489,7 @@ client.voices.get_settings(
-
-Returns metadata about a specific voice.
+Edit a voice created by you.
@@ -2648,8 +2509,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.get(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+client.voices.edit(
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ name="George",
)
```
@@ -2674,7 +2536,41 @@ client.voices.get(
-
-**with_settings:** `typing.Optional[bool]` — If set will return settings information corresponding to the voice, requires authorization.
+**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website.
+
+
+
+
+
+-
+
+**files:** `from __future__ import annotations
+
+typing.Optional[typing.List[core.File]]` — See core.File for more documentation
+
+
+
+
+
+-
+
+**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — How would you describe the voice?
+
+
+
+
+
+-
+
+**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice.
@@ -2694,7 +2590,7 @@ client.voices.get(
-client.voices.delete(...)
+client.voices.add_sharing_voice(...)
-
@@ -2706,7 +2602,7 @@ client.voices.get(
-
-Deletes a voice by its ID.
+Add a sharing voice to your collection of voices in VoiceLab.
@@ -2726,8 +2622,10 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.delete(
- voice_id="29vD33N1CtxCmqQRPOHJ",
+client.voices.add_sharing_voice(
+ public_user_id="63e84100a6bf7874ba37a1bab9a31828a379ec94b891b401653b655c5110880f",
+ voice_id="sB1b5zUrxQVAFl2PhZFp",
+ new_name="Alita",
)
```
@@ -2744,6 +2642,14 @@ client.voices.delete(
-
+**public_user_id:** `str` — Public user ID used to publicly identify ElevenLabs users.
+
+
+
+
+
+-
+
**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
@@ -2752,6 +2658,14 @@ client.voices.delete(
-
+**new_name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2764,7 +2678,7 @@ client.voices.delete(
-client.voices.edit_settings(...)
+client.voices.get_shared(...)
-
@@ -2776,7 +2690,7 @@ client.voices.delete(
-
-Edit your settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
+Gets a list of shared voices.
@@ -2791,18 +2705,15 @@ Edit your settings for a specific voice. "similarity_boost" corresponds to"Clari
-
```python
-from elevenlabs import ElevenLabs, VoiceSettings
+from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.edit_settings(
- voice_id="29vD33N1CtxCmqQRPOHJ",
- request=VoiceSettings(
- stability=0.1,
- similarity_boost=0.3,
- style=0.2,
- ),
+client.voices.get_shared(
+ page_size=1,
+ gender="female",
+ language="en",
)
```
@@ -2819,7 +2730,7 @@ client.voices.edit_settings(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+**page_size:** `typing.Optional[int]` — How many shared voices to return at maximum. Can not exceed 100, defaults to 30.
@@ -2827,7 +2738,7 @@ client.voices.edit_settings(
-
-**request:** `VoiceSettings`
+**category:** `typing.Optional[str]` — voice category used for filtering
@@ -2835,69 +2746,63 @@ client.voices.edit_settings(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**gender:** `typing.Optional[str]` — gender used for filtering
-
-
+
+-
+**age:** `typing.Optional[str]` — age used for filtering
+
-
-client.voices.add(...)
-
-#### 📝 Description
-
-
--
+**accent:** `typing.Optional[str]` — accent used for filtering
+
+
+
-
-Add a new voice to your collection of voices in VoiceLab.
-
-
+**language:** `typing.Optional[str]` — language used for filtering
+
-#### 🔌 Usage
-
-
+**search:** `typing.Optional[str]` — search term used for filtering
+
+
+
+
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.voices.add(
- name="Alex",
-)
-
-```
-
-
+**use_cases:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — use-case used for filtering
+
-#### ⚙️ Parameters
-
-
+**descriptives:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — search term used for filtering
+
+
+
+
-
-**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website.
+**featured:** `typing.Optional[bool]` — Filter featured voices
@@ -2905,9 +2810,7 @@ client.voices.add(
-
-**files:** `from __future__ import annotations
-
-typing.List[core.File]` — See core.File for more documentation
+**reader_app_enabled:** `typing.Optional[bool]` — Filter voices that are enabled for the reader app
@@ -2915,7 +2818,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+**owner_id:** `typing.Optional[str]` — Filter voices by public owner ID
@@ -2923,7 +2826,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**description:** `typing.Optional[str]` — How would you describe the voice?
+**sort:** `typing.Optional[str]` — sort criteria
@@ -2931,7 +2834,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice.
+**page:** `typing.Optional[int]`
@@ -2951,7 +2854,7 @@ typing.List[core.File]` — See core.File for more documentation
-client.voices.edit(...)
+client.voices.get_similar_library_voices(...)
-
@@ -2963,7 +2866,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-Edit a voice created by you.
+Returns a list of shared voices similar to the provided audio sample. If neither similarity_threshold nor top_k is provided, we will apply default values.
@@ -2983,10 +2886,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.edit(
- voice_id="JBFqnCBsd6RMkjVDRZzb",
- name="George",
-)
+client.voices.get_similar_library_voices()
```
@@ -3002,33 +2902,9 @@ client.voices.edit(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
-
-
-
-
--
-
-**name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website.
-
-
-
-
-
--
-
-**files:** `from __future__ import annotations
-
-typing.Optional[typing.List[core.File]]` — See core.File for more documentation
-
-
-
-
-
--
+**audio_file:** `from __future__ import annotations
-**remove_background_noise:** `typing.Optional[bool]` — If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
+typing.Optional[core.File]` — See core.File for more documentation
@@ -3036,7 +2912,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
-**description:** `typing.Optional[str]` — How would you describe the voice?
+**similarity_threshold:** `typing.Optional[float]` — Threshold for voice similarity between provided sample and library voices. Must be in range <0, 2>. The smaller the value the more similar voices will be returned.
@@ -3044,7 +2920,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
-**labels:** `typing.Optional[str]` — Serialized labels dictionary for the voice.
+**top_k:** `typing.Optional[int]` — Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Must be in range <1, 100>.
@@ -3064,7 +2940,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-client.voices.add_sharing_voice(...)
+client.voices.get_a_profile_page(...)
-
@@ -3076,7 +2952,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
-Add a sharing voice to your collection of voices in VoiceLab.
+Gets a profile page based on a handle
@@ -3096,10 +2972,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.add_sharing_voice(
- public_user_id="63e84100a6bf7874ba37a1bab9a31828a379ec94b891b401653b655c5110880f",
- voice_id="sB1b5zUrxQVAFl2PhZFp",
- new_name="Alita",
+client.voices.get_a_profile_page(
+ handle="talexgeorge",
)
```
@@ -3116,7 +2990,7 @@ client.voices.add_sharing_voice(
-
-**public_user_id:** `str` — Public user ID used to publicly identify ElevenLabs users.
+**handle:** `str` — Handle for a VA's profile page
@@ -3124,19 +2998,64 @@ client.voices.add_sharing_voice(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+## Projects
+client.projects.get_all()
+
+-
+
+#### 📝 Description
-
-**new_name:** `str` — The name that identifies this voice. This will be displayed in the dropdown of the website.
-
+
+-
+
+Returns a list of your projects together and its metadata.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.projects.get_all()
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
@@ -3152,7 +3071,7 @@ client.voices.add_sharing_voice(
-client.voices.get_shared(...)
+client.projects.add(...)
-
@@ -3164,7 +3083,7 @@ client.voices.add_sharing_voice(
-
-Gets a list of shared voices.
+Creates a new project, it can be either initialized as blank, from a document or from a URL.
@@ -3184,10 +3103,11 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.get_shared(
- page_size=1,
- gender="female",
- language="en",
+client.projects.add(
+ name="name",
+ default_title_voice_id="default_title_voice_id",
+ default_paragraph_voice_id="default_paragraph_voice_id",
+ default_model_id="default_model_id",
)
```
@@ -3204,7 +3124,7 @@ client.voices.get_shared(
-
-**page_size:** `typing.Optional[int]` — How many shared voices to return at maximum. Can not exceed 100, defaults to 30.
+**name:** `str` — The name of the project, used for identification only.
@@ -3212,7 +3132,7 @@ client.voices.get_shared(
-
-**category:** `typing.Optional[str]` — voice category used for filtering
+**default_title_voice_id:** `str` — The voice_id that corresponds to the default voice used for new titles.
@@ -3220,7 +3140,7 @@ client.voices.get_shared(
-
-**gender:** `typing.Optional[str]` — gender used for filtering
+**default_paragraph_voice_id:** `str` — The voice_id that corresponds to the default voice used for new paragraphs.
@@ -3228,7 +3148,7 @@ client.voices.get_shared(
-
-**age:** `typing.Optional[str]` — age used for filtering
+**default_model_id:** `str` — The model_id of the model to be used for this project, you can query GET https://api.elevenlabs.io/v1/models to list all available models.
@@ -3236,7 +3156,7 @@ client.voices.get_shared(
-
-**accent:** `typing.Optional[str]` — accent used for filtering
+**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank.
@@ -3244,7 +3164,9 @@ client.voices.get_shared(
-
-**language:** `typing.Optional[str]` — language used for filtering
+**from_document:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -3252,7 +3174,14 @@ client.voices.get_shared(
-
-**search:** `typing.Optional[str]` — search term used for filtering
+**quality_preset:** `typing.Optional[str]`
+
+Output quality of the generated audio. Must be one of:
+standard - standard output format, 128kbps with 44.1kHz sample rate.
+high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%.
+ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%.
+ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%.
+
@@ -3260,7 +3189,7 @@ client.voices.get_shared(
-
-**use_cases:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — use-case used for filtering
+**title:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
@@ -3268,7 +3197,7 @@ client.voices.get_shared(
-
-**descriptives:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — search term used for filtering
+**author:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
@@ -3276,7 +3205,7 @@ client.voices.get_shared(
-
-**featured:** `typing.Optional[bool]` — Filter featured voices
+**description:** `typing.Optional[str]` — An optional description of the project.
@@ -3284,7 +3213,7 @@ client.voices.get_shared(
-
-**reader_app_enabled:** `typing.Optional[bool]` — Filter voices that are enabled for the reader app
+**genres:** `typing.Optional[typing.List[str]]` — An optional list of genres associated with the project.
@@ -3292,7 +3221,7 @@ client.voices.get_shared(
-
-**owner_id:** `typing.Optional[str]` — Filter voices by public owner ID
+**target_audience:** `typing.Optional[ProjectsAddRequestTargetAudience]` — An optional target audience of the project.
@@ -3300,7 +3229,7 @@ client.voices.get_shared(
-
-**sort:** `typing.Optional[str]` — sort criteria
+**language:** `typing.Optional[str]` — An optional language of the project. Two-letter language code (ISO 639-1).
@@ -3308,7 +3237,7 @@ client.voices.get_shared(
-
-**page:** `typing.Optional[int]`
+**content_type:** `typing.Optional[str]` — An optional content type of the project.
@@ -3316,69 +3245,47 @@ client.voices.get_shared(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**original_publication_date:** `typing.Optional[str]` — An optional original publication date of the project, in the format YYYY-MM-DD or YYYY.
-
-
+
+-
+**mature_content:** `typing.Optional[bool]` — An optional mature content of the project.
+
-
-client.voices.get_similar_library_voices(...)
-
-#### 📝 Description
+**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
+
+
+
-
+**acx_volume_normalization:** `typing.Optional[bool]` — [Deprecated] When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+
+
+
+
-
-Returns a list of shared voices similar to the provided audio sample. If neither similarity_threshold nor top_k is provided, we will apply default values.
-
-
+**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+
-#### 🔌 Usage
-
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.voices.get_similar_library_voices()
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**audio_file:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**pronunciation_dictionary_locators:** `typing.Optional[typing.List[str]]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
@@ -3386,7 +3293,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**similarity_threshold:** `typing.Optional[float]` — Threshold for voice similarity between provided sample and library voices. Must be in range <0, 2>. The smaller the value the more similar voices will be returned.
+**fiction:** `typing.Optional[ProjectsAddRequestFiction]` — An optional fiction of the project.
@@ -3394,7 +3301,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**top_k:** `typing.Optional[int]` — Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Must be in range <1, 100>.
+**quality_check_on:** `typing.Optional[bool]` — Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
@@ -3414,7 +3321,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-client.voices.get_a_profile_page(...)
+client.projects.get(...)
-
@@ -3426,7 +3333,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Gets a profile page based on a handle
+Returns information about a specific project. This endpoint returns more detailed information about a project than GET api.elevenlabs.io/v1/projects.
@@ -3446,8 +3353,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.voices.get_a_profile_page(
- handle="talexgeorge",
+client.projects.get(
+ project_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -3464,7 +3371,7 @@ client.voices.get_a_profile_page(
-
-**handle:** `str` — Handle for a VA's profile page
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
@@ -3484,8 +3391,7 @@ client.voices.get_a_profile_page(
-## Projects
-client.projects.get_all()
+client.projects.edit_basic_project_info(...)
-
@@ -3497,7 +3403,7 @@ client.voices.get_a_profile_page(
-
-Returns a list of your projects together and its metadata.
+Edits basic project info.
@@ -3517,7 +3423,12 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.get_all()
+client.projects.edit_basic_project_info(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+ default_title_voice_id="default_title_voice_id",
+ default_paragraph_voice_id="default_paragraph_voice_id",
+)
```
@@ -3533,72 +3444,47 @@ client.projects.get_all()
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
-
+
+-
+**name:** `str` — The name of the project, used for identification only.
+
-
-client.projects.add(...)
-
-#### 📝 Description
-
-
--
+**default_title_voice_id:** `str` — The voice_id that corresponds to the default voice used for new titles.
+
+
+
-
-Creates a new project, it can be either initialized as blank, from a document or from a URL.
-
-
+**default_paragraph_voice_id:** `str` — The voice_id that corresponds to the default voice used for new paragraphs.
+
-#### 🔌 Usage
-
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.projects.add(
- name="name",
- default_title_voice_id="default_title_voice_id",
- default_paragraph_voice_id="default_paragraph_voice_id",
- default_model_id="default_model_id",
-)
-
-```
-
-
+**title:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**name:** `str` — The name of the project, used for identification only.
+**author:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
@@ -3606,7 +3492,7 @@ client.projects.add(
-
-**default_title_voice_id:** `str` — The voice_id that corresponds to the default voice used for new titles.
+**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
@@ -3614,7 +3500,7 @@ client.projects.add(
-
-**default_paragraph_voice_id:** `str` — The voice_id that corresponds to the default voice used for new paragraphs.
+**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
@@ -3622,7 +3508,7 @@ client.projects.add(
-
-**default_model_id:** `str` — The model_id of the model to be used for this project, you can query GET https://api.elevenlabs.io/v1/models to list all available models.
+**quality_check_on:** `typing.Optional[bool]` — Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
@@ -3630,72 +3516,69 @@ client.projects.add(
-
-**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-
-**from_document:** `from __future__ import annotations
-typing.Optional[core.File]` — See core.File for more documentation
-
+
+client.projects.delete(...)
-
-**quality_preset:** `typing.Optional[str]`
-
-Output quality of the generated audio. Must be one of:
-standard - standard output format, 128kbps with 44.1kHz sample rate.
-high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%.
-ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%.
-ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%.
+#### 📝 Description
-
-
-
+
+-
-
-**title:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
-
+Delete a project by its project_id.
+
+
+#### 🔌 Usage
+
-
-**author:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
-
-
-
-
-
-**description:** `typing.Optional[str]` — An optional description of the project.
-
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.projects.delete(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**genres:** `typing.Optional[typing.List[str]]` — An optional list of genres associated with the project.
-
-
-
-
-
-**target_audience:** `typing.Optional[ProjectsAddRequestTargetAudience]` — An optional target audience of the project.
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
@@ -3703,63 +3586,69 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-**language:** `typing.Optional[str]` — An optional language of the project. Two-letter language code (ISO 639-1).
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**content_type:** `typing.Optional[str]` — An optional content type of the project.
-
+
+client.projects.convert(...)
-
-**original_publication_date:** `typing.Optional[str]` — An optional original publication date of the project, in the format YYYY-MM-DD or YYYY.
-
-
-
+#### 📝 Description
-
-**mature_content:** `typing.Optional[bool]` — An optional mature content of the project.
-
-
-
-
-
-**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
-
+Starts conversion of a project and all of its chapters.
+
+
+#### 🔌 Usage
+
-
-**acx_volume_normalization:** `typing.Optional[bool]` — [Deprecated] When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
-
-
-
-
-
-**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
-
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.projects.convert(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**pronunciation_dictionary_locators:** `typing.Optional[typing.List[str]]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
@@ -3779,7 +3668,7 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-client.projects.get(...)
+client.projects.get_snapshots(...)
-
@@ -3791,7 +3680,7 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-Returns information about a specific project. This endpoint returns more detailed information about a project than GET api.elevenlabs.io/v1/projects.
+Gets the snapshots of a project.
@@ -3811,7 +3700,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.get(
+client.projects.get_snapshots(
project_id="21m00Tcm4TlvDq8ikWAM",
)
@@ -3849,7 +3738,7 @@ client.projects.get(
-client.projects.edit_basic_project_info(...)
+client.projects.stream_archive(...)
-
@@ -3861,7 +3750,7 @@ client.projects.get(
-
-Edits basic project info.
+Streams archive with project audio.
@@ -3881,11 +3770,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.edit_basic_project_info(
+client.projects.stream_archive(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- default_title_voice_id="default_title_voice_id",
- default_paragraph_voice_id="default_paragraph_voice_id",
+ project_snapshot_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -3910,7 +3797,7 @@ client.projects.edit_basic_project_info(
-
-**name:** `str` — The name of the project, used for identification only.
+**project_snapshot_id:** `str` — The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project.
@@ -3918,31 +3805,478 @@ client.projects.edit_basic_project_info(
-
-**default_title_voice_id:** `str` — The voice_id that corresponds to the default voice used for new titles.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**default_paragraph_voice_id:** `str` — The voice_id that corresponds to the default voice used for new paragraphs.
-
-
--
-**title:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
-
+
+client.projects.add_chapter_to_a_project(...)
-
-**author:** `typing.Optional[str]` — An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Creates a new chapter either as blank or from a URL.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.projects.add_chapter_to_a_project(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+
+
+
+
+-
+
+**name:** `str` — The name of the chapter, used for identification only.
+
+
+
+
+
+-
+
+**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_document' must be null. If neither 'from_url' nor 'from_document' is provided we will initialize the project as blank.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.projects.update_pronunciation_dictionaries(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.projects.update_pronunciation_dictionaries(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ pronunciation_dictionary_locators=[
+ PronunciationDictionaryVersionLocator(
+ pronunciation_dictionary_id="pronunciation_dictionary_id",
+ version_id="version_id",
+ )
+ ],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+
+
+
+
+-
+
+**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Chapters
+client.chapters.get_all(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Returns a list of your chapters for a project together with its metadata.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.chapters.get_all(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.chapters.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Returns information about a specific chapter.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.chapters.get(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ chapter_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+
+
+
+
+-
+
+**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.chapters.delete(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a chapter by its chapter_id.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.chapters.delete(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ chapter_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+
+
+
+
+-
+
+**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.chapters.convert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Starts conversion of a specific chapter.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.chapters.convert(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ chapter_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+
+
+
+
+-
+
+**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
@@ -3950,15 +4284,174 @@ client.projects.edit_basic_project_info(
-
-**isbn_number:** `typing.Optional[str]` — An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.chapters.get_all_snapshots(...)
-
-**volume_normalization:** `typing.Optional[bool]` — When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+#### 📝 Description
+
+
+-
+
+
+-
+
+Gets information about all the snapshots of a chapter; each snapshot can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.chapters.get_all_snapshots(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ chapter_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+
+
+
+
+-
+
+**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.chapters.stream_snapshot(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Stream the audio from a chapter snapshot. Use `GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the chapter snapshots of a chapter.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.chapters.stream_snapshot(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ chapter_id="21m00Tcm4TlvDq8ikWAM",
+ chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+
+
+
+
+
+-
+
+**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+
+
+
+
+
+-
+
+**chapter_snapshot_id:** `str` — The chapter_snapshot_id of the chapter snapshot. You can query GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots to list all available snapshots for a chapter.
+
+
+
+
+
+-
+
+**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format.
@@ -3978,57 +4471,148 @@ client.projects.edit_basic_project_info(
-client.projects.delete(...)
+## Dubbing
+client.dubbing.dub_a_video_or_an_audio_file(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Dubs provided audio or video file into given language.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.dubbing.dub_a_video_or_an_audio_file(
+ target_lang="target_lang",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**target_lang:** `str` — The Target language to dub the content into.
+
+
+
+
+
+-
+
+**file:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the dubbing project.
+
+
+
+
+
+-
+
+**source_url:** `typing.Optional[str]` — URL of the source video/audio file.
+
+
+
+
+
+-
+
+**source_lang:** `typing.Optional[str]` — Source language.
+
+
+
+
-
-#### 📝 Description
+**num_speakers:** `typing.Optional[int]` — Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers
+
+
+
-
+**watermark:** `typing.Optional[bool]` — Whether to apply watermark to the output video.
+
+
+
+
-
-Delete a project by its project_id.
-
-
+**start_time:** `typing.Optional[int]` — Start time of the source video/audio file.
+
-#### 🔌 Usage
-
-
+**end_time:** `typing.Optional[int]` — End time of the source video/audio file.
+
+
+
+
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.projects.delete(
- project_id="21m00Tcm4TlvDq8ikWAM",
-)
-
-```
-
-
+**highest_resolution:** `typing.Optional[bool]` — Whether to use the highest resolution available.
+
-#### ⚙️ Parameters
-
-
+**drop_background_audio:** `typing.Optional[bool]` — An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues.
+
+
+
+
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**use_profanity_filter:** `typing.Optional[bool]` — [BETA] Whether transcripts should have profanities censored with the words '[censored]'
@@ -4048,7 +4632,7 @@ client.projects.delete(
-client.projects.convert(...)
+client.dubbing.get_dubbing_project_metadata(...)
-
@@ -4060,7 +4644,7 @@ client.projects.delete(
-
-Starts conversion of a project and all of its chapters.
+Returns metadata about a dubbing project, including whether it's still in progress or not
@@ -4080,8 +4664,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.convert(
- project_id="21m00Tcm4TlvDq8ikWAM",
+client.dubbing.get_dubbing_project_metadata(
+ dubbing_id="dubbing_id",
)
```
@@ -4098,7 +4682,7 @@ client.projects.convert(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**dubbing_id:** `str` — ID of the dubbing project.
@@ -4118,7 +4702,7 @@ client.projects.convert(
-client.projects.get_snapshots(...)
+client.dubbing.delete_dubbing_project(...)
-
@@ -4130,7 +4714,7 @@ client.projects.convert(
-
-Gets the snapshots of a project.
+Deletes a dubbing project.
@@ -4150,8 +4734,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.get_snapshots(
- project_id="21m00Tcm4TlvDq8ikWAM",
+client.dubbing.delete_dubbing_project(
+ dubbing_id="dubbing_id",
)
```
@@ -4168,7 +4752,7 @@ client.projects.get_snapshots(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**dubbing_id:** `str` — ID of the dubbing project.
@@ -4188,7 +4772,7 @@ client.projects.get_snapshots(
-client.projects.stream_audio(...)
+client.dubbing.get_transcript_for_dub(...)
-
@@ -4200,7 +4784,7 @@ client.projects.get_snapshots(
-
-Stream the audio from a project snapshot.
+Returns transcript for the dub as an SRT file.
@@ -4220,10 +4804,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.stream_audio(
- project_id="string",
- project_snapshot_id="string",
- convert_to_mpeg=True,
+client.dubbing.get_transcript_for_dub(
+ dubbing_id="dubbing_id",
+ language_code="language_code",
)
```
@@ -4240,7 +4823,7 @@ client.projects.stream_audio(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**dubbing_id:** `str` — ID of the dubbing project.
@@ -4248,7 +4831,7 @@ client.projects.stream_audio(
-
-**project_snapshot_id:** `str` — The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project.
+**language_code:** `str` — ID of the language.
@@ -4256,7 +4839,9 @@ client.projects.stream_audio(
-
-**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format.
+**format_type:** `typing.Optional[
+ GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType
+]` — Format to use for the subtitle file, either 'srt' or 'webvtt'
@@ -4264,7 +4849,7 @@ client.projects.stream_audio(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4276,7 +4861,8 @@ client.projects.stream_audio(
-client.projects.stream_archive(...)
+## Models
+client.models.get_all()
-
@@ -4288,7 +4874,7 @@ client.projects.stream_audio(
-
-Streams archive with project audio.
+Gets a list of available models.
@@ -4308,10 +4894,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.stream_archive(
- project_id="21m00Tcm4TlvDq8ikWAM",
- project_snapshot_id="21m00Tcm4TlvDq8ikWAM",
-)
+client.models.get_all()
```
@@ -4327,22 +4910,6 @@ client.projects.stream_archive(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
-
-
-
-
--
-
-**project_snapshot_id:** `str` — The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4355,7 +4922,8 @@ client.projects.stream_archive(
-client.projects.add_chapter_to_a_project(...)
+## AudioNative
+client.audio_native.create(...)
-
@@ -4367,7 +4935,7 @@ client.projects.stream_archive(
-
-Creates a new chapter either as blank or from a URL.
+Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet.
@@ -4387,8 +4955,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.projects.add_chapter_to_a_project(
- project_id="21m00Tcm4TlvDq8ikWAM",
+client.audio_native.create(
name="name",
)
@@ -4406,7 +4973,7 @@ client.projects.add_chapter_to_a_project(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**name:** `str` — Project name.
@@ -4414,7 +4981,7 @@ client.projects.add_chapter_to_a_project(
-
-**name:** `str` — The name of the chapter, used for identification only.
+**image:** `typing.Optional[str]` — Image URL used in the player. If not provided, default image set in the Player settings is used.
@@ -4422,7 +4989,7 @@ client.projects.add_chapter_to_a_project(
-
-**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the project as blank.
+**author:** `typing.Optional[str]` — Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
@@ -4430,75 +4997,65 @@ client.projects.add_chapter_to_a_project(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**title:** `typing.Optional[str]` — Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
-
-
+
+-
+**small:** `typing.Optional[bool]` — Whether to use small player or not. If not provided, default value set in the Player settings is used.
+
-
-client.projects.update_pronunciation_dictionaries(...)
-
-#### 📝 Description
+**text_color:** `typing.Optional[str]` — Text color used in the player. If not provided, default text color set in the Player settings is used.
+
+
+
-
+**background_color:** `typing.Optional[str]` — Background color used in the player. If not provided, default background color set in the Player settings is used.
+
+
+
+
-
-Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does.
-
-
+**sessionization:** `typing.Optional[int]` — Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
+
-#### 🔌 Usage
-
-
+**voice_id:** `typing.Optional[str]` — Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
+
+
+
+
-
-```python
-from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.projects.update_pronunciation_dictionaries(
- project_id="21m00Tcm4TlvDq8ikWAM",
- pronunciation_dictionary_locators=[
- PronunciationDictionaryVersionLocator(
- pronunciation_dictionary_id="pronunciation_dictionary_id",
- version_id="version_id",
- )
- ],
-)
-
-```
-
-
+**model_id:** `typing.Optional[str]` — TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
+
-#### ⚙️ Parameters
-
-
-
--
+**file:** `from __future__ import annotations
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+typing.Optional[core.File]` — See core.File for more documentation
@@ -4506,7 +5063,7 @@ client.projects.update_pronunciation_dictionaries(
-
-**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
@@ -4526,8 +5083,8 @@ client.projects.update_pronunciation_dictionaries(
-## Chapters
-client.chapters.get_all(...)
+## Usage
+client.usage.get_characters_usage_metrics(...)
-
@@ -4539,7 +5096,7 @@ client.projects.update_pronunciation_dictionaries(
-
-Returns a list of your chapters for a project together and its metadata.
+Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
@@ -4559,8 +5116,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.chapters.get_all(
- project_id="21m00Tcm4TlvDq8ikWAM",
+client.usage.get_characters_usage_metrics(
+ start_unix=1,
+ end_unix=1,
)
```
@@ -4577,7 +5135,7 @@ client.chapters.get_all(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**start_unix:** `int` — UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day.
@@ -4585,70 +5143,15 @@ client.chapters.get_all(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**end_unix:** `int` — UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day.
-
-
-
-
-
-
-
-
-
-
-client.chapters.get(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Returns information about a specific chapter.
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.chapters.get(
- project_id="21m00Tcm4TlvDq8ikWAM",
- chapter_id="21m00Tcm4TlvDq8ikWAM",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**include_workspace_metrics:** `typing.Optional[bool]` — Whether or not to include the statistics of the entire workspace.
@@ -4656,7 +5159,7 @@ client.chapters.get(
-
-**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+**breakdown_type:** `typing.Optional[BreakdownTypes]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False.
@@ -4676,7 +5179,8 @@ client.chapters.get(
-client.chapters.delete(...)
+## PronunciationDictionary
+client.pronunciation_dictionary.add_from_file(...)
-
@@ -4688,7 +5192,7 @@ client.chapters.get(
-
-Delete a chapter by its chapter_id.
+Creates a new pronunciation dictionary from a lexicon .PLS file
@@ -4708,9 +5212,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.chapters.delete(
- project_id="21m00Tcm4TlvDq8ikWAM",
- chapter_id="21m00Tcm4TlvDq8ikWAM",
+client.pronunciation_dictionary.add_from_file(
+ name="name",
)
```
@@ -4727,7 +5230,7 @@ client.chapters.delete(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**name:** `str` — The name of the pronunciation dictionary, used for identification only.
@@ -4735,7 +5238,25 @@ client.chapters.delete(
-
-**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+**file:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — A description of the pronunciation dictionary, used for identification only.
+
+
+
+
+
+-
+
+**workspace_access:** `typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]` — Should be one of 'editor' or 'viewer'. If not provided, defaults to no access.
@@ -4755,7 +5276,7 @@ client.chapters.delete(
-client.chapters.convert(...)
+client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...)
-
@@ -4767,7 +5288,7 @@ client.chapters.delete(
-
-Starts conversion of a specific chapter.
+Add rules to the pronunciation dictionary
@@ -4783,13 +5304,22 @@ Starts conversion of a specific chapter.
```python
from elevenlabs import ElevenLabs
+from elevenlabs.pronunciation_dictionary import (
+ PronunciationDictionaryRule_Phoneme,
+)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.chapters.convert(
- project_id="21m00Tcm4TlvDq8ikWAM",
- chapter_id="21m00Tcm4TlvDq8ikWAM",
+client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
+ pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
+ rules=[
+ PronunciationDictionaryRule_Phoneme(
+ string_to_replace="rules",
+ phoneme="rules",
+ alphabet="rules",
+ )
+ ],
)
```
@@ -4806,7 +5336,7 @@ client.chapters.convert(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
@@ -4814,7 +5344,11 @@ client.chapters.convert(
-
-**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+**rules:** `typing.Sequence[PronunciationDictionaryRule]`
+
+List of pronunciation rules. Rule can be either:
+ an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b', }
+ or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa' }
@@ -4834,7 +5368,7 @@ client.chapters.convert(
-client.chapters.get_all_snapshots(...)
+client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...)
-
@@ -4846,7 +5380,7 @@ client.chapters.convert(
-
-Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created.
+Remove rules from the pronunciation dictionary
@@ -4866,9 +5400,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.chapters.get_all_snapshots(
- project_id="21m00Tcm4TlvDq8ikWAM",
- chapter_id="21m00Tcm4TlvDq8ikWAM",
+client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
+ pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
+ rule_strings=["rule_strings"],
)
```
@@ -4885,7 +5419,7 @@ client.chapters.get_all_snapshots(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
+**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
@@ -4893,7 +5427,7 @@ client.chapters.get_all_snapshots(
-
-**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
+**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary.
@@ -4913,7 +5447,7 @@ client.chapters.get_all_snapshots(
-client.chapters.stream_snapshot(...)
+client.pronunciation_dictionary.download(...)
-
@@ -4925,7 +5459,7 @@ client.chapters.get_all_snapshots(
-
-Stream the audio from a chapter snapshot. Use `GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the chapter snapshots of a chapter.
+Get PLS file with a pronunciation dictionary version rules
@@ -4945,10 +5479,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.chapters.stream_snapshot(
- project_id="21m00Tcm4TlvDq8ikWAM",
- chapter_id="21m00Tcm4TlvDq8ikWAM",
- chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM",
+client.pronunciation_dictionary.download(
+ dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
+ version_id="KZFyRUq3R6kaqhKI146w",
)
```
@@ -4965,23 +5498,7 @@ client.chapters.stream_snapshot(
-
-**project_id:** `str` — The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
-
-
-
-
--
-
-**chapter_id:** `str` — The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
-
-
-
-
-
--
-
-**chapter_snapshot_id:** `str` — The chapter_snapshot_id of the chapter snapshot. You can query GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots to the all available snapshots for a chapter.
+**dictionary_id:** `str` — The id of the pronunciation dictionary
@@ -4989,7 +5506,7 @@ client.chapters.stream_snapshot(
-
-**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format.
+**version_id:** `str` — The id of the version of the pronunciation dictionary
@@ -5009,8 +5526,7 @@ client.chapters.stream_snapshot(
-## Dubbing
-client.dubbing.dub_a_video_or_an_audio_file(...)
+client.pronunciation_dictionary.get(...)
-
@@ -5022,7 +5538,7 @@ client.chapters.stream_snapshot(
-
-Dubs provided audio or video file into given language.
+Get metadata for a pronunciation dictionary
@@ -5042,8 +5558,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.dubbing.dub_a_video_or_an_audio_file(
- target_lang="target_lang",
+client.pronunciation_dictionary.get(
+ pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
)
```
@@ -5060,7 +5576,7 @@ client.dubbing.dub_a_video_or_an_audio_file(
-
-**target_lang:** `str` — The Target language to dub the content into.
+**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
@@ -5068,81 +5584,69 @@ client.dubbing.dub_a_video_or_an_audio_file(
-
-**file:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the dubbing project.
-
-
--
-**source_url:** `typing.Optional[str]` — URL of the source video/audio file.
-
+
+client.pronunciation_dictionary.get_all(...)
-
-**source_lang:** `typing.Optional[str]` — Source language.
-
-
-
+#### 📝 Description
-
-**num_speakers:** `typing.Optional[int]` — Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers
-
-
-
-
-
-**watermark:** `typing.Optional[bool]` — Whether to apply watermark to the output video.
-
+Get a list of the pronunciation dictionaries you have access to and their metadata
+
+
+#### 🔌 Usage
+
-
-**start_time:** `typing.Optional[int]` — Start time of the source video/audio file.
-
-
-
-
-
-**end_time:** `typing.Optional[int]` — End time of the source video/audio file.
-
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.pronunciation_dictionary.get_all(
+ page_size=1,
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**highest_resolution:** `typing.Optional[bool]` — Whether to use the highest resolution available.
-
-
-
-
-
-**drop_background_audio:** `typing.Optional[bool]` — An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues.
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -5150,7 +5654,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**use_profanity_filter:** `typing.Optional[bool]` — [BETA] Whether transcripts should have profanities censored with the words '[censored]'
+**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30.
@@ -5170,7 +5674,8 @@ typing.Optional[core.File]` — See core.File for more documentation
-client.dubbing.get_dubbing_project_metadata(...)
+## Workspace
+client.workspace.invite_user(...)
-
@@ -5182,7 +5687,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Returns metadata about a dubbing project, including whether it's still in progress or not
+Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators.
@@ -5202,8 +5707,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.dubbing.get_dubbing_project_metadata(
- dubbing_id="dubbing_id",
+client.workspace.invite_user(
+ email="email",
)
```
@@ -5220,7 +5725,7 @@ client.dubbing.get_dubbing_project_metadata(
-
-**dubbing_id:** `str` — ID of the dubbing project.
+**email:** `str` — Email of the target user.
@@ -5240,7 +5745,7 @@ client.dubbing.get_dubbing_project_metadata(
-client.dubbing.delete_dubbing_project(...)
+client.workspace.delete_existing_invitation(...)
-
@@ -5252,7 +5757,7 @@ client.dubbing.get_dubbing_project_metadata(
-
-Deletes a dubbing project.
+Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators.
@@ -5272,8 +5777,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.dubbing.delete_dubbing_project(
- dubbing_id="dubbing_id",
+client.workspace.delete_existing_invitation(
+ email="email",
)
```
@@ -5290,7 +5795,7 @@ client.dubbing.delete_dubbing_project(
-
-**dubbing_id:** `str` — ID of the dubbing project.
+**email:** `str` — Email of the target user.
@@ -5310,7 +5815,7 @@ client.dubbing.delete_dubbing_project(
-client.dubbing.get_dubbed_file(...)
+client.workspace.update_member(...)
-
@@ -5322,7 +5827,7 @@ client.dubbing.delete_dubbing_project(
-
-Returns dubbed file as a streamed file. Videos will be returned in MP4 format and audio only dubs will be returned in MP3.
+Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators.
@@ -5342,9 +5847,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.dubbing.get_dubbed_file(
- dubbing_id="string",
- language_code="string",
+client.workspace.update_member(
+ email="email",
)
```
@@ -5361,7 +5865,7 @@ client.dubbing.get_dubbed_file(
-
-**dubbing_id:** `str` — ID of the dubbing project.
+**email:** `str` — Email of the target user.
@@ -5369,7 +5873,7 @@ client.dubbing.get_dubbed_file(
-
-**language_code:** `str` — ID of the language.
+**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account.
@@ -5377,7 +5881,15 @@ client.dubbing.get_dubbed_file(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5389,7 +5901,8 @@ client.dubbing.get_dubbed_file(
-client.dubbing.get_transcript_for_dub(...)
+## ConversationalAi
+client.conversational_ai.get_signed_url(...)
-
@@ -5401,7 +5914,7 @@ client.dubbing.get_dubbed_file(
-
-Returns transcript for the dub as an SRT file.
+Get a signed url to start a conversation with an agent that requires authorization
@@ -5421,9 +5934,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.dubbing.get_transcript_for_dub(
- dubbing_id="dubbing_id",
- language_code="language_code",
+client.conversational_ai.get_signed_url(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -5440,7 +5952,7 @@ client.dubbing.get_transcript_for_dub(
-
-**dubbing_id:** `str` — ID of the dubbing project.
+**agent_id:** `str` — The id of the agent you're taking the action on.
@@ -5448,41 +5960,36 @@ client.dubbing.get_transcript_for_dub(
-
-**language_code:** `str` — ID of the language.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**format_type:** `typing.Optional[
- GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType
-]` — Format to use for the subtitle file, either 'srt' or 'webvtt'
-
+
+client.conversational_ai.create_agent(...)
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
-
+#### 📝 Description
-
-
-
+
+-
-## Workspace
-
client.workspace.get_sso_provider_admin(...)
-
+Create an agent from a config object
+
+
+
+
+
#### 🔌 Usage
@@ -5492,13 +5999,13 @@ client.dubbing.get_transcript_for_dub(
-
```python
-from elevenlabs import ElevenLabs
+from elevenlabs import ConversationalConfig, ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.get_sso_provider_admin(
- workspace_id="workspace_id",
+client.conversational_ai.create_agent(
+ conversation_config=ConversationalConfig(),
)
```
@@ -5515,7 +6022,23 @@ client.workspace.get_sso_provider_admin(
-
-**workspace_id:** `str`
+**conversation_config:** `ConversationalConfig` — Conversation configuration for an agent
+
+
+
+
+
+-
+
+**platform_settings:** `typing.Optional[AgentPlatformSettings]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — A name to make the agent easier to find
@@ -5535,7 +6058,7 @@ client.workspace.get_sso_provider_admin(
-client.workspace.invite_user(...)
+client.conversational_ai.get_agent(...)
-
@@ -5547,7 +6070,7 @@ client.workspace.get_sso_provider_admin(
-
-Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators.
+Retrieve config for an agent
@@ -5567,8 +6090,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.invite_user(
- email="email",
+client.conversational_ai.get_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -5585,7 +6108,7 @@ client.workspace.invite_user(
-
-**email:** `str` — Email of the target user.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -5605,7 +6128,7 @@ client.workspace.invite_user(
-client.workspace.delete_existing_invitation(...)
+client.conversational_ai.delete_agent(...)
-
@@ -5617,7 +6140,7 @@ client.workspace.invite_user(
-
-Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators.
+Delete an agent
@@ -5637,8 +6160,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.delete_existing_invitation(
- email="email",
+client.conversational_ai.delete_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -5655,7 +6178,7 @@ client.workspace.delete_existing_invitation(
-
-**email:** `str` — Email of the target user.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -5675,7 +6198,7 @@ client.workspace.delete_existing_invitation(
-client.workspace.update_member(...)
+client.conversational_ai.update_agent(...)
-
@@ -5687,7 +6210,7 @@ client.workspace.delete_existing_invitation(
-
-Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators.
+Patches an Agent's settings
@@ -5707,8 +6230,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.update_member(
- email="email",
+client.conversational_ai.update_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -5725,7 +6248,7 @@ client.workspace.update_member(
-
-**email:** `str` — Email of the target user.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -5733,7 +6256,7 @@ client.workspace.update_member(
-
-**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account.
+**conversation_config:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Conversation configuration for an agent
@@ -5741,7 +6264,27 @@ client.workspace.update_member(
-
-**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace.
+**platform_settings:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+
+
+
+
+-
+
+**secrets:** `typing.Optional[
+ typing.Sequence[
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem
+ ]
+]` — A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — A name to make the agent easier to find
@@ -5761,8 +6304,7 @@ client.workspace.update_member(
-## Models
-client.models.get_all()
+client.conversational_ai.get_widget(...)
-
@@ -5774,7 +6316,7 @@ client.workspace.update_member(
-
-Gets a list of available models.
+Retrieve the widget configuration for an agent
@@ -5794,7 +6336,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.models.get_all()
+client.conversational_ai.get_widget(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
```
@@ -5810,6 +6354,22 @@ client.models.get_all()
-
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**conversation_signature:** `typing.Optional[str]` — An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5822,8 +6382,7 @@ client.models.get_all()
-## AudioNative
-client.audio_native.create(...)
+client.conversational_ai.get_link(...)
-
@@ -5835,7 +6394,7 @@ client.models.get_all()
-
-Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet.
+Get the current link used to share the agent with others
@@ -5855,8 +6414,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.audio_native.create(
- name="name",
+client.conversational_ai.get_link(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -5873,7 +6432,7 @@ client.audio_native.create(
-
-**name:** `str` — Project name.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -5881,81 +6440,69 @@ client.audio_native.create(
-
-**image:** `typing.Optional[str]` — Image URL used in the player. If not provided, default image set in the Player settings is used.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**author:** `typing.Optional[str]` — Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
-
-
--
-**title:** `typing.Optional[str]` — Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
-
+
+client.conversational_ai.post_avatar(...)
-
-**small:** `typing.Optional[bool]` — Whether to use small player or not. If not provided, default value set in the Player settings is used.
-
-
-
+#### 📝 Description
-
-**text_color:** `typing.Optional[str]` — Text color used in the player. If not provided, default text color set in the Player settings is used.
-
-
-
-
-
-**background_color:** `typing.Optional[str]` — Background color used in the player. If not provided, default background color set in the Player settings is used.
-
+Sets the avatar for an agent displayed in the widget
-
-
--
-
-**sessionization:** `typing.Optional[int]` — Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
-
+#### 🔌 Usage
+
-
-**voice_id:** `typing.Optional[str]` — Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
-
-
-
-
-
-**model_id:** `typing.Optional[str]` — TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
-
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.post_avatar(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**file:** `from __future__ import annotations
+
+-
-typing.Optional[core.File]` — See core.File for more documentation
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -5963,7 +6510,9 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
+**avatar_file:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
@@ -5983,8 +6532,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-## Usage
-client.usage.get_characters_usage_metrics(...)
+client.conversational_ai.get_knowledge_base_document(...)
-
@@ -5996,7 +6544,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
+Get details about a specific document making up the agent's knowledge base
@@ -6016,9 +6564,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.usage.get_characters_usage_metrics(
- start_unix=1,
- end_unix=1,
+client.conversational_ai.get_knowledge_base_document(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -6035,23 +6583,7 @@ client.usage.get_characters_usage_metrics(
-
-**start_unix:** `int` — UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day.
-
-
-
-
-
--
-
-**end_unix:** `int` — UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day.
-
-
-
-
-
--
-
-**include_workspace_metrics:** `typing.Optional[bool]` — Whether or not to include the statistics of the entire workspace.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -6059,7 +6591,7 @@ client.usage.get_characters_usage_metrics(
-
-**breakdown_type:** `typing.Optional[BreakdownTypes]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False.
+**documentation_id:** `str` — The id of a document from the agent's knowledge base. This is returned on document addition.
@@ -6079,8 +6611,7 @@ client.usage.get_characters_usage_metrics(
-## PronunciationDictionary
-client.pronunciation_dictionary.add_from_file(...)
+client.conversational_ai.add_agent_secret(...)
-
@@ -6092,7 +6623,7 @@ client.usage.get_characters_usage_metrics(
-
-Creates a new pronunciation dictionary from a lexicon .PLS file
+Uploads a file or references a webpage for the agent to use as part of its knowledge base
@@ -6112,8 +6643,10 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.add_from_file(
+client.conversational_ai.add_agent_secret(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
name="name",
+ secret_value="secret_value",
)
```
@@ -6130,17 +6663,7 @@ client.pronunciation_dictionary.add_from_file(
-
-**name:** `str` — The name of the pronunciation dictionary, used for identification only.
-
-
-
-
-
--
-
-**file:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -6148,7 +6671,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**description:** `typing.Optional[str]` — A description of the pronunciation dictionary, used for identification only.
+**name:** `str` — A name to help identify a particular agent secret
@@ -6156,7 +6679,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**workspace_access:** `typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]` — Should be one of 'editor' or 'viewer'. If not provided, defaults to no access.
+**secret_value:** `str` — A value to be encrypted and used by the agent
@@ -6176,7 +6699,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...)
+client.conversational_ai.create_knowledge_base_document(...)
-
@@ -6188,7 +6711,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Add rules to the pronunciation dictionary
+Uploads a file or references a webpage for the agent to use as part of its knowledge base
@@ -6204,22 +6727,12 @@ Add rules to the pronunciation dictionary
```python
from elevenlabs import ElevenLabs
-from elevenlabs.pronunciation_dictionary import (
- PronunciationDictionaryRule_Phoneme,
-)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
- pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
- rules=[
- PronunciationDictionaryRule_Phoneme(
- string_to_replace="rules",
- phoneme="rules",
- alphabet="rules",
- )
- ],
+client.conversational_ai.create_knowledge_base_document(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -6236,7 +6749,7 @@ client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
-
-**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -6244,11 +6757,17 @@ client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
-
-**rules:** `typing.Sequence[PronunciationDictionaryRule]`
+**url:** `typing.Optional[str]` — URL to a page of documentation that the agent will have access to in order to interact with users.
+
+
+
-List of pronunciation rules. Rule can be either:
- an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b', }
- or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa' }
+
+-
+
+**file:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
@@ -6268,7 +6787,7 @@ List of pronunciation rules. Rule can be either:
-client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...)
+client.conversational_ai.get_agents(...)
-
@@ -6280,7 +6799,7 @@ List of pronunciation rules. Rule can be either:
-
-Remove rules from the pronunciation dictionary
+Returns a page of your agents and their metadata.
@@ -6300,10 +6819,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
- pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
- rule_strings=["rule_strings"],
-)
+client.conversational_ai.get_agents()
```
@@ -6319,7 +6835,7 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
-
-**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -6327,7 +6843,15 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
-
-**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary.
+**page_size:** `typing.Optional[int]` — How many Agents to return at maximum. Can not exceed 100, defaults to 30.
+
+
+
+
+
+-
+
+**search:** `typing.Optional[str]` — Search by agent name.
@@ -6347,7 +6871,7 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
-client.pronunciation_dictionary.download(...)
+client.conversational_ai.get_conversations(...)
-
@@ -6359,7 +6883,7 @@ client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
-
-Get PLS file with a pronunciation dictionary version rules
+Get all conversations of agents that user owns. With option to restrict to a specific agent.
@@ -6379,9 +6903,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.download(
- dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
- version_id="KZFyRUq3R6kaqhKI146w",
+client.conversational_ai.get_conversations(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -6398,7 +6921,7 @@ client.pronunciation_dictionary.download(
-
-**dictionary_id:** `str` — The id of the pronunciation dictionary
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -6406,7 +6929,23 @@ client.pronunciation_dictionary.download(
-
-**version_id:** `str` — The id of the version of the pronunciation dictionary
+**agent_id:** `typing.Optional[str]` — The id of the agent you're taking the action on.
+
+
+
+
+
+-
+
+**call_successful:** `typing.Optional[EvaluationSuccessResult]` — The result of the success evaluation
+
+
+
+
+
+-
+
+**page_size:** `typing.Optional[int]` — How many conversations to return at maximum. Can not exceed 100, defaults to 30.
@@ -6426,7 +6965,7 @@ client.pronunciation_dictionary.download(
-client.pronunciation_dictionary.get(...)
+client.conversational_ai.get_conversation(...)
-
@@ -6438,7 +6977,7 @@ client.pronunciation_dictionary.download(
-
-Get metadata for a pronunciation dictionary
+Get the details of a particular conversation
@@ -6458,8 +6997,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.get(
- pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
+client.conversational_ai.get_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -6476,7 +7015,7 @@ client.pronunciation_dictionary.get(
-
-**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
@@ -6496,7 +7035,7 @@ client.pronunciation_dictionary.get(
-client.pronunciation_dictionary.get_all(...)
+client.conversational_ai.get_conversation_audio(...)
-
@@ -6508,7 +7047,7 @@ client.pronunciation_dictionary.get(
-
-Get a list of the pronunciation dictionaries you have access to and their metadata
+Get the audio recording of a particular conversation
@@ -6528,8 +7067,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.get_all(
- page_size=1,
+client.conversational_ai.get_conversation_audio(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -6546,15 +7085,7 @@ client.pronunciation_dictionary.get_all(
-
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
-
-
-
-
-
--
-
-**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30.
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py
index d622566..98bee6b 100644
--- a/src/elevenlabs/__init__.py
+++ b/src/elevenlabs/__init__.py
@@ -2,25 +2,76 @@
from .types import (
Accent,
+ AddAgentSecretResponseModel,
AddChapterResponseModel,
+ AddKnowledgeBaseResponseModel,
AddProjectResponseModel,
AddPronunciationDictionaryResponseModel,
AddPronunciationDictionaryRulesResponseModel,
AddVoiceIvcResponseModel,
AddVoiceResponseModel,
Age,
+ AgentBan,
+ AgentConfig,
+ AgentConfigOverride,
+ AgentMetadataResponseModel,
+ AgentPlatformSettings,
+ AgentSummaryResponseModel,
+ AllowlistItem,
+ ArrayJsonSchemaProperty,
+ ArrayJsonSchemaPropertyItems,
+ AsrConversationalConfig,
+ AsrInputFormat,
+ AsrProvider,
+ AsrQuality,
AudioNativeCreateProjectResponseModel,
AudioNativeGetEmbedCodeResponseModel,
+ AuthSettings,
+ AuthorizationMethod,
+ BanReasonType,
BreakdownTypes,
ChapterResponse,
ChapterSnapshotResponse,
ChapterSnapshotsResponse,
ChapterState,
ChapterStatisticsResponse,
+ ClientEvent,
+ ClientToolConfig,
+ ConvAiNewSecretConfig,
+ ConvAiSecretLocator,
+ ConvAiStoredSecretConfig,
+ ConversationChargingCommonModel,
+ ConversationConfig,
+ ConversationConfigClientOverride,
+ ConversationHistoryAnalysisCommonModel,
+ ConversationHistoryEvaluationCriteriaResultCommonModel,
+ ConversationHistoryMetadataCommonModel,
+ ConversationHistoryTranscriptCommonModel,
+ ConversationHistoryTranscriptCommonModelRole,
+ ConversationHistoryTranscriptToolCallCommonModel,
+ ConversationHistoryTranscriptToolResultCommonModel,
+ ConversationInitiationClientData,
+ ConversationSignedUrlResponseModel,
+ ConversationSummaryResponseModel,
+ ConversationSummaryResponseModelStatus,
+ ConversationTokenDbModel,
+ ConversationTokenPurpose,
+ ConversationalConfig,
+ CreateAgentResponseModel,
Currency,
+ CustomLlm,
+ DataCollectionResultCommonModel,
DoDubbingResponse,
DubbingMetadataResponse,
EditProjectResponseModel,
+ EmbedConfig,
+ EmbedConfigAvatar,
+ EmbedConfigAvatar_Image,
+ EmbedConfigAvatar_Orb,
+ EmbedConfigAvatar_Url,
+ EmbedVariant,
+ EvaluationSettings,
+ EvaluationSuccessResult,
ExtendedSubscriptionResponseModelBillingPeriod,
ExtendedSubscriptionResponseModelCharacterRefreshPeriod,
ExtendedSubscriptionResponseModelCurrency,
@@ -28,7 +79,16 @@
FineTuningResponse,
FineTuningResponseModelStateValue,
Gender,
+ GetAgentEmbedResponseModel,
+ GetAgentLinkResponseModel,
+ GetAgentResponseModel,
+ GetAgentsPageResponseModel,
GetChaptersResponse,
+ GetConversationResponseModel,
+ GetConversationResponseModelStatus,
+ GetConversationsPageResponseModel,
+ GetKnowledgeBaseReponseModel,
+ GetKnowledgeBaseReponseModelType,
GetLibraryVoicesResponse,
GetProjectsResponse,
GetPronunciationDictionariesMetadataResponseModel,
@@ -40,42 +100,63 @@
HistoryAlignmentsResponseModel,
HistoryItem,
HttpValidationError,
+ ImageAvatar,
Invoice,
+ KnowledgeBaseLocator,
+ KnowledgeBaseLocatorType,
LanguageResponse,
LibraryVoiceResponse,
LibraryVoiceResponseModelCategory,
+ LiteralJsonSchemaProperty,
+ LiteralJsonSchemaPropertyType,
+ Llm,
ManualVerificationFileResponse,
ManualVerificationResponse,
Model,
ModelRatesResponseModel,
ModelResponseModelConcurrencyGroup,
+ ObjectJsonSchemaProperty,
+ ObjectJsonSchemaPropertyPropertiesValue,
OptimizeStreamingLatency,
+ OrbAvatar,
OutputFormat,
+ PostAgentAvatarResponseModel,
ProfilePageResponseModel,
ProjectExtendedResponseModel,
ProjectExtendedResponseModelAccessLevel,
+ ProjectExtendedResponseModelApplyTextNormalization,
+ ProjectExtendedResponseModelFiction,
ProjectExtendedResponseModelQualityPreset,
ProjectExtendedResponseModelTargetAudience,
ProjectResponse,
ProjectResponseModelAccessLevel,
+ ProjectResponseModelFiction,
ProjectResponseModelTargetAudience,
ProjectSnapshotResponse,
ProjectSnapshotUploadResponseModel,
ProjectSnapshotUploadResponseModelStatus,
ProjectSnapshotsResponse,
ProjectState,
+ PromptAgent,
+ PromptAgentOverride,
+ PromptAgentToolsItem,
+ PromptAgentToolsItem_Client,
+ PromptAgentToolsItem_Webhook,
+ PromptEvaluationCriteria,
PronunciationDictionaryAliasRuleRequestModel,
PronunciationDictionaryPhonemeRuleRequestModel,
PronunciationDictionaryVersionLocator,
PronunciationDictionaryVersionResponseModel,
+ PydanticPronunciationDictionaryVersionLocator,
+ QueryParamsJsonSchema,
+ ReaderResourceResponseModel,
+ ReaderResourceResponseModelResourceType,
RecordingResponse,
RemovePronunciationDictionaryRulesResponseModel,
ReviewStatus,
SpeechHistoryItemResponse,
SpeechHistoryItemResponseModelSource,
SpeechHistoryItemResponseModelVoiceCategory,
- SsoProviderResponseModel,
- SsoProviderResponseModelProviderType,
Subscription,
SubscriptionResponse,
SubscriptionResponseModelBillingPeriod,
@@ -83,6 +164,14 @@
SubscriptionResponseModelCurrency,
SubscriptionStatus,
TextToSpeechAsStreamRequest,
+ TtsConversationalConfig,
+ TtsConversationalConfigOverride,
+ TtsConversationalModel,
+ TtsOptimizeStreamingLatency,
+ TtsOutputFormat,
+ TurnConfig,
+ TurnMode,
+ UrlAvatar,
UsageCharactersResponseModel,
User,
ValidationError,
@@ -102,12 +191,17 @@
VoiceSharingResponseModelCategory,
VoiceSharingState,
VoiceVerificationResponse,
+ WebhookToolApiSchemaConfig,
+ WebhookToolApiSchemaConfigMethod,
+ WebhookToolApiSchemaConfigRequestHeadersValue,
+ WebhookToolConfig,
)
from .errors import UnprocessableEntityError
from . import (
audio_isolation,
audio_native,
chapters,
+ conversational_ai,
dubbing,
history,
models,
@@ -125,10 +219,16 @@
workspace,
)
from .client import AsyncElevenLabs, ElevenLabs
+from .conversational_ai import (
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+)
from .dubbing import GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType
from .environment import ElevenLabsEnvironment
+from .history import HistoryGetAllRequestSource
from .play import play, save, stream
-from .projects import ProjectsAddRequestTargetAudience
+from .projects import ProjectsAddRequestFiction, ProjectsAddRequestTargetAudience
from .pronunciation_dictionary import (
PronunciationDictionaryAddFromFileRequestWorkspaceAccess,
PronunciationDictionaryRule,
@@ -141,21 +241,43 @@
BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization,
BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
)
+from .text_to_voice import TextToVoiceCreatePreviewsRequestOutputFormat
from .version import __version__
from .workspace import BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole
__all__ = [
"Accent",
+ "AddAgentSecretResponseModel",
"AddChapterResponseModel",
+ "AddKnowledgeBaseResponseModel",
"AddProjectResponseModel",
"AddPronunciationDictionaryResponseModel",
"AddPronunciationDictionaryRulesResponseModel",
"AddVoiceIvcResponseModel",
"AddVoiceResponseModel",
"Age",
+ "AgentBan",
+ "AgentConfig",
+ "AgentConfigOverride",
+ "AgentMetadataResponseModel",
+ "AgentPlatformSettings",
+ "AgentSummaryResponseModel",
+ "AllowlistItem",
+ "ArrayJsonSchemaProperty",
+ "ArrayJsonSchemaPropertyItems",
+ "AsrConversationalConfig",
+ "AsrInputFormat",
+ "AsrProvider",
+ "AsrQuality",
"AsyncElevenLabs",
"AudioNativeCreateProjectResponseModel",
"AudioNativeGetEmbedCodeResponseModel",
+ "AuthSettings",
+ "AuthorizationMethod",
+ "BanReasonType",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored",
"BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization",
"BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization",
"BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization",
@@ -167,12 +289,45 @@
"ChapterSnapshotsResponse",
"ChapterState",
"ChapterStatisticsResponse",
+ "ClientEvent",
+ "ClientToolConfig",
+ "ConvAiNewSecretConfig",
+ "ConvAiSecretLocator",
+ "ConvAiStoredSecretConfig",
+ "ConversationChargingCommonModel",
+ "ConversationConfig",
+ "ConversationConfigClientOverride",
+ "ConversationHistoryAnalysisCommonModel",
+ "ConversationHistoryEvaluationCriteriaResultCommonModel",
+ "ConversationHistoryMetadataCommonModel",
+ "ConversationHistoryTranscriptCommonModel",
+ "ConversationHistoryTranscriptCommonModelRole",
+ "ConversationHistoryTranscriptToolCallCommonModel",
+ "ConversationHistoryTranscriptToolResultCommonModel",
+ "ConversationInitiationClientData",
+ "ConversationSignedUrlResponseModel",
+ "ConversationSummaryResponseModel",
+ "ConversationSummaryResponseModelStatus",
+ "ConversationTokenDbModel",
+ "ConversationTokenPurpose",
+ "ConversationalConfig",
+ "CreateAgentResponseModel",
"Currency",
+ "CustomLlm",
+ "DataCollectionResultCommonModel",
"DoDubbingResponse",
"DubbingMetadataResponse",
"EditProjectResponseModel",
"ElevenLabs",
"ElevenLabsEnvironment",
+ "EmbedConfig",
+ "EmbedConfigAvatar",
+ "EmbedConfigAvatar_Image",
+ "EmbedConfigAvatar_Orb",
+ "EmbedConfigAvatar_Url",
+ "EmbedVariant",
+ "EvaluationSettings",
+ "EvaluationSuccessResult",
"ExtendedSubscriptionResponseModelBillingPeriod",
"ExtendedSubscriptionResponseModelCharacterRefreshPeriod",
"ExtendedSubscriptionResponseModelCurrency",
@@ -180,7 +335,16 @@
"FineTuningResponse",
"FineTuningResponseModelStateValue",
"Gender",
+ "GetAgentEmbedResponseModel",
+ "GetAgentLinkResponseModel",
+ "GetAgentResponseModel",
+ "GetAgentsPageResponseModel",
"GetChaptersResponse",
+ "GetConversationResponseModel",
+ "GetConversationResponseModelStatus",
+ "GetConversationsPageResponseModel",
+ "GetKnowledgeBaseReponseModel",
+ "GetKnowledgeBaseReponseModelType",
"GetLibraryVoicesResponse",
"GetProjectsResponse",
"GetPronunciationDictionariesMetadataResponseModel",
@@ -191,33 +355,54 @@
"History",
"HistoryAlignmentResponseModel",
"HistoryAlignmentsResponseModel",
+ "HistoryGetAllRequestSource",
"HistoryItem",
"HttpValidationError",
+ "ImageAvatar",
"Invoice",
+ "KnowledgeBaseLocator",
+ "KnowledgeBaseLocatorType",
"LanguageResponse",
"LibraryVoiceResponse",
"LibraryVoiceResponseModelCategory",
+ "LiteralJsonSchemaProperty",
+ "LiteralJsonSchemaPropertyType",
+ "Llm",
"ManualVerificationFileResponse",
"ManualVerificationResponse",
"Model",
"ModelRatesResponseModel",
"ModelResponseModelConcurrencyGroup",
+ "ObjectJsonSchemaProperty",
+ "ObjectJsonSchemaPropertyPropertiesValue",
"OptimizeStreamingLatency",
+ "OrbAvatar",
"OutputFormat",
+ "PostAgentAvatarResponseModel",
"ProfilePageResponseModel",
"ProjectExtendedResponseModel",
"ProjectExtendedResponseModelAccessLevel",
+ "ProjectExtendedResponseModelApplyTextNormalization",
+ "ProjectExtendedResponseModelFiction",
"ProjectExtendedResponseModelQualityPreset",
"ProjectExtendedResponseModelTargetAudience",
"ProjectResponse",
"ProjectResponseModelAccessLevel",
+ "ProjectResponseModelFiction",
"ProjectResponseModelTargetAudience",
"ProjectSnapshotResponse",
"ProjectSnapshotUploadResponseModel",
"ProjectSnapshotUploadResponseModelStatus",
"ProjectSnapshotsResponse",
"ProjectState",
+ "ProjectsAddRequestFiction",
"ProjectsAddRequestTargetAudience",
+ "PromptAgent",
+ "PromptAgentOverride",
+ "PromptAgentToolsItem",
+ "PromptAgentToolsItem_Client",
+ "PromptAgentToolsItem_Webhook",
+ "PromptEvaluationCriteria",
"PronunciationDictionaryAddFromFileRequestWorkspaceAccess",
"PronunciationDictionaryAliasRuleRequestModel",
"PronunciationDictionaryPhonemeRuleRequestModel",
@@ -226,14 +411,16 @@
"PronunciationDictionaryRule_Phoneme",
"PronunciationDictionaryVersionLocator",
"PronunciationDictionaryVersionResponseModel",
+ "PydanticPronunciationDictionaryVersionLocator",
+ "QueryParamsJsonSchema",
+ "ReaderResourceResponseModel",
+ "ReaderResourceResponseModelResourceType",
"RecordingResponse",
"RemovePronunciationDictionaryRulesResponseModel",
"ReviewStatus",
"SpeechHistoryItemResponse",
"SpeechHistoryItemResponseModelSource",
"SpeechHistoryItemResponseModelVoiceCategory",
- "SsoProviderResponseModel",
- "SsoProviderResponseModelProviderType",
"Subscription",
"SubscriptionResponse",
"SubscriptionResponseModelBillingPeriod",
@@ -241,7 +428,16 @@
"SubscriptionResponseModelCurrency",
"SubscriptionStatus",
"TextToSpeechAsStreamRequest",
+ "TextToVoiceCreatePreviewsRequestOutputFormat",
+ "TtsConversationalConfig",
+ "TtsConversationalConfigOverride",
+ "TtsConversationalModel",
+ "TtsOptimizeStreamingLatency",
+ "TtsOutputFormat",
+ "TurnConfig",
+ "TurnMode",
"UnprocessableEntityError",
+ "UrlAvatar",
"UsageCharactersResponseModel",
"User",
"ValidationError",
@@ -261,10 +457,15 @@
"VoiceSharingResponseModelCategory",
"VoiceSharingState",
"VoiceVerificationResponse",
+ "WebhookToolApiSchemaConfig",
+ "WebhookToolApiSchemaConfigMethod",
+ "WebhookToolApiSchemaConfigRequestHeadersValue",
+ "WebhookToolConfig",
"__version__",
"audio_isolation",
"audio_native",
"chapters",
+ "conversational_ai",
"dubbing",
"history",
"models",
diff --git a/src/elevenlabs/audio_isolation/client.py b/src/elevenlabs/audio_isolation/client.py
index cae2e69..b52b320 100644
--- a/src/elevenlabs/audio_isolation/client.py
+++ b/src/elevenlabs/audio_isolation/client.py
@@ -37,15 +37,6 @@ def audio_isolation(
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.audio_isolation.audio_isolation()
"""
with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation",
@@ -97,15 +88,6 @@ def audio_isolation_stream(
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.audio_isolation.audio_isolation_stream()
"""
with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation/stream",
@@ -162,23 +144,6 @@ async def audio_isolation(
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.audio_isolation.audio_isolation()
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation",
@@ -230,23 +195,6 @@ async def audio_isolation_stream(
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.audio_isolation.audio_isolation_stream()
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation/stream",
diff --git a/src/elevenlabs/base_client.py b/src/elevenlabs/base_client.py
index 50a0d65..a67c7e0 100644
--- a/src/elevenlabs/base_client.py
+++ b/src/elevenlabs/base_client.py
@@ -18,11 +18,12 @@
from .projects.client import ProjectsClient
from .chapters.client import ChaptersClient
from .dubbing.client import DubbingClient
-from .workspace.client import WorkspaceClient
from .models.client import ModelsClient
from .audio_native.client import AudioNativeClient
from .usage.client import UsageClient
from .pronunciation_dictionary.client import PronunciationDictionaryClient
+from .workspace.client import WorkspaceClient
+from .conversational_ai.client import ConversationalAiClient
from .core.client_wrapper import AsyncClientWrapper
from .history.client import AsyncHistoryClient
from .text_to_sound_effects.client import AsyncTextToSoundEffectsClient
@@ -37,11 +38,12 @@
from .projects.client import AsyncProjectsClient
from .chapters.client import AsyncChaptersClient
from .dubbing.client import AsyncDubbingClient
-from .workspace.client import AsyncWorkspaceClient
from .models.client import AsyncModelsClient
from .audio_native.client import AsyncAudioNativeClient
from .usage.client import AsyncUsageClient
from .pronunciation_dictionary.client import AsyncPronunciationDictionaryClient
+from .workspace.client import AsyncWorkspaceClient
+from .conversational_ai.client import AsyncConversationalAiClient
class BaseElevenLabs:
@@ -115,11 +117,12 @@ def __init__(
self.projects = ProjectsClient(client_wrapper=self._client_wrapper)
self.chapters = ChaptersClient(client_wrapper=self._client_wrapper)
self.dubbing = DubbingClient(client_wrapper=self._client_wrapper)
- self.workspace = WorkspaceClient(client_wrapper=self._client_wrapper)
self.models = ModelsClient(client_wrapper=self._client_wrapper)
self.audio_native = AudioNativeClient(client_wrapper=self._client_wrapper)
self.usage = UsageClient(client_wrapper=self._client_wrapper)
self.pronunciation_dictionary = PronunciationDictionaryClient(client_wrapper=self._client_wrapper)
+ self.workspace = WorkspaceClient(client_wrapper=self._client_wrapper)
+ self.conversational_ai = ConversationalAiClient(client_wrapper=self._client_wrapper)
class AsyncBaseElevenLabs:
@@ -193,11 +196,12 @@ def __init__(
self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper)
self.chapters = AsyncChaptersClient(client_wrapper=self._client_wrapper)
self.dubbing = AsyncDubbingClient(client_wrapper=self._client_wrapper)
- self.workspace = AsyncWorkspaceClient(client_wrapper=self._client_wrapper)
self.models = AsyncModelsClient(client_wrapper=self._client_wrapper)
self.audio_native = AsyncAudioNativeClient(client_wrapper=self._client_wrapper)
self.usage = AsyncUsageClient(client_wrapper=self._client_wrapper)
self.pronunciation_dictionary = AsyncPronunciationDictionaryClient(client_wrapper=self._client_wrapper)
+ self.workspace = AsyncWorkspaceClient(client_wrapper=self._client_wrapper)
+ self.conversational_ai = AsyncConversationalAiClient(client_wrapper=self._client_wrapper)
def _get_base_url(*, base_url: typing.Optional[str] = None, environment: ElevenLabsEnvironment) -> str:
diff --git a/src/elevenlabs/conversational_ai/__init__.py b/src/elevenlabs/conversational_ai/__init__.py
new file mode 100644
index 0000000..a05e4b5
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/__init__.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+)
+
+__all__ = [
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored",
+]
diff --git a/src/elevenlabs/conversational_ai/client.py b/src/elevenlabs/conversational_ai/client.py
new file mode 100644
index 0000000..da92c7f
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/client.py
@@ -0,0 +1,2209 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from ..core.request_options import RequestOptions
+from ..types.conversation_signed_url_response_model import ConversationSignedUrlResponseModel
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.conversational_config import ConversationalConfig
+from ..types.agent_platform_settings import AgentPlatformSettings
+from ..types.create_agent_response_model import CreateAgentResponseModel
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..types.get_agent_response_model import GetAgentResponseModel
+from ..core.jsonable_encoder import jsonable_encoder
+from .types.body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item import (
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
+)
+from ..types.get_agent_embed_response_model import GetAgentEmbedResponseModel
+from ..types.get_agent_link_response_model import GetAgentLinkResponseModel
+from .. import core
+from ..types.post_agent_avatar_response_model import PostAgentAvatarResponseModel
+from ..types.get_knowledge_base_reponse_model import GetKnowledgeBaseReponseModel
+from ..types.add_agent_secret_response_model import AddAgentSecretResponseModel
+from ..types.add_knowledge_base_response_model import AddKnowledgeBaseResponseModel
+from ..types.get_agents_page_response_model import GetAgentsPageResponseModel
+from ..types.evaluation_success_result import EvaluationSuccessResult
+from ..types.get_conversations_page_response_model import GetConversationsPageResponseModel
+from ..types.get_conversation_response_model import GetConversationResponseModel
+from ..core.client_wrapper import AsyncClientWrapper
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ConversationalAiClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def get_signed_url(
+ self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ConversationSignedUrlResponseModel:
+ """
+        Get a signed url to start a conversation with an agent that requires authorization
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of the agent you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ConversationSignedUrlResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_signed_url(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/conversation/get_signed_url",
+ method="GET",
+ params={
+ "agent_id": agent_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ ConversationSignedUrlResponseModel,
+ construct_type(
+ type_=ConversationSignedUrlResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_agent(
+ self,
+ *,
+ conversation_config: ConversationalConfig,
+ platform_settings: typing.Optional[AgentPlatformSettings] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentResponseModel:
+ """
+ Create an agent from a config object
+
+ Parameters
+ ----------
+ conversation_config : ConversationalConfig
+ Conversation configuration for an agent
+
+ platform_settings : typing.Optional[AgentPlatformSettings]
+ Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+ name : typing.Optional[str]
+ A name to make the agent easier to find
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ConversationalConfig, ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.create_agent(
+ conversation_config=ConversationalConfig(),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/agents/create",
+ method="POST",
+ json={
+ "conversation_config": convert_and_respect_annotation_metadata(
+ object_=conversation_config, annotation=ConversationalConfig, direction="write"
+ ),
+ "platform_settings": convert_and_respect_annotation_metadata(
+ object_=platform_settings, annotation=AgentPlatformSettings, direction="write"
+ ),
+ "name": name,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ CreateAgentResponseModel,
+ construct_type(
+ type_=CreateAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_agent(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAgentResponseModel:
+ """
+ Retrieve config for an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentResponseModel,
+ construct_type(
+ type_=GetAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_agent(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Dict[str, str]:
+ """
+ Delete an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Dict[str, str]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.delete_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Dict[str, str],
+ construct_type(
+ type_=typing.Dict[str, str], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_agent(
+ self,
+ agent_id: str,
+ *,
+ conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ secrets: typing.Optional[
+ typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]
+ ] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentResponseModel:
+ """
+ Patches an Agent settings
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Conversation configuration for an agent
+
+ platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+ secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]]
+ A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones
+
+ name : typing.Optional[str]
+ A name to make the agent easier to find
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.update_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="PATCH",
+ json={
+ "conversation_config": conversation_config,
+ "platform_settings": platform_settings,
+ "secrets": convert_and_respect_annotation_metadata(
+ object_=secrets,
+ annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem],
+ direction="write",
+ ),
+ "name": name,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentResponseModel,
+ construct_type(
+ type_=GetAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_widget(
+ self,
+ agent_id: str,
+ *,
+ conversation_signature: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentEmbedResponseModel:
+ """
+ Retrieve the widget configuration for an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ conversation_signature : typing.Optional[str]
+ An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentEmbedResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_widget(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget",
+ method="GET",
+ params={
+ "conversation_signature": conversation_signature,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentEmbedResponseModel,
+ construct_type(
+ type_=GetAgentEmbedResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_link(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAgentLinkResponseModel:
+ """
+ Get the current link used to share the agent with others
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentLinkResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_link(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/link",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentLinkResponseModel,
+ construct_type(
+ type_=GetAgentLinkResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def post_avatar(
+ self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None
+ ) -> PostAgentAvatarResponseModel:
+ """
+ Sets the avatar for an agent displayed in the widget
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ avatar_file : core.File
+ See core.File for more documentation
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PostAgentAvatarResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.post_avatar(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar",
+ method="POST",
+ data={},
+ files={
+ "avatar_file": avatar_file,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PostAgentAvatarResponseModel,
+ construct_type(
+ type_=PostAgentAvatarResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_knowledge_base_document(
+ self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetKnowledgeBaseReponseModel:
+ """
+        Get details about a specific document in the agent's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ documentation_id : str
+ The id of a document from the agent's knowledge base. This is returned on document addition.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetKnowledgeBaseReponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_knowledge_base_document(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/knowledge-base/{jsonable_encoder(documentation_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetKnowledgeBaseReponseModel,
+ construct_type(
+ type_=GetKnowledgeBaseReponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def add_agent_secret(
+ self, agent_id: str, *, name: str, secret_value: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AddAgentSecretResponseModel:
+ """
+        Adds a named secret for the agent; the value is stored encrypted and can be used by the agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ name : str
+ A name to help identify a particular agent secret
+
+ secret_value : str
+ A value to be encrypted and used by the agent
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddAgentSecretResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.add_agent_secret(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+ secret_value="secret_value",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-secret",
+ method="POST",
+ json={
+ "name": name,
+ "secret_value": secret_value,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddAgentSecretResponseModel,
+ construct_type(
+ type_=AddAgentSecretResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_knowledge_base_document(
+ self,
+ agent_id: str,
+ *,
+ url: typing.Optional[str] = OMIT,
+ file: typing.Optional[core.File] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AddKnowledgeBaseResponseModel:
+ """
+ Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ url : typing.Optional[str]
+ URL to a page of documentation that the agent will have access to in order to interact with users.
+
+ file : typing.Optional[core.File]
+ See core.File for more documentation
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddKnowledgeBaseResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.create_knowledge_base_document(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-to-knowledge-base",
+ method="POST",
+ data={
+ "url": url,
+ },
+ files={
+ "file": file,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddKnowledgeBaseResponseModel,
+ construct_type(
+ type_=AddKnowledgeBaseResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_agents(
+ self,
+ *,
+ cursor: typing.Optional[str] = None,
+ page_size: typing.Optional[int] = None,
+ search: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentsPageResponseModel:
+ """
+ Returns a page of your agents and their metadata.
+
+ Parameters
+ ----------
+ cursor : typing.Optional[str]
+ Used for fetching next page. Cursor is returned in the response.
+
+ page_size : typing.Optional[int]
+ How many Agents to return at maximum. Can not exceed 100, defaults to 30.
+
+ search : typing.Optional[str]
+ Search by agents name.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentsPageResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_agents()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/agents",
+ method="GET",
+ params={
+ "cursor": cursor,
+ "page_size": page_size,
+ "search": search,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentsPageResponseModel,
+ construct_type(
+ type_=GetAgentsPageResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_conversations(
+ self,
+ *,
+ cursor: typing.Optional[str] = None,
+ agent_id: typing.Optional[str] = None,
+ call_successful: typing.Optional[EvaluationSuccessResult] = None,
+ page_size: typing.Optional[int] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetConversationsPageResponseModel:
+ """
+ Get all conversations of agents that user owns. With option to restrict to a specific agent.
+
+ Parameters
+ ----------
+ cursor : typing.Optional[str]
+ Used for fetching next page. Cursor is returned in the response.
+
+ agent_id : typing.Optional[str]
+ The id of the agent you're taking the action on.
+
+ call_successful : typing.Optional[EvaluationSuccessResult]
+ The result of the success evaluation
+
+ page_size : typing.Optional[int]
+ How many conversations to return at maximum. Can not exceed 100, defaults to 30.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConversationsPageResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_conversations(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/conversations",
+ method="GET",
+ params={
+ "cursor": cursor,
+ "agent_id": agent_id,
+ "call_successful": call_successful,
+ "page_size": page_size,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConversationsPageResponseModel,
+ construct_type(
+ type_=GetConversationsPageResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_conversation(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetConversationResponseModel:
+ """
+ Get the details of a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConversationResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConversationResponseModel,
+ construct_type(
+ type_=GetConversationResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_conversation_audio(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Get the audio recording of a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_conversation_audio(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncConversationalAiClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # Keep the shared wrapper; it supplies the configured async httpx client
        # (base URL, auth headers) used by every method on this client.
        self._client_wrapper = client_wrapper
+
+ async def get_signed_url(
+ self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> ConversationSignedUrlResponseModel:
+ """
+ Get a signed url to start a conversation with an agent with an agent that requires authorization
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of the agent you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ConversationSignedUrlResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_signed_url(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/conversation/get_signed_url",
+ method="GET",
+ params={
+ "agent_id": agent_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ ConversationSignedUrlResponseModel,
+ construct_type(
+ type_=ConversationSignedUrlResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_agent(
+ self,
+ *,
+ conversation_config: ConversationalConfig,
+ platform_settings: typing.Optional[AgentPlatformSettings] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentResponseModel:
+ """
+ Create an agent from a config object
+
+ Parameters
+ ----------
+ conversation_config : ConversationalConfig
+ Conversation configuration for an agent
+
+ platform_settings : typing.Optional[AgentPlatformSettings]
+ Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+ name : typing.Optional[str]
+ A name to make the agent easier to find
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs, ConversationalConfig
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.create_agent(
+ conversation_config=ConversationalConfig(),
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/agents/create",
+ method="POST",
+ json={
+ "conversation_config": convert_and_respect_annotation_metadata(
+ object_=conversation_config, annotation=ConversationalConfig, direction="write"
+ ),
+ "platform_settings": convert_and_respect_annotation_metadata(
+ object_=platform_settings, annotation=AgentPlatformSettings, direction="write"
+ ),
+ "name": name,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ CreateAgentResponseModel,
+ construct_type(
+ type_=CreateAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_agent(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAgentResponseModel:
+ """
+ Retrieve config for an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentResponseModel,
+ construct_type(
+ type_=GetAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_agent(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Dict[str, str]:
+ """
+ Delete an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Dict[str, str]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.delete_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Dict[str, str],
+ construct_type(
+ type_=typing.Dict[str, str], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_agent(
+ self,
+ agent_id: str,
+ *,
+ conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ secrets: typing.Optional[
+ typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]
+ ] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentResponseModel:
+ """
+ Patches an Agent settings
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Conversation configuration for an agent
+
+ platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+ secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]]
+ A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones
+
+ name : typing.Optional[str]
+ A name to make the agent easier to find
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.update_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}",
+ method="PATCH",
+ json={
+ "conversation_config": conversation_config,
+ "platform_settings": platform_settings,
+ "secrets": convert_and_respect_annotation_metadata(
+ object_=secrets,
+ annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem],
+ direction="write",
+ ),
+ "name": name,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentResponseModel,
+ construct_type(
+ type_=GetAgentResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_widget(
+ self,
+ agent_id: str,
+ *,
+ conversation_signature: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentEmbedResponseModel:
+ """
+ Retrieve the widget configuration for an agent
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ conversation_signature : typing.Optional[str]
+ An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentEmbedResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_widget(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget",
+ method="GET",
+ params={
+ "conversation_signature": conversation_signature,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentEmbedResponseModel,
+ construct_type(
+ type_=GetAgentEmbedResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_link(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAgentLinkResponseModel:
+ """
+ Get the current link used to share the agent with others
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentLinkResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_link(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/link",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentLinkResponseModel,
+ construct_type(
+ type_=GetAgentLinkResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def post_avatar(
+ self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None
+ ) -> PostAgentAvatarResponseModel:
+ """
+ Sets the avatar for an agent displayed in the widget
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ avatar_file : core.File
+ See core.File for more documentation
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PostAgentAvatarResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.post_avatar(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar",
+ method="POST",
+ data={},
+ files={
+ "avatar_file": avatar_file,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PostAgentAvatarResponseModel,
+ construct_type(
+ type_=PostAgentAvatarResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_knowledge_base_document(
+ self, agent_id: str, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetKnowledgeBaseReponseModel:
+ """
+ Get details about a specific documentation making up the agent's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ documentation_id : str
+ The id of a document from the agent's knowledge base. This is returned on document addition.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetKnowledgeBaseReponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_knowledge_base_document(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/knowledge-base/{jsonable_encoder(documentation_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetKnowledgeBaseReponseModel,
+ construct_type(
+ type_=GetKnowledgeBaseReponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_agent_secret(
+ self, agent_id: str, *, name: str, secret_value: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AddAgentSecretResponseModel:
+ """
+ Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ name : str
+ A name to help identify a particular agent secret
+
+ secret_value : str
+ A value to be encrypted and used by the agent
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddAgentSecretResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.add_agent_secret(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ name="name",
+ secret_value="secret_value",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-secret",
+ method="POST",
+ json={
+ "name": name,
+ "secret_value": secret_value,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddAgentSecretResponseModel,
+ construct_type(
+ type_=AddAgentSecretResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_knowledge_base_document(
+ self,
+ agent_id: str,
+ *,
+ url: typing.Optional[str] = OMIT,
+ file: typing.Optional[core.File] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AddKnowledgeBaseResponseModel:
+ """
+ Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
+
+ Parameters
+ ----------
+ agent_id : str
+ The id of an agent. This is returned on agent creation.
+
+ url : typing.Optional[str]
+ URL to a page of documentation that the agent will have access to in order to interact with users.
+
+ file : typing.Optional[core.File]
+ See core.File for more documentation
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AddKnowledgeBaseResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.create_knowledge_base_document(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/agents/{jsonable_encoder(agent_id)}/add-to-knowledge-base",
+ method="POST",
+ data={
+ "url": url,
+ },
+ files={
+ "file": file,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AddKnowledgeBaseResponseModel,
+ construct_type(
+ type_=AddKnowledgeBaseResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_agents(
+ self,
+ *,
+ cursor: typing.Optional[str] = None,
+ page_size: typing.Optional[int] = None,
+ search: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetAgentsPageResponseModel:
+ """
+ Returns a page of your agents and their metadata.
+
+ Parameters
+ ----------
+ cursor : typing.Optional[str]
+ Used for fetching next page. Cursor is returned in the response.
+
+ page_size : typing.Optional[int]
+ How many Agents to return at maximum. Can not exceed 100, defaults to 30.
+
+ search : typing.Optional[str]
+ Search by agents name.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAgentsPageResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_agents()
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/agents",
+ method="GET",
+ params={
+ "cursor": cursor,
+ "page_size": page_size,
+ "search": search,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAgentsPageResponseModel,
+ construct_type(
+ type_=GetAgentsPageResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_conversations(
+ self,
+ *,
+ cursor: typing.Optional[str] = None,
+ agent_id: typing.Optional[str] = None,
+ call_successful: typing.Optional[EvaluationSuccessResult] = None,
+ page_size: typing.Optional[int] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetConversationsPageResponseModel:
+ """
+ Get all conversations of agents that user owns. With option to restrict to a specific agent.
+
+ Parameters
+ ----------
+ cursor : typing.Optional[str]
+ Used for fetching next page. Cursor is returned in the response.
+
+ agent_id : typing.Optional[str]
+ The id of the agent you're taking the action on.
+
+ call_successful : typing.Optional[EvaluationSuccessResult]
+ The result of the success evaluation
+
+ page_size : typing.Optional[int]
+            How many conversations to return at maximum. Cannot exceed 100; defaults to 30.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConversationsPageResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_conversations(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/conversations",
+ method="GET",
+ params={
+ "cursor": cursor,
+ "agent_id": agent_id,
+ "call_successful": call_successful,
+ "page_size": page_size,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConversationsPageResponseModel,
+ construct_type(
+ type_=GetConversationsPageResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_conversation(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetConversationResponseModel:
+ """
+ Get the details of a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConversationResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConversationResponseModel,
+ construct_type(
+ type_=GetConversationResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_conversation_audio(
+ self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Get the audio recording of a particular conversation
+
+ Parameters
+ ----------
+ conversation_id : str
+ The id of the conversation you're taking the action on.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_conversation_audio(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/elevenlabs/conversational_ai/types/__init__.py b/src/elevenlabs/conversational_ai/types/__init__.py
new file mode 100644
index 0000000..3d467b3
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/types/__init__.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item import (
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+)
+
+__all__ = [
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New",
+ "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored",
+]
diff --git a/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py b/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py
new file mode 100644
index 0000000..ffcbbd7
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ...core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ...core.unchecked_base_model import UnionMetadata
+
+
+class BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New(UncheckedBaseModel):
+ type: typing.Literal["new"] = "new"
+ name: str
+ value: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored(UncheckedBaseModel):
+ type: typing.Literal["stored"] = "stored"
+ secret_id: str
+ name: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem = typing_extensions.Annotated[
+ typing.Union[
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+ ],
+ UnionMetadata(discriminant="type"),
+]
diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py
index 043753f..6a14c53 100644
--- a/src/elevenlabs/core/client_wrapper.py
+++ b/src/elevenlabs/core/client_wrapper.py
@@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "elevenlabs",
- "X-Fern-SDK-Version": "1.13.0",
+ "X-Fern-SDK-Version": "1.13.1",
}
if self._api_key is not None:
headers["xi-api-key"] = self._api_key
diff --git a/src/elevenlabs/dubbing/client.py b/src/elevenlabs/dubbing/client.py
index f9e3e4a..5a42929 100644
--- a/src/elevenlabs/dubbing/client.py
+++ b/src/elevenlabs/dubbing/client.py
@@ -287,18 +287,6 @@ def get_dubbed_file(
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.dubbing.get_dubbed_file(
- dubbing_id="string",
- language_code="string",
- )
"""
with self._client_wrapper.httpx_client.stream(
f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}",
@@ -694,26 +682,6 @@ async def get_dubbed_file(
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.dubbing.get_dubbed_file(
- dubbing_id="string",
- language_code="string",
- )
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}",
diff --git a/src/elevenlabs/history/__init__.py b/src/elevenlabs/history/__init__.py
index f3ea265..5c94f16 100644
--- a/src/elevenlabs/history/__init__.py
+++ b/src/elevenlabs/history/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import HistoryGetAllRequestSource
+
+__all__ = ["HistoryGetAllRequestSource"]
diff --git a/src/elevenlabs/history/client.py b/src/elevenlabs/history/client.py
index 6bb1f6f..cd7367b 100644
--- a/src/elevenlabs/history/client.py
+++ b/src/elevenlabs/history/client.py
@@ -2,6 +2,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
+from .types.history_get_all_request_source import HistoryGetAllRequestSource
from ..core.request_options import RequestOptions
from ..types.get_speech_history_response import GetSpeechHistoryResponse
from ..core.unchecked_base_model import construct_type
@@ -27,6 +28,8 @@ def get_all(
page_size: typing.Optional[int] = None,
start_after_history_item_id: typing.Optional[str] = None,
voice_id: typing.Optional[str] = None,
+ search: typing.Optional[str] = None,
+ source: typing.Optional[HistoryGetAllRequestSource] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetSpeechHistoryResponse:
"""
@@ -43,6 +46,12 @@ def get_all(
voice_id : typing.Optional[str]
Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs.
+ search : typing.Optional[str]
+            Search term used for filtering.
+
+ source : typing.Optional[HistoryGetAllRequestSource]
+ Source of the generated history item
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -70,6 +79,8 @@ def get_all(
"page_size": page_size,
"start_after_history_item_id": start_after_history_item_id,
"voice_id": voice_id,
+ "search": search,
+ "source": source,
},
request_options=request_options,
)
@@ -347,6 +358,8 @@ async def get_all(
page_size: typing.Optional[int] = None,
start_after_history_item_id: typing.Optional[str] = None,
voice_id: typing.Optional[str] = None,
+ search: typing.Optional[str] = None,
+ source: typing.Optional[HistoryGetAllRequestSource] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetSpeechHistoryResponse:
"""
@@ -363,6 +376,12 @@ async def get_all(
voice_id : typing.Optional[str]
Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs.
+ search : typing.Optional[str]
+            Search term used for filtering.
+
+ source : typing.Optional[HistoryGetAllRequestSource]
+ Source of the generated history item
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -398,6 +417,8 @@ async def main() -> None:
"page_size": page_size,
"start_after_history_item_id": start_after_history_item_id,
"voice_id": voice_id,
+ "search": search,
+ "source": source,
},
request_options=request_options,
)
diff --git a/src/elevenlabs/history/types/__init__.py b/src/elevenlabs/history/types/__init__.py
new file mode 100644
index 0000000..c1e5069
--- /dev/null
+++ b/src/elevenlabs/history/types/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .history_get_all_request_source import HistoryGetAllRequestSource
+
+__all__ = ["HistoryGetAllRequestSource"]
diff --git a/src/elevenlabs/history/types/history_get_all_request_source.py b/src/elevenlabs/history/types/history_get_all_request_source.py
new file mode 100644
index 0000000..fc4371d
--- /dev/null
+++ b/src/elevenlabs/history/types/history_get_all_request_source.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+HistoryGetAllRequestSource = typing.Union[typing.Literal["TTS", "STS"], typing.Any]
diff --git a/src/elevenlabs/projects/__init__.py b/src/elevenlabs/projects/__init__.py
index 7bdfec7..749f44b 100644
--- a/src/elevenlabs/projects/__init__.py
+++ b/src/elevenlabs/projects/__init__.py
@@ -1,5 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import ProjectsAddRequestTargetAudience
+from .types import ProjectsAddRequestFiction, ProjectsAddRequestTargetAudience
-__all__ = ["ProjectsAddRequestTargetAudience"]
+__all__ = ["ProjectsAddRequestFiction", "ProjectsAddRequestTargetAudience"]
diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py
index 5a944a8..9c249e1 100644
--- a/src/elevenlabs/projects/client.py
+++ b/src/elevenlabs/projects/client.py
@@ -11,6 +11,7 @@
from ..core.api_error import ApiError
from .. import core
from .types.projects_add_request_target_audience import ProjectsAddRequestTargetAudience
+from .types.projects_add_request_fiction import ProjectsAddRequestFiction
from ..types.add_project_response_model import AddProjectResponseModel
from ..types.project_extended_response_model import ProjectExtendedResponseModel
from ..core.jsonable_encoder import jsonable_encoder
@@ -104,6 +105,8 @@ def add(
acx_volume_normalization: typing.Optional[bool] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT,
+ fiction: typing.Optional[ProjectsAddRequestFiction] = OMIT,
+ quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AddProjectResponseModel:
"""
@@ -176,6 +179,12 @@ def add(
pronunciation_dictionary_locators : typing.Optional[typing.List[str]]
A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+ fiction : typing.Optional[ProjectsAddRequestFiction]
+            An optional specification of whether the content of this project is fiction or non-fiction.
+
+ quality_check_on : typing.Optional[bool]
+ Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -221,6 +230,8 @@ def add(
"acx_volume_normalization": acx_volume_normalization,
"volume_normalization": volume_normalization,
"pronunciation_dictionary_locators": pronunciation_dictionary_locators,
+ "fiction": fiction,
+ "quality_check_on": quality_check_on,
},
files={
"from_document": from_document,
@@ -322,6 +333,7 @@ def edit_basic_project_info(
author: typing.Optional[str] = OMIT,
isbn_number: typing.Optional[str] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
+ quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EditProjectResponseModel:
"""
@@ -353,6 +365,9 @@ def edit_basic_project_info(
volume_normalization : typing.Optional[bool]
When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+ quality_check_on : typing.Optional[bool]
+ Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -386,6 +401,7 @@ def edit_basic_project_info(
"author": author,
"isbn_number": isbn_number,
"volume_normalization": volume_normalization,
+ "quality_check_on": quality_check_on,
},
request_options=request_options,
omit=OMIT,
@@ -620,19 +636,6 @@ def stream_audio(
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.projects.stream_audio(
- project_id="string",
- project_snapshot_id="string",
- convert_to_mpeg=True,
- )
"""
with self._client_wrapper.httpx_client.stream(
f"v1/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream",
@@ -965,6 +968,8 @@ async def add(
acx_volume_normalization: typing.Optional[bool] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT,
+ fiction: typing.Optional[ProjectsAddRequestFiction] = OMIT,
+ quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AddProjectResponseModel:
"""
@@ -1037,6 +1042,12 @@ async def add(
pronunciation_dictionary_locators : typing.Optional[typing.List[str]]
A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+ fiction : typing.Optional[ProjectsAddRequestFiction]
+            An optional specification of whether the content of this project is fiction or non-fiction.
+
+ quality_check_on : typing.Optional[bool]
+ Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1090,6 +1101,8 @@ async def main() -> None:
"acx_volume_normalization": acx_volume_normalization,
"volume_normalization": volume_normalization,
"pronunciation_dictionary_locators": pronunciation_dictionary_locators,
+ "fiction": fiction,
+ "quality_check_on": quality_check_on,
},
files={
"from_document": from_document,
@@ -1199,6 +1212,7 @@ async def edit_basic_project_info(
author: typing.Optional[str] = OMIT,
isbn_number: typing.Optional[str] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
+ quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EditProjectResponseModel:
"""
@@ -1230,6 +1244,9 @@ async def edit_basic_project_info(
volume_normalization : typing.Optional[bool]
When the project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
+ quality_check_on : typing.Optional[bool]
+ Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1271,6 +1288,7 @@ async def main() -> None:
"author": author,
"isbn_number": isbn_number,
"volume_normalization": volume_normalization,
+ "quality_check_on": quality_check_on,
},
request_options=request_options,
omit=OMIT,
@@ -1529,27 +1547,6 @@ async def stream_audio(
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.projects.stream_audio(
- project_id="string",
- project_snapshot_id="string",
- convert_to_mpeg=True,
- )
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
f"v1/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream",
diff --git a/src/elevenlabs/projects/types/__init__.py b/src/elevenlabs/projects/types/__init__.py
index 42c21d4..e0531ce 100644
--- a/src/elevenlabs/projects/types/__init__.py
+++ b/src/elevenlabs/projects/types/__init__.py
@@ -1,5 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
+from .projects_add_request_fiction import ProjectsAddRequestFiction
from .projects_add_request_target_audience import ProjectsAddRequestTargetAudience
-__all__ = ["ProjectsAddRequestTargetAudience"]
+__all__ = ["ProjectsAddRequestFiction", "ProjectsAddRequestTargetAudience"]
diff --git a/src/elevenlabs/projects/types/projects_add_request_fiction.py b/src/elevenlabs/projects/types/projects_add_request_fiction.py
new file mode 100644
index 0000000..a5232ff
--- /dev/null
+++ b/src/elevenlabs/projects/types/projects_add_request_fiction.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectsAddRequestFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any]
diff --git a/src/elevenlabs/speech_to_speech/client.py b/src/elevenlabs/speech_to_speech/client.py
index 981ca92..d4e661b 100644
--- a/src/elevenlabs/speech_to_speech/client.py
+++ b/src/elevenlabs/speech_to_speech/client.py
@@ -63,7 +63,7 @@ def convert(
Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
remove_background_noise : typing.Optional[bool]
If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
@@ -75,20 +75,6 @@ def convert(
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.speech_to_speech.convert(
- voice_id="string",
- enable_logging=True,
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
- )
"""
with self._client_wrapper.httpx_client.stream(
f"v1/speech-to-speech/{jsonable_encoder(voice_id)}",
@@ -184,7 +170,7 @@ def convert_as_stream(
Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
remove_background_noise : typing.Optional[bool]
If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
@@ -196,20 +182,6 @@ def convert_as_stream(
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.speech_to_speech.convert_as_stream(
- voice_id="string",
- enable_logging="0",
- optimize_streaming_latency="mp3_22050_32",
- output_format="string",
- )
"""
with self._client_wrapper.httpx_client.stream(
f"v1/speech-to-speech/{jsonable_encoder(voice_id)}/stream",
@@ -299,7 +271,7 @@ async def convert(
Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
remove_background_noise : typing.Optional[bool]
If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
@@ -311,28 +283,6 @@ async def convert(
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.speech_to_speech.convert(
- voice_id="string",
- enable_logging=True,
- optimize_streaming_latency="0",
- output_format="mp3_22050_32",
- )
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
f"v1/speech-to-speech/{jsonable_encoder(voice_id)}",
@@ -428,7 +378,7 @@ async def convert_as_stream(
Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
remove_background_noise : typing.Optional[bool]
If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
@@ -440,28 +390,6 @@ async def convert_as_stream(
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.speech_to_speech.convert_as_stream(
- voice_id="string",
- enable_logging="0",
- optimize_streaming_latency="mp3_22050_32",
- output_format="string",
- )
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
f"v1/speech-to-speech/{jsonable_encoder(voice_id)}/stream",
diff --git a/src/elevenlabs/text_to_sound_effects/client.py b/src/elevenlabs/text_to_sound_effects/client.py
index af99463..cbf8c36 100644
--- a/src/elevenlabs/text_to_sound_effects/client.py
+++ b/src/elevenlabs/text_to_sound_effects/client.py
@@ -47,19 +47,6 @@ def convert(
------
typing.Iterator[bytes]
Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.text_to_sound_effects.convert(
- text="string",
- duration_seconds=1.1,
- prompt_influence=1.1,
- )
"""
with self._client_wrapper.httpx_client.stream(
"v1/sound-generation",
@@ -128,27 +115,6 @@ async def convert(
------
typing.AsyncIterator[bytes]
Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.text_to_sound_effects.convert(
- text="string",
- duration_seconds=1.1,
- prompt_influence=1.1,
- )
-
-
- asyncio.run(main())
"""
async with self._client_wrapper.httpx_client.stream(
"v1/sound-generation",
diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py
index 6056554..d58c083 100644
--- a/src/elevenlabs/text_to_speech/client.py
+++ b/src/elevenlabs/text_to_speech/client.py
@@ -94,7 +94,7 @@ def convert(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -252,7 +252,7 @@ def convert_with_timestamps(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -405,7 +405,7 @@ def convert_as_stream(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -563,7 +563,7 @@ def stream_with_timestamps(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -714,7 +714,7 @@ async def convert(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -880,7 +880,7 @@ async def convert_with_timestamps(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -1041,7 +1041,7 @@ async def convert_as_stream(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
@@ -1207,7 +1207,7 @@ async def stream_with_timestamps(
A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
seed : typing.Optional[int]
- If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
previous_text : typing.Optional[str]
The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
diff --git a/src/elevenlabs/text_to_voice/__init__.py b/src/elevenlabs/text_to_voice/__init__.py
index f3ea265..1a606e5 100644
--- a/src/elevenlabs/text_to_voice/__init__.py
+++ b/src/elevenlabs/text_to_voice/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import TextToVoiceCreatePreviewsRequestOutputFormat
+
+__all__ = ["TextToVoiceCreatePreviewsRequestOutputFormat"]
diff --git a/src/elevenlabs/text_to_voice/client.py b/src/elevenlabs/text_to_voice/client.py
index 5af3934..53a00fb 100644
--- a/src/elevenlabs/text_to_voice/client.py
+++ b/src/elevenlabs/text_to_voice/client.py
@@ -2,6 +2,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
+from .types.text_to_voice_create_previews_request_output_format import TextToVoiceCreatePreviewsRequestOutputFormat
from ..core.request_options import RequestOptions
from ..types.voice_previews_response_model import VoicePreviewsResponseModel
from ..core.unchecked_base_model import construct_type
@@ -21,7 +22,13 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
def create_previews(
- self, *, voice_description: str, text: str, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ voice_description: str,
+ text: str,
+ output_format: typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] = None,
+ auto_generate_text: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> VoicePreviewsResponseModel:
"""
Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
@@ -34,6 +41,23 @@ def create_previews(
text : str
Text to generate, text length has to be between 100 and 1000.
+ output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]
+ Output format of the generated audio. Must be one of:
+ mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
+ mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
+ mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
+ mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
+ mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
+ mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
+ pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
+ pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
+ pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
+ pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
+ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+
+ auto_generate_text : typing.Optional[bool]
+ Whether to automatically generate a text suitable for the voice description.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -50,16 +74,20 @@ def create_previews(
api_key="YOUR_API_KEY",
)
client.text_to_voice.create_previews(
- voice_description="voice_description",
- text="text",
+ voice_description="A sassy little squeaky mouse",
+ text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.",
)
"""
_response = self._client_wrapper.httpx_client.request(
"v1/text-to-voice/create-previews",
method="POST",
+ params={
+ "output_format": output_format,
+ },
json={
"voice_description": voice_description,
"text": text,
+ "auto_generate_text": auto_generate_text,
},
request_options=request_options,
omit=OMIT,
@@ -134,9 +162,9 @@ def create_voice_from_preview(
api_key="YOUR_API_KEY",
)
client.text_to_voice.create_voice_from_preview(
- voice_name="voice_name",
- voice_description="voice_description",
- generated_voice_id="generated_voice_id",
+ voice_name="Little squeaky mouse",
+ voice_description="A sassy little squeaky mouse",
+ generated_voice_id="37HceQefKmEi3bGovXjL",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -182,7 +210,13 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def create_previews(
- self, *, voice_description: str, text: str, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ voice_description: str,
+ text: str,
+ output_format: typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] = None,
+ auto_generate_text: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> VoicePreviewsResponseModel:
"""
Generate a custom voice based on voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like the a voice previewand want to create the voice call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
@@ -195,6 +229,23 @@ async def create_previews(
text : str
Text to generate, text length has to be between 100 and 1000.
+ output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]
+ Output format of the generated audio. Must be one of:
+ mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps.
+ mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps.
+ mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps.
+ mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps.
+ mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps.
+ mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above.
+ pcm_16000 - PCM format (S16LE) with 16kHz sample rate.
+ pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate.
+ pcm_24000 - PCM format (S16LE) with 24kHz sample rate.
+ pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above.
+ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
+
+ auto_generate_text : typing.Optional[bool]
+ Whether to automatically generate a text suitable for the voice description.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -216,8 +267,8 @@ async def create_previews(
async def main() -> None:
await client.text_to_voice.create_previews(
- voice_description="voice_description",
- text="text",
+ voice_description="A sassy little squeaky mouse",
+ text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.",
)
@@ -226,9 +277,13 @@ async def main() -> None:
_response = await self._client_wrapper.httpx_client.request(
"v1/text-to-voice/create-previews",
method="POST",
+ params={
+ "output_format": output_format,
+ },
json={
"voice_description": voice_description,
"text": text,
+ "auto_generate_text": auto_generate_text,
},
request_options=request_options,
omit=OMIT,
@@ -308,9 +363,9 @@ async def create_voice_from_preview(
async def main() -> None:
await client.text_to_voice.create_voice_from_preview(
- voice_name="voice_name",
- voice_description="voice_description",
- generated_voice_id="generated_voice_id",
+ voice_name="Little squeaky mouse",
+ voice_description="A sassy little squeaky mouse",
+ generated_voice_id="37HceQefKmEi3bGovXjL",
)
diff --git a/src/elevenlabs/text_to_voice/types/__init__.py b/src/elevenlabs/text_to_voice/types/__init__.py
new file mode 100644
index 0000000..39c033b
--- /dev/null
+++ b/src/elevenlabs/text_to_voice/types/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .text_to_voice_create_previews_request_output_format import TextToVoiceCreatePreviewsRequestOutputFormat
+
+__all__ = ["TextToVoiceCreatePreviewsRequestOutputFormat"]
diff --git a/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py b/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py
new file mode 100644
index 0000000..6e6980f
--- /dev/null
+++ b/src/elevenlabs/text_to_voice/types/text_to_voice_create_previews_request_output_format.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TextToVoiceCreatePreviewsRequestOutputFormat = typing.Union[
+ typing.Literal[
+ "mp3_22050_32",
+ "mp3_44100_32",
+ "mp3_44100_64",
+ "mp3_44100_96",
+ "mp3_44100_128",
+ "mp3_44100_192",
+ "pcm_16000",
+ "pcm_22050",
+ "pcm_24000",
+ "pcm_44100",
+ "ulaw_8000",
+ ],
+ typing.Any,
+]
diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py
index 2363ce4..389cd77 100644
--- a/src/elevenlabs/types/__init__.py
+++ b/src/elevenlabs/types/__init__.py
@@ -1,25 +1,80 @@
# This file was auto-generated by Fern from our API Definition.
from .accent import Accent
+from .add_agent_secret_response_model import AddAgentSecretResponseModel
from .add_chapter_response_model import AddChapterResponseModel
+from .add_knowledge_base_response_model import AddKnowledgeBaseResponseModel
from .add_project_response_model import AddProjectResponseModel
from .add_pronunciation_dictionary_response_model import AddPronunciationDictionaryResponseModel
from .add_pronunciation_dictionary_rules_response_model import AddPronunciationDictionaryRulesResponseModel
from .add_voice_ivc_response_model import AddVoiceIvcResponseModel
from .add_voice_response_model import AddVoiceResponseModel
from .age import Age
+from .agent_ban import AgentBan
+from .agent_config import AgentConfig
+from .agent_config_override import AgentConfigOverride
+from .agent_metadata_response_model import AgentMetadataResponseModel
+from .agent_platform_settings import AgentPlatformSettings
+from .agent_summary_response_model import AgentSummaryResponseModel
+from .allowlist_item import AllowlistItem
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .array_json_schema_property_items import ArrayJsonSchemaPropertyItems
+from .asr_conversational_config import AsrConversationalConfig
+from .asr_input_format import AsrInputFormat
+from .asr_provider import AsrProvider
+from .asr_quality import AsrQuality
from .audio_native_create_project_response_model import AudioNativeCreateProjectResponseModel
from .audio_native_get_embed_code_response_model import AudioNativeGetEmbedCodeResponseModel
+from .auth_settings import AuthSettings
+from .authorization_method import AuthorizationMethod
+from .ban_reason_type import BanReasonType
from .breakdown_types import BreakdownTypes
from .chapter_response import ChapterResponse
from .chapter_snapshot_response import ChapterSnapshotResponse
from .chapter_snapshots_response import ChapterSnapshotsResponse
from .chapter_state import ChapterState
from .chapter_statistics_response import ChapterStatisticsResponse
+from .client_event import ClientEvent
+from .client_tool_config import ClientToolConfig
+from .conv_ai_new_secret_config import ConvAiNewSecretConfig
+from .conv_ai_secret_locator import ConvAiSecretLocator
+from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig
+from .conversation_charging_common_model import ConversationChargingCommonModel
+from .conversation_config import ConversationConfig
+from .conversation_config_client_override import ConversationConfigClientOverride
+from .conversation_history_analysis_common_model import ConversationHistoryAnalysisCommonModel
+from .conversation_history_evaluation_criteria_result_common_model import (
+ ConversationHistoryEvaluationCriteriaResultCommonModel,
+)
+from .conversation_history_metadata_common_model import ConversationHistoryMetadataCommonModel
+from .conversation_history_transcript_common_model import ConversationHistoryTranscriptCommonModel
+from .conversation_history_transcript_common_model_role import ConversationHistoryTranscriptCommonModelRole
+from .conversation_history_transcript_tool_call_common_model import ConversationHistoryTranscriptToolCallCommonModel
+from .conversation_history_transcript_tool_result_common_model import ConversationHistoryTranscriptToolResultCommonModel
+from .conversation_initiation_client_data import ConversationInitiationClientData
+from .conversation_signed_url_response_model import ConversationSignedUrlResponseModel
+from .conversation_summary_response_model import ConversationSummaryResponseModel
+from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus
+from .conversation_token_db_model import ConversationTokenDbModel
+from .conversation_token_purpose import ConversationTokenPurpose
+from .conversational_config import ConversationalConfig
+from .create_agent_response_model import CreateAgentResponseModel
from .currency import Currency
+from .custom_llm import CustomLlm
+from .data_collection_result_common_model import DataCollectionResultCommonModel
from .do_dubbing_response import DoDubbingResponse
from .dubbing_metadata_response import DubbingMetadataResponse
from .edit_project_response_model import EditProjectResponseModel
+from .embed_config import EmbedConfig
+from .embed_config_avatar import (
+ EmbedConfigAvatar,
+ EmbedConfigAvatar_Image,
+ EmbedConfigAvatar_Orb,
+ EmbedConfigAvatar_Url,
+)
+from .embed_variant import EmbedVariant
+from .evaluation_settings import EvaluationSettings
+from .evaluation_success_result import EvaluationSuccessResult
from .extended_subscription_response_model_billing_period import ExtendedSubscriptionResponseModelBillingPeriod
from .extended_subscription_response_model_character_refresh_period import (
ExtendedSubscriptionResponseModelCharacterRefreshPeriod,
@@ -29,7 +84,16 @@
from .fine_tuning_response import FineTuningResponse
from .fine_tuning_response_model_state_value import FineTuningResponseModelStateValue
from .gender import Gender
+from .get_agent_embed_response_model import GetAgentEmbedResponseModel
+from .get_agent_link_response_model import GetAgentLinkResponseModel
+from .get_agent_response_model import GetAgentResponseModel
+from .get_agents_page_response_model import GetAgentsPageResponseModel
from .get_chapters_response import GetChaptersResponse
+from .get_conversation_response_model import GetConversationResponseModel
+from .get_conversation_response_model_status import GetConversationResponseModelStatus
+from .get_conversations_page_response_model import GetConversationsPageResponseModel
+from .get_knowledge_base_reponse_model import GetKnowledgeBaseReponseModel
+from .get_knowledge_base_reponse_model_type import GetKnowledgeBaseReponseModelType
from .get_library_voices_response import GetLibraryVoicesResponse
from .get_projects_response import GetProjectsResponse
from .get_pronunciation_dictionaries_metadata_response_model import GetPronunciationDictionariesMetadataResponseModel
@@ -41,42 +105,61 @@
from .history_alignments_response_model import HistoryAlignmentsResponseModel
from .history_item import HistoryItem
from .http_validation_error import HttpValidationError
+from .image_avatar import ImageAvatar
from .invoice import Invoice
+from .knowledge_base_locator import KnowledgeBaseLocator
+from .knowledge_base_locator_type import KnowledgeBaseLocatorType
from .language_response import LanguageResponse
from .library_voice_response import LibraryVoiceResponse
from .library_voice_response_model_category import LibraryVoiceResponseModelCategory
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from .literal_json_schema_property_type import LiteralJsonSchemaPropertyType
+from .llm import Llm
from .manual_verification_file_response import ManualVerificationFileResponse
from .manual_verification_response import ManualVerificationResponse
from .model import Model
from .model_rates_response_model import ModelRatesResponseModel
from .model_response_model_concurrency_group import ModelResponseModelConcurrencyGroup
+from .object_json_schema_property import ObjectJsonSchemaProperty
+from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue
from .optimize_streaming_latency import OptimizeStreamingLatency
+from .orb_avatar import OrbAvatar
from .output_format import OutputFormat
+from .post_agent_avatar_response_model import PostAgentAvatarResponseModel
from .profile_page_response_model import ProfilePageResponseModel
from .project_extended_response_model import ProjectExtendedResponseModel
from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel
+from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization
+from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction
from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset
from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience
from .project_response import ProjectResponse
from .project_response_model_access_level import ProjectResponseModelAccessLevel
+from .project_response_model_fiction import ProjectResponseModelFiction
from .project_response_model_target_audience import ProjectResponseModelTargetAudience
from .project_snapshot_response import ProjectSnapshotResponse
from .project_snapshot_upload_response_model import ProjectSnapshotUploadResponseModel
from .project_snapshot_upload_response_model_status import ProjectSnapshotUploadResponseModelStatus
from .project_snapshots_response import ProjectSnapshotsResponse
from .project_state import ProjectState
+from .prompt_agent import PromptAgent
+from .prompt_agent_override import PromptAgentOverride
+from .prompt_agent_tools_item import PromptAgentToolsItem, PromptAgentToolsItem_Client, PromptAgentToolsItem_Webhook
+from .prompt_evaluation_criteria import PromptEvaluationCriteria
from .pronunciation_dictionary_alias_rule_request_model import PronunciationDictionaryAliasRuleRequestModel
from .pronunciation_dictionary_phoneme_rule_request_model import PronunciationDictionaryPhonemeRuleRequestModel
from .pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator
from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel
+from .pydantic_pronunciation_dictionary_version_locator import PydanticPronunciationDictionaryVersionLocator
+from .query_params_json_schema import QueryParamsJsonSchema
+from .reader_resource_response_model import ReaderResourceResponseModel
+from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType
from .recording_response import RecordingResponse
from .remove_pronunciation_dictionary_rules_response_model import RemovePronunciationDictionaryRulesResponseModel
from .review_status import ReviewStatus
from .speech_history_item_response import SpeechHistoryItemResponse
from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource
from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory
-from .sso_provider_response_model import SsoProviderResponseModel
-from .sso_provider_response_model_provider_type import SsoProviderResponseModelProviderType
from .subscription import Subscription
from .subscription_response import SubscriptionResponse
from .subscription_response_model_billing_period import SubscriptionResponseModelBillingPeriod
@@ -84,6 +167,14 @@
from .subscription_response_model_currency import SubscriptionResponseModelCurrency
from .subscription_status import SubscriptionStatus
from .text_to_speech_as_stream_request import TextToSpeechAsStreamRequest
+from .tts_conversational_config import TtsConversationalConfig
+from .tts_conversational_config_override import TtsConversationalConfigOverride
+from .tts_conversational_model import TtsConversationalModel
+from .tts_optimize_streaming_latency import TtsOptimizeStreamingLatency
+from .tts_output_format import TtsOutputFormat
+from .turn_config import TurnConfig
+from .turn_mode import TurnMode
+from .url_avatar import UrlAvatar
from .usage_characters_response_model import UsageCharactersResponseModel
from .user import User
from .validation_error import ValidationError
@@ -103,28 +194,83 @@
from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory
from .voice_sharing_state import VoiceSharingState
from .voice_verification_response import VoiceVerificationResponse
+from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig
+from .webhook_tool_api_schema_config_method import WebhookToolApiSchemaConfigMethod
+from .webhook_tool_api_schema_config_request_headers_value import WebhookToolApiSchemaConfigRequestHeadersValue
+from .webhook_tool_config import WebhookToolConfig
__all__ = [
"Accent",
+ "AddAgentSecretResponseModel",
"AddChapterResponseModel",
+ "AddKnowledgeBaseResponseModel",
"AddProjectResponseModel",
"AddPronunciationDictionaryResponseModel",
"AddPronunciationDictionaryRulesResponseModel",
"AddVoiceIvcResponseModel",
"AddVoiceResponseModel",
"Age",
+ "AgentBan",
+ "AgentConfig",
+ "AgentConfigOverride",
+ "AgentMetadataResponseModel",
+ "AgentPlatformSettings",
+ "AgentSummaryResponseModel",
+ "AllowlistItem",
+ "ArrayJsonSchemaProperty",
+ "ArrayJsonSchemaPropertyItems",
+ "AsrConversationalConfig",
+ "AsrInputFormat",
+ "AsrProvider",
+ "AsrQuality",
"AudioNativeCreateProjectResponseModel",
"AudioNativeGetEmbedCodeResponseModel",
+ "AuthSettings",
+ "AuthorizationMethod",
+ "BanReasonType",
"BreakdownTypes",
"ChapterResponse",
"ChapterSnapshotResponse",
"ChapterSnapshotsResponse",
"ChapterState",
"ChapterStatisticsResponse",
+ "ClientEvent",
+ "ClientToolConfig",
+ "ConvAiNewSecretConfig",
+ "ConvAiSecretLocator",
+ "ConvAiStoredSecretConfig",
+ "ConversationChargingCommonModel",
+ "ConversationConfig",
+ "ConversationConfigClientOverride",
+ "ConversationHistoryAnalysisCommonModel",
+ "ConversationHistoryEvaluationCriteriaResultCommonModel",
+ "ConversationHistoryMetadataCommonModel",
+ "ConversationHistoryTranscriptCommonModel",
+ "ConversationHistoryTranscriptCommonModelRole",
+ "ConversationHistoryTranscriptToolCallCommonModel",
+ "ConversationHistoryTranscriptToolResultCommonModel",
+ "ConversationInitiationClientData",
+ "ConversationSignedUrlResponseModel",
+ "ConversationSummaryResponseModel",
+ "ConversationSummaryResponseModelStatus",
+ "ConversationTokenDbModel",
+ "ConversationTokenPurpose",
+ "ConversationalConfig",
+ "CreateAgentResponseModel",
"Currency",
+ "CustomLlm",
+ "DataCollectionResultCommonModel",
"DoDubbingResponse",
"DubbingMetadataResponse",
"EditProjectResponseModel",
+ "EmbedConfig",
+ "EmbedConfigAvatar",
+ "EmbedConfigAvatar_Image",
+ "EmbedConfigAvatar_Orb",
+ "EmbedConfigAvatar_Url",
+ "EmbedVariant",
+ "EvaluationSettings",
+ "EvaluationSuccessResult",
"ExtendedSubscriptionResponseModelBillingPeriod",
"ExtendedSubscriptionResponseModelCharacterRefreshPeriod",
"ExtendedSubscriptionResponseModelCurrency",
@@ -132,7 +278,16 @@
"FineTuningResponse",
"FineTuningResponseModelStateValue",
"Gender",
+ "GetAgentEmbedResponseModel",
+ "GetAgentLinkResponseModel",
+ "GetAgentResponseModel",
+ "GetAgentsPageResponseModel",
"GetChaptersResponse",
+ "GetConversationResponseModel",
+ "GetConversationResponseModelStatus",
+ "GetConversationsPageResponseModel",
+ "GetKnowledgeBaseReponseModel",
+ "GetKnowledgeBaseReponseModelType",
"GetLibraryVoicesResponse",
"GetProjectsResponse",
"GetPronunciationDictionariesMetadataResponseModel",
@@ -144,42 +299,63 @@
"HistoryAlignmentsResponseModel",
"HistoryItem",
"HttpValidationError",
+ "ImageAvatar",
"Invoice",
+ "KnowledgeBaseLocator",
+ "KnowledgeBaseLocatorType",
"LanguageResponse",
"LibraryVoiceResponse",
"LibraryVoiceResponseModelCategory",
+ "LiteralJsonSchemaProperty",
+ "LiteralJsonSchemaPropertyType",
+ "Llm",
"ManualVerificationFileResponse",
"ManualVerificationResponse",
"Model",
"ModelRatesResponseModel",
"ModelResponseModelConcurrencyGroup",
+ "ObjectJsonSchemaProperty",
+ "ObjectJsonSchemaPropertyPropertiesValue",
"OptimizeStreamingLatency",
+ "OrbAvatar",
"OutputFormat",
+ "PostAgentAvatarResponseModel",
"ProfilePageResponseModel",
"ProjectExtendedResponseModel",
"ProjectExtendedResponseModelAccessLevel",
+ "ProjectExtendedResponseModelApplyTextNormalization",
+ "ProjectExtendedResponseModelFiction",
"ProjectExtendedResponseModelQualityPreset",
"ProjectExtendedResponseModelTargetAudience",
"ProjectResponse",
"ProjectResponseModelAccessLevel",
+ "ProjectResponseModelFiction",
"ProjectResponseModelTargetAudience",
"ProjectSnapshotResponse",
"ProjectSnapshotUploadResponseModel",
"ProjectSnapshotUploadResponseModelStatus",
"ProjectSnapshotsResponse",
"ProjectState",
+ "PromptAgent",
+ "PromptAgentOverride",
+ "PromptAgentToolsItem",
+ "PromptAgentToolsItem_Client",
+ "PromptAgentToolsItem_Webhook",
+ "PromptEvaluationCriteria",
"PronunciationDictionaryAliasRuleRequestModel",
"PronunciationDictionaryPhonemeRuleRequestModel",
"PronunciationDictionaryVersionLocator",
"PronunciationDictionaryVersionResponseModel",
+ "PydanticPronunciationDictionaryVersionLocator",
+ "QueryParamsJsonSchema",
+ "ReaderResourceResponseModel",
+ "ReaderResourceResponseModelResourceType",
"RecordingResponse",
"RemovePronunciationDictionaryRulesResponseModel",
"ReviewStatus",
"SpeechHistoryItemResponse",
"SpeechHistoryItemResponseModelSource",
"SpeechHistoryItemResponseModelVoiceCategory",
- "SsoProviderResponseModel",
- "SsoProviderResponseModelProviderType",
"Subscription",
"SubscriptionResponse",
"SubscriptionResponseModelBillingPeriod",
@@ -187,6 +363,14 @@
"SubscriptionResponseModelCurrency",
"SubscriptionStatus",
"TextToSpeechAsStreamRequest",
+ "TtsConversationalConfig",
+ "TtsConversationalConfigOverride",
+ "TtsConversationalModel",
+ "TtsOptimizeStreamingLatency",
+ "TtsOutputFormat",
+ "TurnConfig",
+ "TurnMode",
+ "UrlAvatar",
"UsageCharactersResponseModel",
"User",
"ValidationError",
@@ -206,4 +390,8 @@
"VoiceSharingResponseModelCategory",
"VoiceSharingState",
"VoiceVerificationResponse",
+ "WebhookToolApiSchemaConfig",
+ "WebhookToolApiSchemaConfigMethod",
+ "WebhookToolApiSchemaConfigRequestHeadersValue",
+ "WebhookToolConfig",
]
diff --git a/src/elevenlabs/types/sso_provider_response_model.py b/src/elevenlabs/types/add_agent_secret_response_model.py
similarity index 66%
rename from src/elevenlabs/types/sso_provider_response_model.py
rename to src/elevenlabs/types/add_agent_secret_response_model.py
index ee15827..88687d8 100644
--- a/src/elevenlabs/types/sso_provider_response_model.py
+++ b/src/elevenlabs/types/add_agent_secret_response_model.py
@@ -1,16 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
-from .sso_provider_response_model_provider_type import SsoProviderResponseModelProviderType
-import typing
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
import pydantic
-class SsoProviderResponseModel(UncheckedBaseModel):
- provider_type: SsoProviderResponseModelProviderType
- provider_id: str
- domains: typing.List[str]
+class AddAgentSecretResponseModel(UncheckedBaseModel):
+ id: str
+ name: str
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/add_knowledge_base_response_model.py b/src/elevenlabs/types/add_knowledge_base_response_model.py
new file mode 100644
index 0000000..e9105cb
--- /dev/null
+++ b/src/elevenlabs/types/add_knowledge_base_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AddKnowledgeBaseResponseModel(UncheckedBaseModel):
+ id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_ban.py b/src/elevenlabs/types/agent_ban.py
new file mode 100644
index 0000000..ac7027b
--- /dev/null
+++ b/src/elevenlabs/types/agent_ban.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .ban_reason_type import BanReasonType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentBan(UncheckedBaseModel):
+ at_unix: int
+ reason: typing.Optional[str] = None
+ reason_type: BanReasonType
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_config.py b/src/elevenlabs/types/agent_config.py
new file mode 100644
index 0000000..e7ff782
--- /dev/null
+++ b/src/elevenlabs/types/agent_config.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .prompt_agent import PromptAgent
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentConfig(UncheckedBaseModel):
+ prompt: typing.Optional[PromptAgent] = None
+ first_message: typing.Optional[str] = None
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, AgentConfig=AgentConfig)
+update_forward_refs(ObjectJsonSchemaProperty, AgentConfig=AgentConfig)
diff --git a/src/elevenlabs/types/agent_config_override.py b/src/elevenlabs/types/agent_config_override.py
new file mode 100644
index 0000000..a6f959f
--- /dev/null
+++ b/src/elevenlabs/types/agent_config_override.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .prompt_agent_override import PromptAgentOverride
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentConfigOverride(UncheckedBaseModel):
+ prompt: typing.Optional[PromptAgentOverride] = None
+ first_message: typing.Optional[str] = None
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_metadata_response_model.py b/src/elevenlabs/types/agent_metadata_response_model.py
new file mode 100644
index 0000000..3609829
--- /dev/null
+++ b/src/elevenlabs/types/agent_metadata_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AgentMetadataResponseModel(UncheckedBaseModel):
+ created_at_unix_secs: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_platform_settings.py b/src/elevenlabs/types/agent_platform_settings.py
new file mode 100644
index 0000000..595bf41
--- /dev/null
+++ b/src/elevenlabs/types/agent_platform_settings.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .auth_settings import AuthSettings
+from .evaluation_settings import EvaluationSettings
+from .embed_config import EmbedConfig
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from .agent_ban import AgentBan
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentPlatformSettings(UncheckedBaseModel):
+ auth: typing.Optional[AuthSettings] = None
+ evaluation: typing.Optional[EvaluationSettings] = None
+ widget: typing.Optional[EmbedConfig] = None
+ data_collection: typing.Optional[typing.Dict[str, LiteralJsonSchemaProperty]] = None
+ ban: typing.Optional[AgentBan] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_summary_response_model.py b/src/elevenlabs/types/agent_summary_response_model.py
new file mode 100644
index 0000000..91ec68b
--- /dev/null
+++ b/src/elevenlabs/types/agent_summary_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AgentSummaryResponseModel(UncheckedBaseModel):
+ agent_id: str
+ name: str
+ created_at_unix_secs: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/allowlist_item.py b/src/elevenlabs/types/allowlist_item.py
new file mode 100644
index 0000000..3e10d4b
--- /dev/null
+++ b/src/elevenlabs/types/allowlist_item.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AllowlistItem(UncheckedBaseModel):
+ hostname: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/array_json_schema_property.py b/src/elevenlabs/types/array_json_schema_property.py
new file mode 100644
index 0000000..bc69467
--- /dev/null
+++ b/src/elevenlabs/types/array_json_schema_property.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ArrayJsonSchemaProperty(UncheckedBaseModel):
+ type: typing.Optional[typing.Literal["array"]] = None
+ items: "ArrayJsonSchemaPropertyItems"
+ description: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .object_json_schema_property import ObjectJsonSchemaProperty # noqa: E402
+from .array_json_schema_property_items import ArrayJsonSchemaPropertyItems # noqa: E402
+
+update_forward_refs(ObjectJsonSchemaProperty, ArrayJsonSchemaProperty=ArrayJsonSchemaProperty)
+update_forward_refs(ArrayJsonSchemaProperty)
diff --git a/src/elevenlabs/types/array_json_schema_property_items.py b/src/elevenlabs/types/array_json_schema_property_items.py
new file mode 100644
index 0000000..ed27a10
--- /dev/null
+++ b/src/elevenlabs/types/array_json_schema_property_items.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+import typing
+
+if typing.TYPE_CHECKING:
+ from .object_json_schema_property import ObjectJsonSchemaProperty
+ from .array_json_schema_property import ArrayJsonSchemaProperty
+ArrayJsonSchemaPropertyItems = typing.Union[
+ LiteralJsonSchemaProperty, "ObjectJsonSchemaProperty", "ArrayJsonSchemaProperty"
+]
diff --git a/src/elevenlabs/types/asr_conversational_config.py b/src/elevenlabs/types/asr_conversational_config.py
new file mode 100644
index 0000000..125c833
--- /dev/null
+++ b/src/elevenlabs/types/asr_conversational_config.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .asr_quality import AsrQuality
+from .asr_provider import AsrProvider
+from .asr_input_format import AsrInputFormat
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AsrConversationalConfig(UncheckedBaseModel):
+ quality: typing.Optional[AsrQuality] = None
+ provider: typing.Optional[AsrProvider] = None
+ user_input_audio_format: typing.Optional[AsrInputFormat] = None
+ keywords: typing.Optional[typing.List[str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/asr_input_format.py b/src/elevenlabs/types/asr_input_format.py
new file mode 100644
index 0000000..5d0623d
--- /dev/null
+++ b/src/elevenlabs/types/asr_input_format.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsrInputFormat = typing.Union[
+ typing.Literal["pcm_16000", "pcm_22050", "pcm_24000", "pcm_44100", "ulaw_8000"], typing.Any
+]
diff --git a/src/elevenlabs/types/asr_provider.py b/src/elevenlabs/types/asr_provider.py
new file mode 100644
index 0000000..af99d4a
--- /dev/null
+++ b/src/elevenlabs/types/asr_provider.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsrProvider = typing.Literal["elevenlabs"]
diff --git a/src/elevenlabs/types/asr_quality.py b/src/elevenlabs/types/asr_quality.py
new file mode 100644
index 0000000..b0f3906
--- /dev/null
+++ b/src/elevenlabs/types/asr_quality.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsrQuality = typing.Literal["high"]
diff --git a/src/elevenlabs/types/auth_settings.py b/src/elevenlabs/types/auth_settings.py
new file mode 100644
index 0000000..f673dd8
--- /dev/null
+++ b/src/elevenlabs/types/auth_settings.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .allowlist_item import AllowlistItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AuthSettings(UncheckedBaseModel):
+ enable_auth: typing.Optional[bool] = None
+ allowlist: typing.Optional[typing.List[AllowlistItem]] = None
+ shareable_token: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/authorization_method.py b/src/elevenlabs/types/authorization_method.py
new file mode 100644
index 0000000..7605e0d
--- /dev/null
+++ b/src/elevenlabs/types/authorization_method.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AuthorizationMethod = typing.Union[
+ typing.Literal["public", "authorization_header", "signed_url", "shareable_link"], typing.Any
+]
diff --git a/src/elevenlabs/types/ban_reason_type.py b/src/elevenlabs/types/ban_reason_type.py
new file mode 100644
index 0000000..81accd2
--- /dev/null
+++ b/src/elevenlabs/types/ban_reason_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+BanReasonType = typing.Union[typing.Literal["safety", "manual"], typing.Any]
diff --git a/src/elevenlabs/types/breakdown_types.py b/src/elevenlabs/types/breakdown_types.py
index addda63..cc29940 100644
--- a/src/elevenlabs/types/breakdown_types.py
+++ b/src/elevenlabs/types/breakdown_types.py
@@ -3,5 +3,5 @@
import typing
BreakdownTypes = typing.Union[
- typing.Literal["none", "voice", "user", "api_keys", "all_api_keys", "product_type", "model"], typing.Any
+ typing.Literal["none", "voice", "user", "api_keys", "all_api_keys", "product_type", "model", "resource"], typing.Any
]
diff --git a/src/elevenlabs/types/chapter_response.py b/src/elevenlabs/types/chapter_response.py
index 7193b11..192804d 100644
--- a/src/elevenlabs/types/chapter_response.py
+++ b/src/elevenlabs/types/chapter_response.py
@@ -16,6 +16,7 @@ class ChapterResponse(UncheckedBaseModel):
can_be_downloaded: bool
state: ChapterState
statistics: typing.Optional[ChapterStatisticsResponse] = None
+ last_conversion_error: typing.Optional[str] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/client_event.py b/src/elevenlabs/types/client_event.py
new file mode 100644
index 0000000..5152c63
--- /dev/null
+++ b/src/elevenlabs/types/client_event.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ClientEvent = typing.Union[
+ typing.Literal[
+ "conversation_initiation_metadata",
+ "asr_initiation_metadata",
+ "ping",
+ "audio",
+ "interruption",
+ "user_transcript",
+ "agent_response",
+ "agent_response_correction",
+ "client_tool_call",
+ "internal_vad_score",
+ "internal_turn_probability",
+ "internal_tentative_agent_response",
+ ],
+ typing.Any,
+]
diff --git a/src/elevenlabs/types/client_tool_config.py b/src/elevenlabs/types/client_tool_config.py
new file mode 100644
index 0000000..72762e0
--- /dev/null
+++ b/src/elevenlabs/types/client_tool_config.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ClientToolConfig(UncheckedBaseModel):
+ """
+ A client tool is one that sends an event to the user's client to trigger something client side
+ """
+
+ name: str
+ description: str
+ parameters: typing.Optional[ObjectJsonSchemaProperty] = None
+ expects_response: typing.Optional[bool] = None
+ response_timeout_secs: typing.Optional[int] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, ClientToolConfig=ClientToolConfig)
+update_forward_refs(ObjectJsonSchemaProperty, ClientToolConfig=ClientToolConfig)
diff --git a/src/elevenlabs/types/conv_ai_new_secret_config.py b/src/elevenlabs/types/conv_ai_new_secret_config.py
new file mode 100644
index 0000000..4276a25
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_new_secret_config.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConvAiNewSecretConfig(UncheckedBaseModel):
+ name: str
+ value: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conv_ai_secret_locator.py b/src/elevenlabs/types/conv_ai_secret_locator.py
new file mode 100644
index 0000000..9aa49a8
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_secret_locator.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConvAiSecretLocator(UncheckedBaseModel):
+ """
+ Used to reference a secret from the agent's secret store.
+ """
+
+ secret_id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conv_ai_stored_secret_config.py b/src/elevenlabs/types/conv_ai_stored_secret_config.py
new file mode 100644
index 0000000..316c978
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_stored_secret_config.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConvAiStoredSecretConfig(UncheckedBaseModel):
+ secret_id: str
+ name: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_charging_common_model.py b/src/elevenlabs/types/conversation_charging_common_model.py
new file mode 100644
index 0000000..cfbf546
--- /dev/null
+++ b/src/elevenlabs/types/conversation_charging_common_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationChargingCommonModel(UncheckedBaseModel):
+ dev_discount: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_config.py b/src/elevenlabs/types/conversation_config.py
new file mode 100644
index 0000000..d0e80ee
--- /dev/null
+++ b/src/elevenlabs/types/conversation_config.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .client_event import ClientEvent
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationConfig(UncheckedBaseModel):
+ max_duration_seconds: typing.Optional[int] = None
+ client_events: typing.Optional[typing.List[ClientEvent]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_config_client_override.py b/src/elevenlabs/types/conversation_config_client_override.py
new file mode 100644
index 0000000..d8a89f2
--- /dev/null
+++ b/src/elevenlabs/types/conversation_config_client_override.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .agent_config_override import AgentConfigOverride
+from .tts_conversational_config_override import TtsConversationalConfigOverride
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationConfigClientOverride(UncheckedBaseModel):
+ agent: typing.Optional[AgentConfigOverride] = None
+ tts: typing.Optional[TtsConversationalConfigOverride] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_analysis_common_model.py b/src/elevenlabs/types/conversation_history_analysis_common_model.py
new file mode 100644
index 0000000..cfbbe14
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_analysis_common_model.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_history_evaluation_criteria_result_common_model import (
+ ConversationHistoryEvaluationCriteriaResultCommonModel,
+)
+from .data_collection_result_common_model import DataCollectionResultCommonModel
+from .evaluation_success_result import EvaluationSuccessResult
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationHistoryAnalysisCommonModel(UncheckedBaseModel):
+ evaluation_criteria_results: typing.Optional[
+ typing.Dict[str, ConversationHistoryEvaluationCriteriaResultCommonModel]
+ ] = None
+ data_collection_results: typing.Optional[typing.Dict[str, DataCollectionResultCommonModel]] = None
+ call_successful: EvaluationSuccessResult
+ transcript_summary: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py b/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py
new file mode 100644
index 0000000..af659a8
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_evaluation_criteria_result_common_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .evaluation_success_result import EvaluationSuccessResult
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConversationHistoryEvaluationCriteriaResultCommonModel(UncheckedBaseModel):
+ criteria_id: str
+ result: EvaluationSuccessResult
+ rationale: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_metadata_common_model.py b/src/elevenlabs/types/conversation_history_metadata_common_model.py
new file mode 100644
index 0000000..de108d4
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_metadata_common_model.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .authorization_method import AuthorizationMethod
+from .conversation_charging_common_model import ConversationChargingCommonModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationHistoryMetadataCommonModel(UncheckedBaseModel):
+ start_time_unix_secs: int
+ call_duration_secs: int
+ cost: typing.Optional[int] = None
+ authorization_method: typing.Optional[AuthorizationMethod] = None
+ charging: typing.Optional[ConversationChargingCommonModel] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_transcript_common_model.py b/src/elevenlabs/types/conversation_history_transcript_common_model.py
new file mode 100644
index 0000000..3285d88
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_transcript_common_model.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .conversation_history_transcript_common_model_role import ConversationHistoryTranscriptCommonModelRole
+import typing
+from .conversation_history_transcript_tool_call_common_model import ConversationHistoryTranscriptToolCallCommonModel
+from .conversation_history_transcript_tool_result_common_model import ConversationHistoryTranscriptToolResultCommonModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationHistoryTranscriptCommonModel(UncheckedBaseModel):
+ role: ConversationHistoryTranscriptCommonModelRole
+ message: typing.Optional[str] = None
+ tool_calls: typing.Optional[typing.List[ConversationHistoryTranscriptToolCallCommonModel]] = None
+ tool_results: typing.Optional[typing.List[ConversationHistoryTranscriptToolResultCommonModel]] = None
+ time_in_call_secs: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_transcript_common_model_role.py b/src/elevenlabs/types/conversation_history_transcript_common_model_role.py
new file mode 100644
index 0000000..1964c6f
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_transcript_common_model_role.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ConversationHistoryTranscriptCommonModelRole = typing.Union[typing.Literal["user", "agent"], typing.Any]
diff --git a/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py b/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py
new file mode 100644
index 0000000..1afe050
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_transcript_tool_call_common_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConversationHistoryTranscriptToolCallCommonModel(UncheckedBaseModel):
+ request_id: str
+ tool_name: str
+ params_as_json: str
+ tool_has_been_called: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py b/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py
new file mode 100644
index 0000000..15ef7ff
--- /dev/null
+++ b/src/elevenlabs/types/conversation_history_transcript_tool_result_common_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConversationHistoryTranscriptToolResultCommonModel(UncheckedBaseModel):
+ request_id: str
+ tool_name: str
+ result_value: str
+ is_error: bool
+ tool_has_been_called: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_initiation_client_data.py b/src/elevenlabs/types/conversation_initiation_client_data.py
new file mode 100644
index 0000000..f98379e
--- /dev/null
+++ b/src/elevenlabs/types/conversation_initiation_client_data.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_config_client_override import ConversationConfigClientOverride
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationInitiationClientData(UncheckedBaseModel):
+ conversation_config_override: typing.Optional[ConversationConfigClientOverride] = None
+ custom_llm_extra_body: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_signed_url_response_model.py b/src/elevenlabs/types/conversation_signed_url_response_model.py
new file mode 100644
index 0000000..b38e5f8
--- /dev/null
+++ b/src/elevenlabs/types/conversation_signed_url_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ConversationSignedUrlResponseModel(UncheckedBaseModel):
+ signed_url: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_summary_response_model.py b/src/elevenlabs/types/conversation_summary_response_model.py
new file mode 100644
index 0000000..1910629
--- /dev/null
+++ b/src/elevenlabs/types/conversation_summary_response_model.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus
+from .evaluation_success_result import EvaluationSuccessResult
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationSummaryResponseModel(UncheckedBaseModel):
+ agent_id: str
+ agent_name: typing.Optional[str] = None
+ conversation_id: str
+ start_time_unix_secs: int
+ call_duration_secs: int
+ message_count: int
+ status: ConversationSummaryResponseModelStatus
+ call_successful: EvaluationSuccessResult
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_summary_response_model_status.py b/src/elevenlabs/types/conversation_summary_response_model_status.py
new file mode 100644
index 0000000..4baceca
--- /dev/null
+++ b/src/elevenlabs/types/conversation_summary_response_model_status.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ConversationSummaryResponseModelStatus = typing.Union[typing.Literal["processing", "done"], typing.Any]
diff --git a/src/elevenlabs/types/conversation_token_db_model.py b/src/elevenlabs/types/conversation_token_db_model.py
new file mode 100644
index 0000000..9107ab9
--- /dev/null
+++ b/src/elevenlabs/types/conversation_token_db_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_token_purpose import ConversationTokenPurpose
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationTokenDbModel(UncheckedBaseModel):
+ agent_id: str
+ conversation_token: str
+ expiration_time_unix_secs: typing.Optional[int] = None
+ purpose: typing.Optional[ConversationTokenPurpose] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_token_purpose.py b/src/elevenlabs/types/conversation_token_purpose.py
new file mode 100644
index 0000000..bfaccef
--- /dev/null
+++ b/src/elevenlabs/types/conversation_token_purpose.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ConversationTokenPurpose = typing.Union[typing.Literal["signed_url", "shareable_link"], typing.Any]
diff --git a/src/elevenlabs/types/conversational_config.py b/src/elevenlabs/types/conversational_config.py
new file mode 100644
index 0000000..0fa91dc
--- /dev/null
+++ b/src/elevenlabs/types/conversational_config.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .agent_config import AgentConfig
+from .asr_conversational_config import AsrConversationalConfig
+from .turn_config import TurnConfig
+from .tts_conversational_config import TtsConversationalConfig
+from .conversation_config import ConversationConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ConversationalConfig(UncheckedBaseModel):
+ agent: typing.Optional[AgentConfig] = None
+ asr: typing.Optional[AsrConversationalConfig] = None
+ turn: typing.Optional[TurnConfig] = None
+ tts: typing.Optional[TtsConversationalConfig] = None
+ conversation: typing.Optional[ConversationConfig] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, ConversationalConfig=ConversationalConfig)
+update_forward_refs(ObjectJsonSchemaProperty, ConversationalConfig=ConversationalConfig)
diff --git a/src/elevenlabs/types/create_agent_response_model.py b/src/elevenlabs/types/create_agent_response_model.py
new file mode 100644
index 0000000..48aede9
--- /dev/null
+++ b/src/elevenlabs/types/create_agent_response_model.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class CreateAgentResponseModel(UncheckedBaseModel):
+ agent_id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/custom_llm.py b/src/elevenlabs/types/custom_llm.py
new file mode 100644
index 0000000..5c4a570
--- /dev/null
+++ b/src/elevenlabs/types/custom_llm.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conv_ai_secret_locator import ConvAiSecretLocator
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class CustomLlm(UncheckedBaseModel):
+ url: str
+ model_id: typing.Optional[str] = None
+ api_key: typing.Optional[ConvAiSecretLocator] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/data_collection_result_common_model.py b/src/elevenlabs/types/data_collection_result_common_model.py
new file mode 100644
index 0000000..1c4856b
--- /dev/null
+++ b/src/elevenlabs/types/data_collection_result_common_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class DataCollectionResultCommonModel(UncheckedBaseModel):
+ data_collection_id: str
+ value: typing.Optional[typing.Optional[typing.Any]] = None
+ json_schema: typing.Optional[LiteralJsonSchemaProperty] = None
+ rationale: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/embed_config.py b/src/elevenlabs/types/embed_config.py
new file mode 100644
index 0000000..d67f2d2
--- /dev/null
+++ b/src/elevenlabs/types/embed_config.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .embed_variant import EmbedVariant
+from .embed_config_avatar import EmbedConfigAvatar
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class EmbedConfig(UncheckedBaseModel):
+ variant: typing.Optional[EmbedVariant] = None
+ avatar: typing.Optional[EmbedConfigAvatar] = None
+ custom_avatar_path: typing.Optional[str] = None
+ bg_color: typing.Optional[str] = None
+ text_color: typing.Optional[str] = None
+ btn_color: typing.Optional[str] = None
+ btn_text_color: typing.Optional[str] = None
+ border_color: typing.Optional[str] = None
+ focus_color: typing.Optional[str] = None
+ border_radius: typing.Optional[int] = None
+ btn_radius: typing.Optional[int] = None
+ action_text: typing.Optional[str] = None
+ start_call_text: typing.Optional[str] = None
+ end_call_text: typing.Optional[str] = None
+ expand_text: typing.Optional[str] = None
+ listening_text: typing.Optional[str] = None
+ speaking_text: typing.Optional[str] = None
+ shareable_page_text: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/embed_config_avatar.py b/src/elevenlabs/types/embed_config_avatar.py
new file mode 100644
index 0000000..13699ea
--- /dev/null
+++ b/src/elevenlabs/types/embed_config_avatar.py
@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ..core.unchecked_base_model import UnionMetadata
+
+
+class EmbedConfigAvatar_Orb(UncheckedBaseModel):
+ type: typing.Literal["orb"] = "orb"
+ color_1: typing.Optional[str] = None
+ color_2: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class EmbedConfigAvatar_Url(UncheckedBaseModel):
+ type: typing.Literal["url"] = "url"
+ custom_url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class EmbedConfigAvatar_Image(UncheckedBaseModel):
+ type: typing.Literal["image"] = "image"
+ url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+EmbedConfigAvatar = typing_extensions.Annotated[
+ typing.Union[EmbedConfigAvatar_Orb, EmbedConfigAvatar_Url, EmbedConfigAvatar_Image],
+ UnionMetadata(discriminant="type"),
+]
diff --git a/src/elevenlabs/types/embed_variant.py b/src/elevenlabs/types/embed_variant.py
new file mode 100644
index 0000000..3ad7293
--- /dev/null
+++ b/src/elevenlabs/types/embed_variant.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EmbedVariant = typing.Union[typing.Literal["compact", "full", "expandable"], typing.Any]
diff --git a/src/elevenlabs/types/evaluation_settings.py b/src/elevenlabs/types/evaluation_settings.py
new file mode 100644
index 0000000..ed0dd53
--- /dev/null
+++ b/src/elevenlabs/types/evaluation_settings.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .prompt_evaluation_criteria import PromptEvaluationCriteria
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class EvaluationSettings(UncheckedBaseModel):
+ """
+ Settings to evaluate an agent's performance.
+ Agents are evaluated against a set of criteria, with success being defined as meeting some combination of those criteria.
+ """
+
+ criteria: typing.Optional[typing.List[PromptEvaluationCriteria]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/evaluation_success_result.py b/src/elevenlabs/types/evaluation_success_result.py
new file mode 100644
index 0000000..3d18d89
--- /dev/null
+++ b/src/elevenlabs/types/evaluation_success_result.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EvaluationSuccessResult = typing.Union[typing.Literal["success", "failure", "unknown"], typing.Any]
diff --git a/src/elevenlabs/types/get_agent_embed_response_model.py b/src/elevenlabs/types/get_agent_embed_response_model.py
new file mode 100644
index 0000000..760be2f
--- /dev/null
+++ b/src/elevenlabs/types/get_agent_embed_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .embed_config import EmbedConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class GetAgentEmbedResponseModel(UncheckedBaseModel):
+ agent_id: str
+ widget_config: EmbedConfig
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_agent_link_response_model.py b/src/elevenlabs/types/get_agent_link_response_model.py
new file mode 100644
index 0000000..9578917
--- /dev/null
+++ b/src/elevenlabs/types/get_agent_link_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_token_db_model import ConversationTokenDbModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetAgentLinkResponseModel(UncheckedBaseModel):
+ agent_id: str
+ token: typing.Optional[ConversationTokenDbModel] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_agent_response_model.py b/src/elevenlabs/types/get_agent_response_model.py
new file mode 100644
index 0000000..b23ea80
--- /dev/null
+++ b/src/elevenlabs/types/get_agent_response_model.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+from .conversational_config import ConversationalConfig
+from .agent_metadata_response_model import AgentMetadataResponseModel
+import typing
+from .agent_platform_settings import AgentPlatformSettings
+from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class GetAgentResponseModel(UncheckedBaseModel):
+ agent_id: str
+ name: str
+ conversation_config: ConversationalConfig
+ metadata: AgentMetadataResponseModel
+ platform_settings: typing.Optional[AgentPlatformSettings] = None
+ secrets: typing.List[ConvAiStoredSecretConfig]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, GetAgentResponseModel=GetAgentResponseModel)
+update_forward_refs(ObjectJsonSchemaProperty, GetAgentResponseModel=GetAgentResponseModel)
diff --git a/src/elevenlabs/types/get_agents_page_response_model.py b/src/elevenlabs/types/get_agents_page_response_model.py
new file mode 100644
index 0000000..5170a9e
--- /dev/null
+++ b/src/elevenlabs/types/get_agents_page_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .agent_summary_response_model import AgentSummaryResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetAgentsPageResponseModel(UncheckedBaseModel):
+ agents: typing.List[AgentSummaryResponseModel]
+ next_cursor: typing.Optional[str] = None
+ has_more: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_conversation_response_model.py b/src/elevenlabs/types/get_conversation_response_model.py
new file mode 100644
index 0000000..1dc49d5
--- /dev/null
+++ b/src/elevenlabs/types/get_conversation_response_model.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .get_conversation_response_model_status import GetConversationResponseModelStatus
+import typing
+from .conversation_history_transcript_common_model import ConversationHistoryTranscriptCommonModel
+from .conversation_history_metadata_common_model import ConversationHistoryMetadataCommonModel
+from .conversation_history_analysis_common_model import ConversationHistoryAnalysisCommonModel
+from .conversation_initiation_client_data import ConversationInitiationClientData
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetConversationResponseModel(UncheckedBaseModel):
+ agent_id: str
+ conversation_id: str
+ status: GetConversationResponseModelStatus
+ transcript: typing.List[ConversationHistoryTranscriptCommonModel]
+ metadata: ConversationHistoryMetadataCommonModel
+ analysis: typing.Optional[ConversationHistoryAnalysisCommonModel] = None
+ conversation_initiation_client_data: typing.Optional[ConversationInitiationClientData] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_conversation_response_model_status.py b/src/elevenlabs/types/get_conversation_response_model_status.py
new file mode 100644
index 0000000..e104d5c
--- /dev/null
+++ b/src/elevenlabs/types/get_conversation_response_model_status.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+GetConversationResponseModelStatus = typing.Union[typing.Literal["processing", "done"], typing.Any]
diff --git a/src/elevenlabs/types/get_conversations_page_response_model.py b/src/elevenlabs/types/get_conversations_page_response_model.py
new file mode 100644
index 0000000..4deefb5
--- /dev/null
+++ b/src/elevenlabs/types/get_conversations_page_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_summary_response_model import ConversationSummaryResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetConversationsPageResponseModel(UncheckedBaseModel):
+ conversations: typing.List[ConversationSummaryResponseModel]
+ next_cursor: typing.Optional[str] = None
+ has_more: bool
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_knowledge_base_reponse_model.py b/src/elevenlabs/types/get_knowledge_base_reponse_model.py
new file mode 100644
index 0000000..2390d76
--- /dev/null
+++ b/src/elevenlabs/types/get_knowledge_base_reponse_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .get_knowledge_base_reponse_model_type import GetKnowledgeBaseReponseModelType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class GetKnowledgeBaseReponseModel(UncheckedBaseModel):
+ id: str
+ type: GetKnowledgeBaseReponseModelType
+ extracted_inner_html: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py b/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py
new file mode 100644
index 0000000..d8904ba
--- /dev/null
+++ b/src/elevenlabs/types/get_knowledge_base_reponse_model_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+GetKnowledgeBaseReponseModelType = typing.Union[typing.Literal["file", "url"], typing.Any]
diff --git a/src/elevenlabs/types/image_avatar.py b/src/elevenlabs/types/image_avatar.py
new file mode 100644
index 0000000..5b5fed9
--- /dev/null
+++ b/src/elevenlabs/types/image_avatar.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ImageAvatar(UncheckedBaseModel):
+ url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/knowledge_base_locator.py b/src/elevenlabs/types/knowledge_base_locator.py
new file mode 100644
index 0000000..95aa389
--- /dev/null
+++ b/src/elevenlabs/types/knowledge_base_locator.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .knowledge_base_locator_type import KnowledgeBaseLocatorType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class KnowledgeBaseLocator(UncheckedBaseModel):
+ type: KnowledgeBaseLocatorType
+ name: str
+ id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/knowledge_base_locator_type.py b/src/elevenlabs/types/knowledge_base_locator_type.py
new file mode 100644
index 0000000..074d02b
--- /dev/null
+++ b/src/elevenlabs/types/knowledge_base_locator_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+KnowledgeBaseLocatorType = typing.Union[typing.Literal["file", "url"], typing.Any]
diff --git a/src/elevenlabs/types/literal_json_schema_property.py b/src/elevenlabs/types/literal_json_schema_property.py
new file mode 100644
index 0000000..76fa90f
--- /dev/null
+++ b/src/elevenlabs/types/literal_json_schema_property.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .literal_json_schema_property_type import LiteralJsonSchemaPropertyType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class LiteralJsonSchemaProperty(UncheckedBaseModel):
+ type: LiteralJsonSchemaPropertyType
+ description: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/literal_json_schema_property_type.py b/src/elevenlabs/types/literal_json_schema_property_type.py
new file mode 100644
index 0000000..f3ddb1f
--- /dev/null
+++ b/src/elevenlabs/types/literal_json_schema_property_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LiteralJsonSchemaPropertyType = typing.Union[typing.Literal["boolean", "string", "integer", "number"], typing.Any]
diff --git a/src/elevenlabs/types/llm.py b/src/elevenlabs/types/llm.py
new file mode 100644
index 0000000..313f9d0
--- /dev/null
+++ b/src/elevenlabs/types/llm.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+Llm = typing.Union[
+ typing.Literal[
+ "gpt-4o-mini",
+ "gpt-4o",
+ "gpt-4",
+ "gpt-4-turbo",
+ "gpt-3.5-turbo",
+ "gemini-1.5-pro",
+ "gemini-1.5-flash",
+ "gemini-1.0-pro",
+ "claude-3-5-sonnet",
+ "claude-3-haiku",
+ "grok-beta",
+ "custom-llm",
+ ],
+ typing.Any,
+]
diff --git a/src/elevenlabs/types/object_json_schema_property.py b/src/elevenlabs/types/object_json_schema_property.py
new file mode 100644
index 0000000..4ec5fd8
--- /dev/null
+++ b/src/elevenlabs/types/object_json_schema_property.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ObjectJsonSchemaProperty(UncheckedBaseModel):
+ type: typing.Optional[typing.Literal["object"]] = None
+ properties: typing.Optional[typing.Dict[str, "ObjectJsonSchemaPropertyPropertiesValue"]] = None
+ required: typing.Optional[typing.List[str]] = None
+ description: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue # noqa: E402
+
+update_forward_refs(ObjectJsonSchemaProperty)
diff --git a/src/elevenlabs/types/object_json_schema_property_properties_value.py b/src/elevenlabs/types/object_json_schema_property_properties_value.py
new file mode 100644
index 0000000..20b8951
--- /dev/null
+++ b/src/elevenlabs/types/object_json_schema_property_properties_value.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+import typing
+
+if typing.TYPE_CHECKING:
+ from .object_json_schema_property import ObjectJsonSchemaProperty
+ from .array_json_schema_property import ArrayJsonSchemaProperty
+ObjectJsonSchemaPropertyPropertiesValue = typing.Union[
+ LiteralJsonSchemaProperty, "ObjectJsonSchemaProperty", "ArrayJsonSchemaProperty"
+]
diff --git a/src/elevenlabs/types/orb_avatar.py b/src/elevenlabs/types/orb_avatar.py
new file mode 100644
index 0000000..ff39f85
--- /dev/null
+++ b/src/elevenlabs/types/orb_avatar.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class OrbAvatar(UncheckedBaseModel):
+ color_1: typing.Optional[str] = None
+ color_2: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/post_agent_avatar_response_model.py b/src/elevenlabs/types/post_agent_avatar_response_model.py
new file mode 100644
index 0000000..3b56a77
--- /dev/null
+++ b/src/elevenlabs/types/post_agent_avatar_response_model.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PostAgentAvatarResponseModel(UncheckedBaseModel):
+ agent_id: str
+ avatar_url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/project_extended_response_model.py b/src/elevenlabs/types/project_extended_response_model.py
index 87b9dca..9f9d470 100644
--- a/src/elevenlabs/types/project_extended_response_model.py
+++ b/src/elevenlabs/types/project_extended_response_model.py
@@ -5,9 +5,11 @@
from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience
from .project_state import ProjectState
from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel
+from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction
from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset
from .chapter_response import ChapterResponse
from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel
+from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
@@ -35,9 +37,13 @@ class ProjectExtendedResponseModel(UncheckedBaseModel):
volume_normalization: bool
state: ProjectState
access_level: ProjectExtendedResponseModelAccessLevel
+ fiction: typing.Optional[ProjectExtendedResponseModelFiction] = None
+ quality_check_on: bool
+ quality_check_on_when_bulk_convert: bool
quality_preset: ProjectExtendedResponseModelQualityPreset
chapters: typing.List[ChapterResponse]
pronunciation_dictionary_versions: typing.List[PronunciationDictionaryVersionResponseModel]
+ apply_text_normalization: ProjectExtendedResponseModelApplyTextNormalization
experimental: typing.Dict[str, typing.Optional[typing.Any]]
if IS_PYDANTIC_V2:
diff --git a/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py b/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py
new file mode 100644
index 0000000..490a9ab
--- /dev/null
+++ b/src/elevenlabs/types/project_extended_response_model_apply_text_normalization.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectExtendedResponseModelApplyTextNormalization = typing.Union[
+ typing.Literal["auto", "on", "off", "apply_english"], typing.Any
+]
diff --git a/src/elevenlabs/types/project_extended_response_model_fiction.py b/src/elevenlabs/types/project_extended_response_model_fiction.py
new file mode 100644
index 0000000..0c54e14
--- /dev/null
+++ b/src/elevenlabs/types/project_extended_response_model_fiction.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectExtendedResponseModelFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any]
diff --git a/src/elevenlabs/types/project_response.py b/src/elevenlabs/types/project_response.py
index 5da3d83..dbc496b 100644
--- a/src/elevenlabs/types/project_response.py
+++ b/src/elevenlabs/types/project_response.py
@@ -5,6 +5,7 @@
from .project_response_model_target_audience import ProjectResponseModelTargetAudience
from .project_state import ProjectState
from .project_response_model_access_level import ProjectResponseModelAccessLevel
+from .project_response_model_fiction import ProjectResponseModelFiction
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
@@ -32,6 +33,9 @@ class ProjectResponse(UncheckedBaseModel):
volume_normalization: bool
state: ProjectState
access_level: ProjectResponseModelAccessLevel
+ fiction: typing.Optional[ProjectResponseModelFiction] = None
+ quality_check_on: bool
+ quality_check_on_when_bulk_convert: bool
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/project_response_model_fiction.py b/src/elevenlabs/types/project_response_model_fiction.py
new file mode 100644
index 0000000..04a90ca
--- /dev/null
+++ b/src/elevenlabs/types/project_response_model_fiction.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ProjectResponseModelFiction = typing.Union[typing.Literal["fiction", "non-fiction"], typing.Any]
diff --git a/src/elevenlabs/types/prompt_agent.py b/src/elevenlabs/types/prompt_agent.py
new file mode 100644
index 0000000..b3b6071
--- /dev/null
+++ b/src/elevenlabs/types/prompt_agent.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .llm import Llm
+from .prompt_agent_tools_item import PromptAgentToolsItem
+from .knowledge_base_locator import KnowledgeBaseLocator
+from .custom_llm import CustomLlm
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class PromptAgent(UncheckedBaseModel):
+ prompt: typing.Optional[str] = None
+ llm: typing.Optional[Llm] = None
+ temperature: typing.Optional[float] = None
+ max_tokens: typing.Optional[int] = None
+ tools: typing.Optional[typing.List[PromptAgentToolsItem]] = None
+ knowledge_base: typing.Optional[typing.List[KnowledgeBaseLocator]] = None
+ custom_llm: typing.Optional[CustomLlm] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, PromptAgent=PromptAgent)
+update_forward_refs(ObjectJsonSchemaProperty, PromptAgent=PromptAgent)
diff --git a/src/elevenlabs/types/prompt_agent_override.py b/src/elevenlabs/types/prompt_agent_override.py
new file mode 100644
index 0000000..2ca0395
--- /dev/null
+++ b/src/elevenlabs/types/prompt_agent_override.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PromptAgentOverride(UncheckedBaseModel):
+ prompt: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/prompt_agent_tools_item.py b/src/elevenlabs/types/prompt_agent_tools_item.py
new file mode 100644
index 0000000..410df2c
--- /dev/null
+++ b/src/elevenlabs/types/prompt_agent_tools_item.py
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ..core.unchecked_base_model import UnionMetadata
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class PromptAgentToolsItem_Webhook(UncheckedBaseModel):
+ type: typing.Literal["webhook"] = "webhook"
+ name: str
+ description: str
+ api_schema: WebhookToolApiSchemaConfig
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class PromptAgentToolsItem_Client(UncheckedBaseModel):
+ type: typing.Literal["client"] = "client"
+ name: str
+ description: str
+ parameters: typing.Optional[ObjectJsonSchemaProperty] = None
+ expects_response: typing.Optional[bool] = None
+ response_timeout_secs: typing.Optional[int] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+PromptAgentToolsItem = typing_extensions.Annotated[
+ typing.Union[PromptAgentToolsItem_Webhook, PromptAgentToolsItem_Client], UnionMetadata(discriminant="type")
+]
+update_forward_refs(ArrayJsonSchemaProperty, PromptAgentToolsItem_Webhook=PromptAgentToolsItem_Webhook)
+update_forward_refs(ObjectJsonSchemaProperty, PromptAgentToolsItem_Webhook=PromptAgentToolsItem_Webhook)
+update_forward_refs(ArrayJsonSchemaProperty, PromptAgentToolsItem_Client=PromptAgentToolsItem_Client)
+update_forward_refs(ObjectJsonSchemaProperty, PromptAgentToolsItem_Client=PromptAgentToolsItem_Client)
diff --git a/src/elevenlabs/types/prompt_evaluation_criteria.py b/src/elevenlabs/types/prompt_evaluation_criteria.py
new file mode 100644
index 0000000..23a5689
--- /dev/null
+++ b/src/elevenlabs/types/prompt_evaluation_criteria.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PromptEvaluationCriteria(UncheckedBaseModel):
+ """
+ An evaluation using the transcript and a prompt for a yes/no achieved answer
+ """
+
+ id: str
+ name: typing.Optional[str] = None
+ type: typing.Optional[typing.Literal["prompt"]] = None
+ conversation_goal_prompt: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py b/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py
new file mode 100644
index 0000000..e967a2f
--- /dev/null
+++ b/src/elevenlabs/types/pydantic_pronunciation_dictionary_version_locator.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class PydanticPronunciationDictionaryVersionLocator(UncheckedBaseModel):
+ """
+    A locator for other documents to be able to reference a specific dictionary and its version.
+ This is a pydantic version of PronunciationDictionaryVersionLocatorDBModel.
+ Required to ensure compat with the rest of the agent data models.
+ """
+
+ pronunciation_dictionary_id: str
+ version_id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/query_params_json_schema.py b/src/elevenlabs/types/query_params_json_schema.py
new file mode 100644
index 0000000..0de3881
--- /dev/null
+++ b/src/elevenlabs/types/query_params_json_schema.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class QueryParamsJsonSchema(UncheckedBaseModel):
+ properties: typing.Dict[str, LiteralJsonSchemaProperty]
+ required: typing.Optional[typing.List[str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/reader_resource_response_model.py b/src/elevenlabs/types/reader_resource_response_model.py
new file mode 100644
index 0000000..e98b709
--- /dev/null
+++ b/src/elevenlabs/types/reader_resource_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ReaderResourceResponseModel(UncheckedBaseModel):
+ resource_type: ReaderResourceResponseModelResourceType
+ resource_id: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/reader_resource_response_model_resource_type.py b/src/elevenlabs/types/reader_resource_response_model_resource_type.py
new file mode 100644
index 0000000..937d917
--- /dev/null
+++ b/src/elevenlabs/types/reader_resource_response_model_resource_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ReaderResourceResponseModelResourceType = typing.Union[typing.Literal["read", "collection"], typing.Any]
diff --git a/src/elevenlabs/types/sso_provider_response_model_provider_type.py b/src/elevenlabs/types/sso_provider_response_model_provider_type.py
deleted file mode 100644
index 52c8f95..0000000
--- a/src/elevenlabs/types/sso_provider_response_model_provider_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SsoProviderResponseModelProviderType = typing.Union[typing.Literal["saml", "oidc"], typing.Any]
diff --git a/src/elevenlabs/types/tts_conversational_config.py b/src/elevenlabs/types/tts_conversational_config.py
new file mode 100644
index 0000000..3c219fb
--- /dev/null
+++ b/src/elevenlabs/types/tts_conversational_config.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tts_conversational_model import TtsConversationalModel
+from .tts_output_format import TtsOutputFormat
+from .tts_optimize_streaming_latency import TtsOptimizeStreamingLatency
+from .pydantic_pronunciation_dictionary_version_locator import PydanticPronunciationDictionaryVersionLocator
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class TtsConversationalConfig(UncheckedBaseModel):
+ model_id: typing.Optional[TtsConversationalModel] = None
+ voice_id: typing.Optional[str] = None
+ agent_output_audio_format: typing.Optional[TtsOutputFormat] = None
+ optimize_streaming_latency: typing.Optional[TtsOptimizeStreamingLatency] = None
+ stability: typing.Optional[float] = None
+ similarity_boost: typing.Optional[float] = None
+ pronunciation_dictionary_locators: typing.Optional[typing.List[PydanticPronunciationDictionaryVersionLocator]] = (
+ None
+ )
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/tts_conversational_config_override.py b/src/elevenlabs/types/tts_conversational_config_override.py
new file mode 100644
index 0000000..db600b8
--- /dev/null
+++ b/src/elevenlabs/types/tts_conversational_config_override.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class TtsConversationalConfigOverride(UncheckedBaseModel):
+ voice_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/tts_conversational_model.py b/src/elevenlabs/types/tts_conversational_model.py
new file mode 100644
index 0000000..3c9c0bc
--- /dev/null
+++ b/src/elevenlabs/types/tts_conversational_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TtsConversationalModel = typing.Union[typing.Literal["eleven_turbo_v2", "eleven_turbo_v2_5"], typing.Any]
diff --git a/src/elevenlabs/types/tts_optimize_streaming_latency.py b/src/elevenlabs/types/tts_optimize_streaming_latency.py
new file mode 100644
index 0000000..36429b8
--- /dev/null
+++ b/src/elevenlabs/types/tts_optimize_streaming_latency.py
@@ -0,0 +1,3 @@
+# This file was auto-generated by Fern from our API Definition.
+
+TtsOptimizeStreamingLatency = int
diff --git a/src/elevenlabs/types/tts_output_format.py b/src/elevenlabs/types/tts_output_format.py
new file mode 100644
index 0000000..aceaba2
--- /dev/null
+++ b/src/elevenlabs/types/tts_output_format.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TtsOutputFormat = typing.Union[
+ typing.Literal["pcm_16000", "pcm_22050", "pcm_24000", "pcm_44100", "ulaw_8000"], typing.Any
+]
diff --git a/src/elevenlabs/types/turn_config.py b/src/elevenlabs/types/turn_config.py
new file mode 100644
index 0000000..50347f6
--- /dev/null
+++ b/src/elevenlabs/types/turn_config.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .turn_mode import TurnMode
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class TurnConfig(UncheckedBaseModel):
+ turn_timeout: typing.Optional[float] = None
+ mode: typing.Optional[TurnMode] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/turn_mode.py b/src/elevenlabs/types/turn_mode.py
new file mode 100644
index 0000000..a82a3a3
--- /dev/null
+++ b/src/elevenlabs/types/turn_mode.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TurnMode = typing.Union[typing.Literal["silence", "turn"], typing.Any]
diff --git a/src/elevenlabs/types/url_avatar.py b/src/elevenlabs/types/url_avatar.py
new file mode 100644
index 0000000..4406933
--- /dev/null
+++ b/src/elevenlabs/types/url_avatar.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class UrlAvatar(UncheckedBaseModel):
+ custom_url: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/user.py b/src/elevenlabs/types/user.py
index df7839d..e90906c 100644
--- a/src/elevenlabs/types/user.py
+++ b/src/elevenlabs/types/user.py
@@ -17,6 +17,8 @@ class User(UncheckedBaseModel):
first_name: typing.Optional[str] = None
is_api_key_hashed: typing.Optional[bool] = None
xi_api_key_preview: typing.Optional[str] = None
+ referral_link_code: typing.Optional[str] = None
+ partnerstack_partner_default_link: typing.Optional[str] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/voice.py b/src/elevenlabs/types/voice.py
index ea2a422..08ee993 100644
--- a/src/elevenlabs/types/voice.py
+++ b/src/elevenlabs/types/voice.py
@@ -32,6 +32,7 @@ class Voice(UncheckedBaseModel):
is_owner: typing.Optional[bool] = None
is_legacy: typing.Optional[bool] = None
is_mixed: typing.Optional[bool] = None
+ created_at_unix: typing.Optional[int] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/voice_preview_response_model.py b/src/elevenlabs/types/voice_preview_response_model.py
index c438418..a43c34b 100644
--- a/src/elevenlabs/types/voice_preview_response_model.py
+++ b/src/elevenlabs/types/voice_preview_response_model.py
@@ -1,15 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
import pydantic
class VoicePreviewResponseModel(UncheckedBaseModel):
audio_base_64: str
generated_voice_id: str
- media_type: typing.Optional[typing.Literal["audio/mpeg"]] = None
+ media_type: str
+ duration_secs: float
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/voice_previews_response_model.py b/src/elevenlabs/types/voice_previews_response_model.py
index 721505b..d9b8b56 100644
--- a/src/elevenlabs/types/voice_previews_response_model.py
+++ b/src/elevenlabs/types/voice_previews_response_model.py
@@ -9,6 +9,7 @@
class VoicePreviewsResponseModel(UncheckedBaseModel):
previews: typing.List[VoicePreviewResponseModel]
+ text: str
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/voice_sharing_response.py b/src/elevenlabs/types/voice_sharing_response.py
index 365a560..9fb5062 100644
--- a/src/elevenlabs/types/voice_sharing_response.py
+++ b/src/elevenlabs/types/voice_sharing_response.py
@@ -6,6 +6,7 @@
from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory
from .review_status import ReviewStatus
from .voice_sharing_moderation_check_response_model import VoiceSharingModerationCheckResponseModel
+from .reader_resource_response_model import ReaderResourceResponseModel
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
@@ -42,6 +43,7 @@ class VoiceSharingResponse(UncheckedBaseModel):
youtube_username: typing.Optional[str] = None
tiktok_username: typing.Optional[str] = None
moderation_check: typing.Optional[VoiceSharingModerationCheckResponseModel] = None
+ reader_restricted_on: typing.Optional[typing.List[ReaderResourceResponseModel]] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config.py b/src/elevenlabs/types/webhook_tool_api_schema_config.py
new file mode 100644
index 0000000..ae3ad49
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+import typing
+from .webhook_tool_api_schema_config_method import WebhookToolApiSchemaConfigMethod
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from .query_params_json_schema import QueryParamsJsonSchema
+from .webhook_tool_api_schema_config_request_headers_value import WebhookToolApiSchemaConfigRequestHeadersValue
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class WebhookToolApiSchemaConfig(UncheckedBaseModel):
+ """
+ Configuration for a webhook that will be called by an LLM tool.
+ """
+
+ url: str
+ method: typing.Optional[WebhookToolApiSchemaConfigMethod] = None
+ path_params_schema: typing.Optional[typing.Dict[str, LiteralJsonSchemaProperty]] = None
+ query_params_schema: typing.Optional[QueryParamsJsonSchema] = None
+ request_body_schema: typing.Optional[ObjectJsonSchemaProperty] = None
+ request_headers: typing.Optional[typing.Dict[str, WebhookToolApiSchemaConfigRequestHeadersValue]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, WebhookToolApiSchemaConfig=WebhookToolApiSchemaConfig)
+update_forward_refs(ObjectJsonSchemaProperty, WebhookToolApiSchemaConfig=WebhookToolApiSchemaConfig)
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_method.py b/src/elevenlabs/types/webhook_tool_api_schema_config_method.py
new file mode 100644
index 0000000..02708df
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config_method.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WebhookToolApiSchemaConfigMethod = typing.Union[typing.Literal["GET", "POST", "PATCH", "DELETE"], typing.Any]
diff --git a/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py b/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py
new file mode 100644
index 0000000..e4aae56
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_api_schema_config_request_headers_value.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .conv_ai_secret_locator import ConvAiSecretLocator
+
+WebhookToolApiSchemaConfigRequestHeadersValue = typing.Union[str, ConvAiSecretLocator]
diff --git a/src/elevenlabs/types/webhook_tool_config.py b/src/elevenlabs/types/webhook_tool_config.py
new file mode 100644
index 0000000..9d76e0d
--- /dev/null
+++ b/src/elevenlabs/types/webhook_tool_config.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .array_json_schema_property import ArrayJsonSchemaProperty
+from .object_json_schema_property import ObjectJsonSchemaProperty
+from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class WebhookToolConfig(UncheckedBaseModel):
+ """
+ A webhook tool is a tool that calls an external webhook from our server
+ """
+
+ name: str
+ description: str
+ api_schema: WebhookToolApiSchemaConfig
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayJsonSchemaProperty, WebhookToolConfig=WebhookToolConfig)
+update_forward_refs(ObjectJsonSchemaProperty, WebhookToolConfig=WebhookToolConfig)
diff --git a/src/elevenlabs/voice_generation/client.py b/src/elevenlabs/voice_generation/client.py
index d79d966..7503866 100644
--- a/src/elevenlabs/voice_generation/client.py
+++ b/src/elevenlabs/voice_generation/client.py
@@ -85,7 +85,7 @@ def generate(
Category code corresponding to the gender of the generated voice. Possible values: female, male.
accent : str
- Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian.
+ Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian.
age : Age
Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old.
@@ -318,7 +318,7 @@ async def generate(
Category code corresponding to the gender of the generated voice. Possible values: female, male.
accent : str
- Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian.
+ Category code corresponding to the accent of the generated voice. Possible values: british, american, african, australian, indian.
age : Age
Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old.
diff --git a/src/elevenlabs/workspace/client.py b/src/elevenlabs/workspace/client.py
index b56b77b..ff9411c 100644
--- a/src/elevenlabs/workspace/client.py
+++ b/src/elevenlabs/workspace/client.py
@@ -3,7 +3,6 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from ..core.request_options import RequestOptions
-from ..types.sso_provider_response_model import SsoProviderResponseModel
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
@@ -22,65 +21,6 @@ class WorkspaceClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def get_sso_provider_admin(
- self, *, workspace_id: str, request_options: typing.Optional[RequestOptions] = None
- ) -> SsoProviderResponseModel:
- """
- Parameters
- ----------
- workspace_id : str
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- SsoProviderResponseModel
- Successful Response
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- client.workspace.get_sso_provider_admin(
- workspace_id="workspace_id",
- )
- """
- _response = self._client_wrapper.httpx_client.request(
- "admin/n8enylacgd/sso-provider",
- method="GET",
- params={
- "workspace_id": workspace_id,
- },
- request_options=request_options,
- )
- try:
- if 200 <= _response.status_code < 300:
- return typing.cast(
- SsoProviderResponseModel,
- construct_type(
- type_=SsoProviderResponseModel, # type: ignore
- object_=_response.json(),
- ),
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
def invite_user(
self, *, email: str, request_options: typing.Optional[RequestOptions] = None
) -> typing.Optional[typing.Any]:
@@ -288,73 +228,6 @@ class AsyncWorkspaceClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def get_sso_provider_admin(
- self, *, workspace_id: str, request_options: typing.Optional[RequestOptions] = None
- ) -> SsoProviderResponseModel:
- """
- Parameters
- ----------
- workspace_id : str
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- SsoProviderResponseModel
- Successful Response
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- await client.workspace.get_sso_provider_admin(
- workspace_id="workspace_id",
- )
-
-
- asyncio.run(main())
- """
- _response = await self._client_wrapper.httpx_client.request(
- "admin/n8enylacgd/sso-provider",
- method="GET",
- params={
- "workspace_id": workspace_id,
- },
- request_options=request_options,
- )
- try:
- if 200 <= _response.status_code < 300:
- return typing.cast(
- SsoProviderResponseModel,
- construct_type(
- type_=SsoProviderResponseModel, # type: ignore
- object_=_response.json(),
- ),
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
async def invite_user(
self, *, email: str, request_options: typing.Optional[RequestOptions] = None
) -> typing.Optional[typing.Any]: