From b70e224c1093fe449cb3706c79ba924cbff50760 Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Fri, 7 Nov 2025 16:11:38 +0000 Subject: [PATCH] SDK regeneration --- .fern/metadata.json | 83 + poetry.lock | 340 +- pyproject.toml | 8 +- reference.md | 5623 +++++++++++++++++ requirements.txt | 2 +- src/cohere/__init__.py | 936 ++- src/cohere/base_client.py | 172 +- src/cohere/batches/__init__.py | 43 +- src/cohere/batches/client.py | 12 +- src/cohere/batches/types/__init__.py | 46 +- src/cohere/connectors/client.py | 12 +- src/cohere/core/__init__.py | 98 +- src/cohere/core/client_wrapper.py | 30 +- src/cohere/core/force_multipart.py | 6 +- src/cohere/core/http_response.py | 2 +- src/cohere/core/http_sse/__init__.py | 42 + src/cohere/core/http_sse/_api.py | 112 + src/cohere/core/http_sse/_decoders.py | 61 + src/cohere/core/http_sse/_exceptions.py | 7 + src/cohere/core/http_sse/_models.py | 17 + src/cohere/core/pydantic_utilities.py | 15 +- src/cohere/core/unchecked_base_model.py | 81 +- src/cohere/datasets/__init__.py | 34 +- src/cohere/datasets/client.py | 37 +- src/cohere/datasets/types/__init__.py | 40 +- src/cohere/embed_jobs/__init__.py | 29 +- src/cohere/embed_jobs/types/__init__.py | 29 +- src/cohere/errors/__init__.py | 64 +- src/cohere/finetuning/__init__.py | 89 +- src/cohere/finetuning/client.py | 22 +- src/cohere/finetuning/finetuning/__init__.py | 86 +- .../finetuning/finetuning/types/__init__.py | 82 +- src/cohere/models/client.py | 14 +- src/cohere/types/__init__.py | 717 ++- src/cohere/types/tool_call_v2.py | 4 +- src/cohere/types/tool_v2.py | 2 +- src/cohere/v2/__init__.py | 101 +- src/cohere/v2/raw_client.py | 36 +- src/cohere/v2/types/__init__.py | 101 +- 39 files changed, 8330 insertions(+), 905 deletions(-) create mode 100644 .fern/metadata.json create mode 100644 reference.md create mode 100644 src/cohere/core/http_sse/__init__.py create mode 100644 src/cohere/core/http_sse/_api.py create mode 100644 src/cohere/core/http_sse/_decoders.py create mode 100644 src/cohere/core/http_sse/_exceptions.py create mode 100644 src/cohere/core/http_sse/_models.py diff --git a/.fern/metadata.json b/.fern/metadata.json new file mode 100644 index 000000000..3b2eefd2d --- /dev/null +++ b/.fern/metadata.json @@ -0,0 +1,83 @@ +{ + "cliVersion": "0.109.1", + "generatorName": "fernapi/fern-python-sdk", + "generatorVersion": "4.34.0", + "generatorConfig": { + "pyproject_python_version": "^3.9", + "inline_request_params": false, + "extra_dependencies": { + "fastavro": "^1.9.4", + "requests": "^2.0.0", + "types-requests": "^2.0.0", + "tokenizers": ">=0.15,<1", + "httpx-sse": "^0.4.0" + }, + "improved_imports": true, + "pydantic_config": { + "frozen": false, + "union_naming": "v1", + "require_optional_fields": false, + "extra_fields": "allow", + "use_str_enums": true, + "skip_validation": true + }, + "timeout_in_seconds": 300, + "client": { + "class_name": "BaseCohere", + "filename": "base_client.py", + "exported_class_name": "Client", + "exported_filename": "client.py" + }, + "additional_init_exports": [ + { + "from": "client", + "imports": [ + "Client", + "AsyncClient" + ] + }, + { + "from": "bedrock_client", + "imports": [ + "BedrockClient", + "BedrockClientV2" + ] + }, + { + "from": "sagemaker_client", + "imports": [ + "SagemakerClient", + "SagemakerClientV2" + ] + }, + { + "from": "aws_client", + "imports": [ + "AwsClient" + ] + }, + { + "from": "client_v2", + "imports": [ + "AsyncClientV2", + "ClientV2" + ] + }, + { + "from": "aliases", 
+ "imports": [ + "StreamedChatResponseV2", + "MessageStartStreamedChatResponseV2", + "MessageEndStreamedChatResponseV2", + "ContentStartStreamedChatResponseV2", + "ContentDeltaStreamedChatResponseV2", + "ContentEndStreamedChatResponseV2", + "ToolCallStartStreamedChatResponseV2", + "ToolCallDeltaStreamedChatResponseV2", + "ToolCallEndStreamedChatResponseV2", + "ChatResponse" + ] + } + ] + } +} \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 7cfb28e84..e293f401f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -164,6 +164,20 @@ files = [ {file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"}, ] +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + [[package]] name = "colorama" version = "0.4.6" @@ -266,13 +280,13 @@ files = [ [[package]] name = "fsspec" -version = "2025.9.0" +version = "2025.10.0" description = "File-system specification" optional = false python-versions = ">=3.9" files = [ - {file = "fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7"}, - {file = "fsspec-2025.9.0.tar.gz", hash = "sha256:19fd429483d25d28b65ec68f9f4adc16c17ea2c7c7bf54ec61360d478fb19c19"}, + {file = "fsspec-2025.10.0-py3-none-any.whl", hash = "sha256:7c7712353ae7d875407f97715f0e1ffcc21e33d5b24556cb1e090ae9409ec61d"}, + {file = "fsspec-2025.10.0.tar.gz", hash = "sha256:b6789427626f068f9a83ca4e8a3cc050850b6c0f71f99ddb4f542b8266a26a59"}, ] [package.extras] @@ -395,52 +409,49 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "httpx-sse" -version = "0.4.0" +version = "0.4.3" description = "Consume Server-Sent Event (SSE) messages with HTTPX." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, - {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, + {file = "httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc"}, + {file = "httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d"}, ] [[package]] name = "huggingface-hub" -version = "0.36.0" +version = "1.1.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false -python-versions = ">=3.8.0" +python-versions = ">=3.9.0" files = [ - {file = "huggingface_hub-0.36.0-py3-none-any.whl", hash = "sha256:7bcc9ad17d5b3f07b57c78e79d527102d08313caa278a641993acddcb894548d"}, - {file = "huggingface_hub-0.36.0.tar.gz", hash = "sha256:47b3f0e2539c39bf5cde015d63b72ec49baff67b6931c3d97f3f84532e2b8d25"}, + {file = "huggingface_hub-1.1.2-py3-none-any.whl", hash = "sha256:dfcfa84a043466fac60573c3e4af475490a7b0d7375b22e3817706d6659f61f7"}, + {file = "huggingface_hub-1.1.2.tar.gz", hash = "sha256:7bdafc432dc12fa1f15211bdfa689a02531d2a47a3cc0d74935f5726cdbcab8e"}, ] [package.dependencies] filelock = "*" fsspec = ">=2023.5.0" -hf-xet = {version = ">=1.1.3,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""} +hf-xet = {version = ">=1.2.0,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""} +httpx = ">=0.23.0,<1" packaging = ">=20.9" pyyaml = ">=5.1" -requests = "*" +shellingham = "*" tqdm = ">=4.42.1" +typer-slim = "*" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "mypy (>=1.14.1,<1.15.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] -cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "mypy (>=1.14.1,<1.15.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", 
"types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -hf-transfer = ["hf-transfer (>=0.1.4)"] -hf-xet = ["hf-xet (>=1.1.2,<2.0.0)"] -inference = ["aiohttp"] -mcp = ["aiohttp", "mcp (>=1.8.0)", "typer"] +hf-xet = ["hf-xet (>=1.1.3,<2.0.0)"] +mcp = ["mcp (>=1.8.0)"] oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"] -quality = ["libcst (>=1.4.0)", "mypy (==1.15.0)", "mypy (>=1.14.1,<1.15.0)", "ruff (>=0.9.0)", "ty"] -tensorflow = ["graphviz", "pydot", "tensorflow"] -tensorflow-testing = ["keras (<3.0)", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +quality = ["libcst (>=1.4.0)", "mypy (==1.15.0)", "ruff (>=0.9.0)", "ty"] +testing = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["safetensors[torch]", "torch"] -typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] +typing = ["types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] name = "idna" @@ -559,18 +570,18 @@ testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.12.3" +version = "2.12.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" files = [ - {file = "pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf"}, - {file = "pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74"}, + {file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"}, + {file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.41.4" +pydantic-core = "2.41.5" typing-extensions = ">=4.14.1" typing-inspection = ">=0.4.2" @@ -580,128 +591,132 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.41.4" +version = "2.41.5" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" files = [ - {file = "pydantic_core-2.41.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2442d9a4d38f3411f22eb9dd0912b7cbf4b7d5b6c92c4173b75d3e1ccd84e36e"}, - {file = "pydantic_core-2.41.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:30a9876226dda131a741afeab2702e2d127209bde3c65a2b8133f428bc5d006b"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d55bbac04711e2980645af68b97d445cdbcce70e5216de444a6c4b6943ebcccd"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1d778fb7849a42d0ee5927ab0f7453bf9f85eef8887a546ec87db5ddb178945"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b65077a4693a98b90ec5ad8f203ad65802a1b9b6d4a7e48066925a7e1606706"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62637c769dee16eddb7686bf421be48dfc2fae93832c25e25bc7242e698361ba"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfe3aa529c8f501babf6e502936b9e8d4698502b2cfab41e17a028d91b1ac7b"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca2322da745bf2eeb581fc9ea3bbb31147702163ccbcbf12a3bb630e4bf05e1d"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e8cd3577c796be7231dcf80badcf2e0835a46665eaafd8ace124d886bab4d700"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:1cae8851e174c83633f0833e90636832857297900133705ee158cf79d40f03e6"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a26d950449aae348afe1ac8be5525a00ae4235309b729ad4d3399623125b43c9"}, - {file = "pydantic_core-2.41.4-cp310-cp310-win32.whl", hash = "sha256:0cf2a1f599efe57fa0051312774280ee0f650e11152325e41dfd3018ef2c1b57"}, - {file = "pydantic_core-2.41.4-cp310-cp310-win_amd64.whl", hash = "sha256:a8c2e340d7e454dc3340d3d2e8f23558ebe78c98aa8f68851b04dcb7bc37abdc"}, - {file = "pydantic_core-2.41.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:28ff11666443a1a8cf2a044d6a545ebffa8382b5f7973f22c36109205e65dc80"}, - {file = "pydantic_core-2.41.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61760c3925d4633290292bad462e0f737b840508b4f722247d8729684f6539ae"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae547b7315d055b0de2ec3965643b0ab82ad0106a7ffd29615ee9f266a02827"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef9ee5471edd58d1fcce1c80ffc8783a650e3e3a193fe90d52e43bb4d87bff1f"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15dd504af121caaf2c95cb90c0ebf71603c53de98305621b94da0f967e572def"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a926768ea49a8af4d36abd6a8968b8790f7f76dd7cbd5a4c180db2b4ac9a3a2"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916b9b7d134bff5440098a4deb80e4cb623e68974a87883299de9124126c2a8"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cf90535979089df02e6f17ffd076f07237efa55b7343d98760bde8743c4b265"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7533c76fa647fade2d7ec75ac5cc079ab3f34879626dae5689b27790a6cf5a5c"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:37e516bca9264cbf29612539801ca3cd5d1be465f940417b002905e6ed79d38a"}, - {file = 
"pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0c19cb355224037c83642429b8ce261ae108e1c5fbf5c028bac63c77b0f8646e"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win32.whl", hash = "sha256:09c2a60e55b357284b5f31f5ab275ba9f7f70b7525e18a132ec1f9160b4f1f03"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win_amd64.whl", hash = "sha256:711156b6afb5cb1cb7c14a2cc2c4a8b4c717b69046f13c6b332d8a0a8f41ca3e"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win_arm64.whl", hash = "sha256:6cb9cf7e761f4f8a8589a45e49ed3c0d92d1d696a45a6feaee8c904b26efc2db"}, - {file = "pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887"}, - {file = "pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = "sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8"}, - {file = "pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746"}, - {file = "pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02"}, - {file = 
"pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89"}, - {file = "pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1"}, - {file = "pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0"}, - {file = "pydantic_core-2.41.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:646e76293345954acea6966149683047b7b2ace793011922208c8e9da12b0062"}, - {file = "pydantic_core-2.41.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc8e85a63085a137d286e2791037f5fdfff0aabb8b899483ca9c496dd5797338"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:692c622c8f859a17c156492783902d8370ac7e121a611bd6fe92cc71acf9ee8d"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1e2906efb1031a532600679b424ef1d95d9f9fb507f813951f23320903adbd7"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04e2f7f8916ad3ddd417a7abdd295276a0bf216993d9318a5d61cc058209166"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df649916b81822543d1c8e0e1d079235f68acdc7d270c911e8425045a8cfc57e"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66c529f862fdba70558061bb936fe00ddbaaa0c647fd26e4a4356ef1d6561891"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc3b4c5a1fd3a311563ed866c2c9b62da06cb6398bee186484ce95c820db71cb"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6e0fc40d84448f941df9b3334c4b78fe42f36e3bf631ad54c3047a0cdddc2514"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_armv7l.whl", hash = 
"sha256:44e7625332683b6c1c8b980461475cde9595eff94447500e80716db89b0da005"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:170ee6835f6c71081d031ef1c3b4dc4a12b9efa6a9540f93f95b82f3c7571ae8"}, - {file = "pydantic_core-2.41.4-cp39-cp39-win32.whl", hash = "sha256:3adf61415efa6ce977041ba9745183c0e1f637ca849773afa93833e04b163feb"}, - {file = "pydantic_core-2.41.4-cp39-cp39-win_amd64.whl", hash = "sha256:a238dd3feee263eeaeb7dc44aea4ba1364682c4f9f9467e6af5596ba322c2332"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:a1b2cfec3879afb742a7b0bcfa53e4f22ba96571c9e54d6a3afe1052d17d843b"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:d175600d975b7c244af6eb9c9041f10059f20b8bbffec9e33fdd5ee3f67cdc42"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f184d657fa4947ae5ec9c47bd7e917730fa1cbb78195037e32dcbab50aca5ee"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed810568aeffed3edc78910af32af911c835cc39ebbfacd1f0ab5dd53028e5c"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e5ab4fc177dd41536b3c32b2ea11380dd3d4619a385860621478ac2d25ceb00"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d88d0054d3fa11ce936184896bed3c1c5441d6fa483b498fac6a5d0dd6f64a9"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2a054a8725f05b4b6503357e0ac1c4e8234ad3b0c2ac130d6ffc66f0e170e2"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0d9db5a161c99375a0c68c058e227bee1d89303300802601d76a3d01f74e258"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6273ea2c8ffdac7b7fda2653c49682db815aebf4a89243a6feccf5e36c18c347"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:4c973add636efc61de22530b2ef83a65f39b6d6f656df97f678720e20de26caa"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b69d1973354758007f46cf2d44a4f3d0933f10b6dc9bf15cf1356e037f6f731a"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3619320641fd212aaf5997b6ca505e97540b7e16418f4a241f44cdf108ffb50d"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:491535d45cd7ad7e4a2af4a5169b0d07bebf1adfd164b0368da8aa41e19907a5"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:54d86c0cada6aba4ec4c047d0e348cbad7063b87ae0f005d9f8c9ad04d4a92a2"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca1124aced216b2500dc2609eade086d718e8249cb9696660ab447d50a758bd"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c9024169becccf0cb470ada03ee578d7348c119a0d42af3dcf9eda96e3a247c"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:26895a4268ae5a2849269f4991cdc97236e4b9c010e51137becf25182daac405"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:ca4df25762cf71308c446e33c9b1fdca2923a3f13de616e2a949f38bf21ff5a8"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5a28fcedd762349519276c36634e71853b4541079cab4acaaac60c4421827308"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c173ddcd86afd2535e2b695217e82191580663a1d1928239f877f5a1649ef39f"}, - {file = "pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"}, + {file = 
"pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = 
"sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"}, + {file = 
"pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"}, + {file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"}, ] [package.dependencies] @@ -891,6 +906,17 @@ files = [ {file = "ruff-0.11.5.tar.gz", hash = "sha256:cae2e2439cb88853e421901ec040a758960b576126dab520fa08e9de431d1bef"}, ] +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = 
"sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + [[package]] name = "six" version = "1.17.0" @@ -1017,6 +1043,24 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] +[[package]] +name = "typer-slim" +version = "0.20.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.8" +files = [ + {file = "typer_slim-0.20.0-py3-none-any.whl", hash = "sha256:f42a9b7571a12b97dddf364745d29f12221865acef7a2680065f9bb29c7dc89d"}, + {file = "typer_slim-0.20.0.tar.gz", hash = "sha256:9fc6607b3c6c20f5c33ea9590cbeb17848667c51feee27d9e314a579ab07d1a3"}, +] + +[package.dependencies] +click = ">=8.0.0" +typing-extensions = ">=3.7.4.3" + +[package.extras] +standard = ["rich (>=10.11.0)", "shellingham (>=1.3.0)"] + [[package]] name = "types-python-dateutil" version = "2.9.0.20251008" @@ -1087,4 +1131,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "796ed291ede9928d8457ee7be983f6709623e752187f078643d0c44dd76d3425" +content-hash = "2cc1ef11434f24487526fa4d821d917aeb5c941a8f1bf81182db3b761b3c13d4" diff --git a/pyproject.toml b/pyproject.toml index 925dc448d..adb5ac8d5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "cohere" [tool.poetry] name = "cohere" -version = "5.20.0" +version = "5.20.1" description = "" readme = "README.md" authors = [] @@ -31,16 +31,16 @@ packages = [ { include = "cohere", from = "src"} ] -[project.urls] +[tool.poetry.urls] Repository = 'https://github.com/cohere-ai/cohere-python' [tool.poetry.dependencies] python = "^3.9" fastavro = "^1.9.4" httpx = ">=0.21.2" -httpx-sse = "0.4.0" +httpx-sse = "^0.4.0" pydantic = ">= 1.9.2" -pydantic-core = "^2.18.2" +pydantic-core = ">=2.18.2" requests = "^2.0.0" tokenizers = ">=0.15,<1" types-requests = "^2.0.0" diff --git a/reference.md b/reference.md new file mode 100644 index 000000000..d0b917c9f --- /dev/null +++ b/reference.md @@ -0,0 +1,5623 @@ +# Reference +
## `client.chat_stream(...)`

#### 📝 Description

Generates a streamed text response to a user message.

To learn how to use the Chat API and RAG, follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
#### 🔌 Usage
```python
from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
response = client.chat_stream(
    model="command-a-03-2025",
    message="hello!",
)
for chunk in response.data:
    print(chunk)  # handle each streamed event as it arrives

```
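In practice you usually filter the stream for `text-generation` events and print their text as it arrives. A minimal sketch, assuming the `.data` iterator shown in the generated snippet above and the v1 event shape (`event_type`, `text`) from Cohere's chat docs:

```python
from cohere import Client

client = Client(token="YOUR_TOKEN")

stream = client.chat_stream(
    model="command-a-03-2025",
    message="Write one sentence about emperor penguins.",
)
for event in stream.data:
    # Incremental text arrives in `text-generation` events; the final,
    # aggregated response arrives in a `stream-end` event.
    if event.event_type == "text-generation":
        print(event.text, end="", flush=True)
print()
```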
#### ⚙️ Parameters
**message:** `str`

Text input for the model to respond to.

Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
**accepts:** `typing.Optional[typing.Literal["text/event-stream"]]` — Pass `text/event-stream` to receive the streamed response as server-sent events. The default is `\n`-delimited events.
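As a hedged sketch, requesting SSE framing is just a matter of passing this header value through the parameter (the message is a placeholder; the SDK still yields parsed events either way):

```python
from cohere import Client

client = Client(token="YOUR_TOKEN")

# Ask for server-sent-event framing instead of the default
# newline-delimited events.
response = client.chat_stream(
    message="hello!",
    accepts="text/event-stream",
)
for chunk in response.data:
    print(chunk)
```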
**model:** `typing.Optional[str]`

The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

Compatible Deployments: Cohere Platform, Private Deployments
**preamble:** `typing.Optional[str]`

When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style, and use the `SYSTEM` role.

The `SYSTEM` role is also used for the contents of the optional `chat_history=` parameter. When used with the `chat_history=` parameter it adds content throughout a conversation. Conversely, when used with the `preamble=` parameter it adds content at the start of the conversation only.

Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
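For illustration, a sketch of overriding the default preamble on `client.chat` (the non-streamed counterpart, which takes the same parameters); the model name and prompt here are placeholders:

```python
from cohere import Client

client = Client(token="YOUR_TOKEN")

# The preamble replaces the default SYSTEM prompt for the whole conversation.
response = client.chat(
    model="command-a-03-2025",
    preamble="You are an assistant who answers in a single short sentence.",
    message="Why do penguins huddle together?",
)
print(response.text)
```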
**chat_history:** `typing.Optional[typing.Sequence[Message]]`

A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`.

Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.

The `chat_history` parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used.

Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
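As a sketch, prior turns can be supplied as plain dicts matching the `Message` shape (`role` plus `message`), with the current turn in `message=`:

```python
from cohere import Client

client = Client(token="YOUR_TOKEN")

response = client.chat(
    model="command-a-03-2025",
    chat_history=[
        {"role": "USER", "message": "Who wrote 'On the Origin of Species'?"},
        {"role": "CHATBOT", "message": "Charles Darwin wrote it, in 1859."},
    ],
    message="What else did he write?",  # current user turn, not in chat_history
)
print(response.text)
```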
**conversation_id:** `typing.Optional[str]`

An alternative to `chat_history`.

Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non-empty string.

Compatible Deployments: Cohere Platform
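A sketch of the persisted-conversation alternative; the ID below is an arbitrary placeholder string:

```python
from cohere import Client

client = Client(token="YOUR_TOKEN")

# The first call with a new ID creates the conversation server-side...
client.chat(message="My favourite colour is teal.", conversation_id="demo-session-001")

# ...and later calls with the same ID resume it, without resending history.
reply = client.chat(message="What is my favourite colour?", conversation_id="demo-session-001")
print(reply.text)
```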
**prompt_truncation:** `typing.Optional[ChatStreamRequestPromptTruncation]`

Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.

Dictates how the prompt will be constructed.

With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.

With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they were input to the API.

With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.

Compatible Deployments:
  - AUTO: Cohere Platform Only
  - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
+
+ +
+
+ +**connectors:** `typing.Optional[typing.Sequence[ChatConnector]]` + +Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/v1/docs/creating-and-deploying-a-connector) one. + +When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG). + +Compatible Deployments: Cohere Platform + +
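+
+For example, a reply grounded with the managed web-search connector might look like this (a sketch; connector availability depends on your deployment):
+
+```python
+from cohere import ChatConnector, Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+# The model queries each connector and cites what it retrieves (RAG).
+response = client.chat(
+    message="Who won the most recent Nobel Prize in Physics?",
+    connectors=[ChatConnector(id="web-search")],
+)
+print(response.text)
+```
+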
+
+ +
+
+ +**search_queries_only:** `typing.Optional[bool]` + +Defaults to `false`. + +When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.Sequence[ChatDocument]]` + +A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary. + +Example: +``` +[ + { "title": "Tall penguins", "text": "Emperor penguins are the tallest." }, + { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica." }, +] +``` + +Keys and values from each document will be serialized to a string and passed to the model. The resulting generation will include citations that reference some of these documents. + +Some suggested keys are "text", "author", and "date". For better generation quality, it is recommended to keep the total word count of the strings in the dictionary to under 300 words. + +An `id` field (string) can be optionally supplied to identify the document in the citations. This field will not be passed to the model. + +An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model. + +See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
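+
+A minimal grounded-chat sketch reusing the penguin documents above; the reply's `citations` point back into these documents:
+
+```python
+from cohere import Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+response = client.chat(
+    message="Where do the tallest penguins live?",
+    documents=[
+        {"title": "Tall penguins", "text": "Emperor penguins are the tallest."},
+        {"title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."},
+    ],
+)
+print(response.text)
+print(response.citations)
+```
+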
+
+ +
+
+ +**citation_quality:** `typing.Optional[ChatStreamRequestCitationQuality]` + +Defaults to `"enabled"`. +Citations are enabled by default for models that support it, but can be turned off by setting `"type": "disabled"`. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**temperature:** `typing.Optional[float]` + +Defaults to `0.3`. + +A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. + +Randomness can be further maximized by increasing the value of the `p` parameter. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**max_input_tokens:** `typing.Optional[int]` + +The maximum number of input tokens to send to the model. If not specified, `max_input_tokens` is the model's context length limit minus a small buffer. + +Input will be truncated according to the `prompt_truncation` parameter. + +Compatible Deployments: Cohere Platform + +
+
+ +
+
+ +**k:** `typing.Optional[int]` + +Ensures only the top `k` most likely tokens are considered for generation at each step. +Defaults to `0`, min value of `0`, max value of `500`. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+
+**p:** `typing.Optional[float]`
+
+Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
+Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
+
+Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +If specified, the backend will make a best effort to sample tokens +deterministically, such that repeated requests with the same +seed and parameters should return the same result. However, +determinism cannot be totally guaranteed. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+
+**stop_sequences:** `typing.Optional[typing.Sequence[str]]`
+
+A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point, not including the stop sequence.
+
+Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
+
+ +
+
+ +**frequency_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + +Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**presence_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + +Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**raw_prompting:** `typing.Optional[bool]` + +When enabled, the user's prompt will be sent to the model without +any pre-processing. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[Tool]]` + +A list of available tools (functions) that the model may suggest invoking before producing a text response. + +When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
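+
+A sketch of the first tool-use step: pass only `tools`, then read the suggested calls back (the tool definition here is illustrative):
+
+```python
+from cohere import Client, Tool, ToolParameterDefinitionsValue
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+tools = [
+    Tool(
+        name="query_daily_sales_report",
+        description="Retrieves sales data for a given day.",
+        parameter_definitions={
+            "day": ToolParameterDefinitionsValue(
+                description="The day to query, formatted as YYYY-MM-DD.",
+                type="str",
+                required=True,
+            )
+        },
+    )
+]
+# With tools (and no tool_results), response.text is "" and
+# response.tool_calls lists the calls the model wants made.
+response = client.chat(
+    message="Give me a sales summary for 29th September 2023.",
+    tools=tools,
+)
+for call in response.tool_calls or []:
+    print(call.name, call.parameters)
+```
+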
+
+ +
+
+
+**tool_results:** `typing.Optional[typing.Sequence[ToolResult]]`
+
+A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well.
+Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries.
+
+**Note**: `outputs` must be a list of objects. If your tool returns a single object (e.g. `{"status": 200}`), make sure to wrap it in a list.
+```
+tool_results = [
+  {
+    "call": {
+      "name": <tool name>,
+      "parameters": {
+        <param name>: <param value>
+      }
+    },
+    "outputs": [{
+      <key>: <value>
+    }]
+  },
+  ...
+]
+```
+**Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text.
+
+Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
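+
+Continuing the tools sketch above, the second step feeds a (hypothetical) tool output back so the model can compose a grounded reply:
+
+```python
+# Reuses `client` and `tools` from the first-step sketch above;
+# `tools` must accompany `tool_results`.
+response = client.chat(
+    message="Give me a sales summary for 29th September 2023.",
+    tools=tools,
+    tool_results=[
+        {
+            "call": {
+                "name": "query_daily_sales_report",
+                "parameters": {"day": "2023-09-29"},
+            },
+            "outputs": [{"date": "2023-09-29", "summary": "Total sales: 10,000 units"}],
+        }
+    ],
+)
+print(response.text)  # grounded reply, with citations into the outputs
+```
+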
+
+ +
+
+ +**force_single_step:** `typing.Optional[bool]` — Forces the chat to be single step. Defaults to `false`. + +
+
+ +
+
+ +**response_format:** `typing.Optional[ResponseFormat]` + +
+
+ +
+
+
+**safety_mode:** `typing.Optional[ChatStreamRequestSafetyMode]`
+
+Used to select the [safety instruction](https://docs.cohere.com/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
+When `NONE` is specified, the safety instruction will be omitted.
+
+Safety modes are not yet configurable in combination with the `tools`, `tool_results` and `documents` parameters.
+
+**Note**: This parameter is only compatible with newer Cohere models, starting with [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release) and [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release).
+
+**Note**: `command-r7b-12-2024` and newer models only support `"CONTEXTUAL"` and `"STRICT"` modes.
+
+Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.chat(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Generates a text response to a user message.
+To learn how to use the Chat API and RAG, follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
+```python
+from cohere import Client, Tool, ToolParameterDefinitionsValue
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+client.chat(
+    model="command-a-03-2025",
+    message="Can you provide a sales summary for 29th September 2023, and also give me some details about the products in the 'Electronics' category, for example their prices and stock levels?",
+    tools=[
+        Tool(
+            name="query_daily_sales_report",
+            description="Connects to a database to retrieve overall sales volumes and sales information for a given day.",
+            parameter_definitions={
+                "day": ToolParameterDefinitionsValue(
+                    description="Retrieves sales data for this day, formatted as YYYY-MM-DD.",
+                    type="str",
+                    required=True,
+                )
+            },
+        ),
+        Tool(
+            name="query_product_catalog",
+            description="Connects to a product catalog with information about all the products being sold, including categories, prices, and stock levels.",
+            parameter_definitions={
+                "category": ToolParameterDefinitionsValue(
+                    description="Retrieves product information data for all products in this category.",
+                    type="str",
+                    required=True,
+                )
+            },
+        ),
+    ],
+)
+
+```
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**message:** `str` + +Text input for the model to respond to. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**accepts:** `typing.Optional[typing.Literal["text/event-stream"]]` — Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events. + +
+
+ +
+
+ +**model:** `typing.Optional[str]` + +The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model. + +Compatible Deployments: Cohere Platform, Private Deployments + +
+
+ +
+
+ +**preamble:** `typing.Optional[str]` + +When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style, and use the `SYSTEM` role. + +The `SYSTEM` role is also used for the contents of the optional `chat_history=` parameter. When used with the `chat_history=` parameter it adds content throughout a conversation. Conversely, when used with the `preamble=` parameter it adds content at the start of the conversation only. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**chat_history:** `typing.Optional[typing.Sequence[Message]]` + +A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`. + +Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content. + +The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+
+**conversation_id:** `typing.Optional[str]`
+
+An alternative to `chat_history`.
+
+Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non-empty string.
+
+Compatible Deployments: Cohere Platform
+
+
+ +
+
+ +**prompt_truncation:** `typing.Optional[ChatRequestPromptTruncation]` + +Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. + +Dictates how the prompt will be constructed. + +With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance. + +With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API. + +With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. + +Compatible Deployments: + - AUTO: Cohere Platform Only + - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**connectors:** `typing.Optional[typing.Sequence[ChatConnector]]` + +Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/v1/docs/creating-and-deploying-a-connector) one. + +When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG). + +Compatible Deployments: Cohere Platform + +
+
+ +
+
+ +**search_queries_only:** `typing.Optional[bool]` + +Defaults to `false`. + +When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.Sequence[ChatDocument]]` + +A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary. + +Example: +``` +[ + { "title": "Tall penguins", "text": "Emperor penguins are the tallest." }, + { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica." }, +] +``` + +Keys and values from each document will be serialized to a string and passed to the model. The resulting generation will include citations that reference some of these documents. + +Some suggested keys are "text", "author", and "date". For better generation quality, it is recommended to keep the total word count of the strings in the dictionary to under 300 words. + +An `id` field (string) can be optionally supplied to identify the document in the citations. This field will not be passed to the model. + +An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model. + +See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**citation_quality:** `typing.Optional[ChatRequestCitationQuality]` + +Defaults to `"enabled"`. +Citations are enabled by default for models that support it, but can be turned off by setting `"type": "disabled"`. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**temperature:** `typing.Optional[float]` + +Defaults to `0.3`. + +A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. + +Randomness can be further maximized by increasing the value of the `p` parameter. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**max_input_tokens:** `typing.Optional[int]` + +The maximum number of input tokens to send to the model. If not specified, `max_input_tokens` is the model's context length limit minus a small buffer. + +Input will be truncated according to the `prompt_truncation` parameter. + +Compatible Deployments: Cohere Platform + +
+
+ +
+
+ +**k:** `typing.Optional[int]` + +Ensures only the top `k` most likely tokens are considered for generation at each step. +Defaults to `0`, min value of `0`, max value of `500`. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+
+**p:** `typing.Optional[float]`
+
+Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
+Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
+
+Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +If specified, the backend will make a best effort to sample tokens +deterministically, such that repeated requests with the same +seed and parameters should return the same result. However, +determinism cannot be totally guaranteed. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+
+**stop_sequences:** `typing.Optional[typing.Sequence[str]]`
+
+A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point, not including the stop sequence.
+
+Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
+
+ +
+
+ +**frequency_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + +Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**presence_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + +Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**raw_prompting:** `typing.Optional[bool]` + +When enabled, the user's prompt will be sent to the model without +any pre-processing. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[Tool]]` + +A list of available tools (functions) that the model may suggest invoking before producing a text response. + +When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. + +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+
+**tool_results:** `typing.Optional[typing.Sequence[ToolResult]]`
+
+A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well.
+Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries.
+
+**Note**: `outputs` must be a list of objects. If your tool returns a single object (e.g. `{"status": 200}`), make sure to wrap it in a list.
+```
+tool_results = [
+  {
+    "call": {
+      "name": <tool name>,
+      "parameters": {
+        <param name>: <param value>
+      }
+    },
+    "outputs": [{
+      <key>: <value>
+    }]
+  },
+  ...
+]
+```
+**Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text.
+
+Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
+
+ +
+
+ +**force_single_step:** `typing.Optional[bool]` — Forces the chat to be single step. Defaults to `false`. + +
+
+ +
+
+ +**response_format:** `typing.Optional[ResponseFormat]` + +
+
+ +
+
+
+**safety_mode:** `typing.Optional[ChatRequestSafetyMode]`
+
+Used to select the [safety instruction](https://docs.cohere.com/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
+When `NONE` is specified, the safety instruction will be omitted.
+
+Safety modes are not yet configurable in combination with the `tools`, `tool_results` and `documents` parameters.
+
+**Note**: This parameter is only compatible with newer Cohere models, starting with [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release) and [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release).
+
+**Note**: `command-r7b-12-2024` and newer models only support `"CONTEXTUAL"` and `"STRICT"` modes.
+
+Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.generate_stream(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ + +This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using the Chat with Streaming API. + +Generates realistic text conditioned on a given input. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
+```python
+from cohere import Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+response = client.generate_stream(
+    prompt="Please explain to me how LLMs work",
+)
+for chunk in response.data:
+    print(chunk)
+
+```
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**prompt:** `str` + +The input text that serves as the starting point for generating the response. +Note: The prompt will be pre-processed and modified before reaching the model. + +
+
+ +
+
+ +**model:** `typing.Optional[str]` + +The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). +Smaller, "light" models are faster, while larger models will perform better. [Custom models](https://docs.cohere.com/docs/training-custom-models) can also be supplied with their full ID. + +
+
+ +
+
+ +**num_generations:** `typing.Optional[int]` — The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`. + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. + +This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details. + +Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt. + +
+
+ +
+
+ +**truncate:** `typing.Optional[GenerateStreamRequestTruncate]` + +One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. + +Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + +If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + +
+
+ +
+
+ +**temperature:** `typing.Optional[float]` + +A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details. +Defaults to `0.75`, min value of `0.0`, max value of `5.0`. + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +If specified, the backend will make a best effort to sample tokens +deterministically, such that repeated requests with the same +seed and parameters should return the same result. However, +determinism cannot be totally guaranteed. +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**preset:** `typing.Optional[str]` + +Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.com/playground/generate). +When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters. + +
+
+ +
+
+ +**end_sequences:** `typing.Optional[typing.Sequence[str]]` — The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text. + +
+
+ +
+
+
+**stop_sequences:** `typing.Optional[typing.Sequence[str]]` — The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
+
+
+ +
+
+ +**k:** `typing.Optional[int]` + +Ensures only the top `k` most likely tokens are considered for generation at each step. +Defaults to `0`, min value of `0`, max value of `500`. + +
+
+ +
+
+
+**p:** `typing.Optional[float]`
+
+Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
+Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
+
+
+ +
+
+ +**frequency_penalty:** `typing.Optional[float]` + +Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + +Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models. + +
+
+ +
+
+ +**presence_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + +Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + +Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models. + +
+
+ +
+
+ +**return_likelihoods:** `typing.Optional[GenerateStreamRequestReturnLikelihoods]` + +One of `GENERATION|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`. + +If `GENERATION` is selected, the token likelihoods will only be provided for generated text. + +WARNING: `ALL` is deprecated, and will be removed in a future release. + +
+
+ +
+
+ +**raw_prompting:** `typing.Optional[bool]` — When enabled, the user's prompt will be sent to the model without any pre-processing. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.generate(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ + +This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using the Chat API. + +Generates realistic text conditioned on a given input. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.generate( + prompt="Please explain to me how LLMs work", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**prompt:** `str` + +The input text that serves as the starting point for generating the response. +Note: The prompt will be pre-processed and modified before reaching the model. + +
+
+ +
+
+ +**model:** `typing.Optional[str]` + +The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). +Smaller, "light" models are faster, while larger models will perform better. [Custom models](https://docs.cohere.com/docs/training-custom-models) can also be supplied with their full ID. + +
+
+ +
+
+ +**num_generations:** `typing.Optional[int]` — The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`. + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. + +This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details. + +Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt. + +
+
+ +
+
+ +**truncate:** `typing.Optional[GenerateRequestTruncate]` + +One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. + +Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + +If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + +
+
+ +
+
+ +**temperature:** `typing.Optional[float]` + +A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details. +Defaults to `0.75`, min value of `0.0`, max value of `5.0`. + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +If specified, the backend will make a best effort to sample tokens +deterministically, such that repeated requests with the same +seed and parameters should return the same result. However, +determinism cannot be totally guaranteed. +Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + +
+
+ +
+
+ +**preset:** `typing.Optional[str]` + +Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.com/playground/generate). +When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters. + +
+
+ +
+
+ +**end_sequences:** `typing.Optional[typing.Sequence[str]]` — The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text. + +
+
+ +
+
+
+**stop_sequences:** `typing.Optional[typing.Sequence[str]]` — The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
+
+
+ +
+
+ +**k:** `typing.Optional[int]` + +Ensures only the top `k` most likely tokens are considered for generation at each step. +Defaults to `0`, min value of `0`, max value of `500`. + +
+
+ +
+
+
+**p:** `typing.Optional[float]`
+
+Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
+Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
+
+
+ +
+
+ +**frequency_penalty:** `typing.Optional[float]` + +Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + +Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models. + +
+
+ +
+
+ +**presence_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + +Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + +Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models. + +
+
+ +
+
+ +**return_likelihoods:** `typing.Optional[GenerateRequestReturnLikelihoods]` + +One of `GENERATION|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`. + +If `GENERATION` is selected, the token likelihoods will only be provided for generated text. + +WARNING: `ALL` is deprecated, and will be removed in a future release. + +
+
+ +
+
+ +**raw_prompting:** `typing.Optional[bool]` — When enabled, the user's prompt will be sent to the model without any pre-processing. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.embed(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
+This endpoint returns text and image embeddings. An embedding is a list of floating point numbers that captures semantic information about the content that it represents.
+
+Embeddings can be used to create classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page.
+
+If you want to learn more about how to use the embedding model, have a look at the [Semantic Search Guide](https://docs.cohere.com/docs/semantic-search).
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.embed( + model="embed-v4.0", + input_type="image", + embedding_types=["float"], + images=[ + "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD//gAfQ29tcHJlc3NlZCBieSBqcGVnLXJlY29tcHJlc3P/2wCEAAQEBAQEBAQEBAQGBgUGBggHBwcHCAwJCQkJCQwTDA4MDA4MExEUEA8QFBEeFxUVFx4iHRsdIiolJSo0MjRERFwBBAQEBAQEBAQEBAYGBQYGCAcHBwcIDAkJCQkJDBMMDgwMDgwTERQQDxAUER4XFRUXHiIdGx0iKiUlKjQyNEREXP/CABEIAZABkAMBIgACEQEDEQH/xAAdAAEAAQQDAQAAAAAAAAAAAAAABwEFBggCAwQJ/9oACAEBAAAAAN/gAAAAAAAAAAAAAAAAAAAAAAAAAAHTg9j6agAAp23/ADjsAAAPFrlAUYeagAAArdZ12uzcAAKax6jWUAAAAO/bna+oAC1aBxAAAAAAbM7rVABYvnRgYAAAAAbwbIABw+cMYAAAAAAvH1CuwA091RAAAAAAbpbPAGJfMXzAAAAAAJk+hdQGlmsQAAAAABk31JqBx+V1iAAAAAALp9W6gRp826AAAAAAGS/UqoGuGjwAAAAAAl76I1A1K1EAAAAAAG5G1ADUHU0AAAAAAu/1Cu4DVbTgAAAAAA3n2JAIG0IAAAAAArt3toAMV+XfEAAAAAL1uzPlQBT5qR2AAAAAenZDbm/AAa06SgAAAAerYra/LQADp+YmIAAAAC77J7Q5KAACIPnjwAAAAzbZzY24gAAGq+m4AAA7Zo2cmaoAAANWdOOAAAMl2N2TysAAAApEOj2HgAOyYtl5w5jw4zZPJyuGQ5H2AAAdes+suDUAVyfYbZTLajG8HxjgD153n3IAABH8QxxiVo4XPKpGlyTKjowvCbUAF4mD3AAACgqCzYPiPQAA900XAACmN4favRk+a9wB0xdiNAAAvU1cgAxeDcUoPdL0s1B44atQAACSs8AEewD0gM72I5jjDFiAAAPfO1QGL6z9IAlGdRgkaAAABMmRANZsSADls7k6kFW8AAAJIz4DHtW6AAk+d1jhUAAAGdyWBFcGgAX/AGnYZFgAAAM4k4CF4hAA9u3FcKi4AAAEiSEBCsRgAe3biuGxWAAACXsoAiKFgALttgs0J0AAAHpnvkBhOt4AGebE1pBtsAAAGeySA4an2wAGwEjGFxaAAAe+c+wAjKBgAyfZ3kUh3HAAAO6Yb+AKQLGgBctmb2HXDNjAAD1yzkQAENRF1gyvYG9AcI2wjgAByyuSveAAWWMcQtnoyOQs8qAPFhVh8HADt999y65gAAKKgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAf/8QAGgEBAAMBAQEAAAAAAAAAAAAAAAEFBgIEA//aAAgBAhAAAAAAAAAAAAABEAAJkBEAAB0CIAABMhyAAA6EQAAA6EQAABMiIAAAmREAAAmQiAABMgOQAEyAHIATIACIBMu7H3fT419eACEnps7DoPFQch889Wd3V2TeWIBV0o+eF8I0OrXVoAIyvBm8uDe2Wp6ADO+Mw9WDV6rSgAzvjMNWA1Op1AARlvmZbOA3NnpfSAK6iHnwfnFttZ9Wh7AeXPcB5cxWd3Wk7Pvb+uR8q+rgAAAAAAAAP//EABsBAQABBQEAAAAAAAAAAAAAAAAEAQIDBQYH/9oACAEDEAAAAAAAAAC20AL6gCNDxAArnn3gpro4AAv2l4QIgAAJWwGLVAAAX7cQYYAAFdyNZgAAAy7UazAAABsZI18UAAE6YEfWgACRNygavCACsmZkALNZjAMkqVcAC2FFoKyJWe+fMyYoMAAUw2L8t0jYzqhE0dAzd70eHj+PK7mcAa7UDN7VvBwXmDb7EAU5uw9C9KCnh2n6WoAaKIey9ODy/jN+ADRRD2fpQeY8P0QAU5zGel+gg8V53oc4AgaYTfcJ45Tx5I31wCPobQ2PpPRYuP8APMZm2kqoxQddQAAAAAAAAP/EAFMQAAEDAgIDCQkMBwUIAwAAAAECAwQFEQAGBzFREhMhMEBBYXGBCBQYIjJCRlDSFSBSVGJygpGTobHREDRDc6LBwiMzU3CyFiQlNVVkdISSlLP/2gAIAQEAAT8A/wAo74nVaBAb32bNYitfDfcS2PrURiZpU0dwVFMjN1OVY8O8u7//APkFYc076LmfSVSvmQpB/ox4QGjH/r7v/wBGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0Y89fd7IMj2cN6e9GDpCTmRaOuFI9nEDSlo9qakpj5upoJNgH3d4+50JxGlxpbSH4r7bzSvJW0sLSeop5NWsw0fL8RU2rVGPDjJ4C6+4EAnYnaegYzV3StDhFcfK1LdqDuoSZBLDHWlPlqxXtNmkOulaVVxcFg3/sYA73A+kLrxKnTJrpfmSXX3jrcdWVqPWVYudvJ7nbil16s0R7vikVSVDduCVR3lNk9e5IvjKfdG5rpKmo+Yo7NXi8ALlgxJH0kiysZL0l5Uzsz/AMFn2l7m7kJ8BuSj6PnAbU8ieeZitOPPuoQ22krWtZCUpSkXJJOoDGkHui4MBT1MyW2ibITdJnuA97o/dJ1uHFczFXMyzV1Gu1N+bJV57yr7kbEjUkdA5dGlSYb7UqJIcZfaUFtuNLKFoUNRSocIONF3dBb6tih58eSCQEM1PUOqT7eELS4lK0KCkkAgg3BB4/M2Z6NlKlSKtWJiI8VoWueFS1nUhA85ZxpJ0v13Pj7kNorg0NC7tw0K4XNi3yPKPRqHqLQnpkeoD8XKmZZJVSHCG4klw/qijqQs/wCF/pwDfjc1ZqpOUKNLrVXf3qMyLJSLFbrh8ltA51qxn7P9az9V1z6istxWypMSIhRLbCD+Kj5yvUYJHCMdz7pLXWoByfWJBXUILV4bizwvRk+Z0qa4yoTodKgyZ859DEWO0t11xZslCEC5UrGlHSNOz/XVvBa26RFKkQY+xHO4v5a/UtArU3LlZptbpzm4lQ30ut7DbWk9ChwHGXq5EzHQ6ZWoCv8AdpsdDyRrIKtaFdKTwHi+6I0hrffGRKU/ZloodqSkngW5rQz1I1n1P3M2ZzJpFYyvIXdUJ0SowP8AhP8AAtI6Avi
tIWbWclZVqlbWElxpvcRmz+0kOcDaf5nEyXJnypM2Y8p2Q+6t11xRupa1m6lHpJ9T6B6uaVpHo7alEMz0PQnepxN0/wASRgauJ7pTNZmVynZTjuXZpzYkSRtkPDgB6UI9UZMlrgZsy1MQqxZqkRy/QHRfA4iZIaiRX5D6ghpptTi1bEIFycZmrL2YcwVitvk7ubLdfsfNClcCewcHqiiX91qbbX3yz/rGBxGmKse4ujnMz6F2dfjiGj/2VBs/ccE3J9UZOirm5ry3EQm5eqkRu3Qp0YHEd01PLGUqPT0mxk1QLV0oZaPteqdBtKNV0kUIkXah77Md6mkcH8RGBq4jupH7JyXG/wDPcP1tj1T3MuWVMQK5mt9FjJWmDGO1tHjuHqJ4nupEnvrJa+beZ4/jR6ooNGnZhrFOotNa3yXMeS02OvWo9CRwk4ytQIeWKDS6HC/V4TCWgq1itWtSz0rPCeJ7qKNenZSl2/upEtonpcShXqcC+NA+jFeW4H+1NbYKatOaswysWMaOrbscc4rujaYZuj/vzccMCpR3yehwFn+r1MAVGwGNDOhVbK4ubc4xLLFnYMB1PCNjrw/BHF58opzDk7MlHSndOSID28ja6gbtH3jChZRHqShZerOZag1S6JT3pcpzUhsahtUTwJTtJxow0G0vKRYreYS1PrIAUhNrx4yvkA+WsfCONXFnGlTLZytnqvU5KLRlvmTG2Fl/xwB0J1eookOXPkNRYUZ1991W5baaQVrWdiUi5JxkbudKzVCzOzg+abE196NWXKWOnWlvGW8p0DKMEU6g01qKzwFe5F1uEDynFnhUeO7pTJ5n0aBmyK3d+mneJVtZjOnxVfQX6ghwZtRktQ4EV6RJcNkNMoK1qOwJTcnGTe5yr9V3qXmuSKXFNj3uizkpY/0oxlbIOVslRt6oVKaZdIst9XjyHPnOK4ezkFVgw6vAmU2ewHYsllbDiFaloWNyoYz1lKZknMtRoEu6gyvdMO8zrC/IXy2j0Cs5glpg0WmyJkk+YwgrIG1WwdJxk7uap75amZyqQit6zChkLe6lueSnGWcl5ayjGEegUliKCAFuAbp5z57irqPI9NOjVOdqB31T2x7tU5KlxNryNa2CenWnDra2XFtOoUhaFFKkqFiCOAgg8qyro7zdnJwCh0Z5xi9lSVje46etarA22DGUe5spEPe5ebqgue78Ui3aj9Sl+WvFIodHoMREGj02PDjJ1NMNhAJ2m2s8m07aIHJi5WdMsxSZFiuoxG08LoGt9sDz/hjGrkzLD0hxDLDSluLISlKQSpRPMAMZU0C54zFvcidHTR4Sv2k24dI+SyPG+u2MqaBskZc3qRLimrzEftZoBaB+S0PFw0y2y2hppCUIQAEpSAAAOYAauU6XtBJmuycy5LjASVXcl05sWDu1bGxe1GHWnGXFtOoUhxCilSVAghSTYgg6iOR5eyfmXNT/AHvQKNJmKBspTaLNo+es2SntOMq9zNIc3uTm+sBoazEgWWvtdWLDGWchZTyk2E0KiR4zlrKkEbt9XW4u6uW6SNDNAzwHZ7BTTq3YkSm0XS7sS+ka/na8ZuyJmbJMwxK9T1NJJs1IR47D3S2vj2mXXlobabUtaiAlKRcknUAMZV0F56zJvT8iEKVCVY77PuhZHyWvLxlTuesl0Te3qqlysy08JMnxI4PQ0n+onEWDFhMNxokdphhsWQ20gIQkbEpFgPeyqnBg/rMhCCBfc3ur6hw4lZ1hNbpMdlbpGokhKT+OHs7zVf3EdpHzgVfzGDnGqnnbHUkYGcqqOZo/OT+VsMZ5eBG/w0K2lJKPaxDzfTJBCXFLZUTbxk3+q2GJTEhAcYdQtB1KSoEckqdLp1ThvQqnEZkxXU7lbLyAtCusKxnPubKVNU9NyhOMB03Pekm7kfsXwqRjM+jfOWUVLNZochEcapLY31gj56LgduLHZxNjjL+TM0ZpcDdCokuWL2LiEWaSflOKskYyt3M8t0tSM31hLCNZiwbLc7XVCwxljR9lHKDaRQ6Kww6BZUlQ32Qr6a7nAAHvFLSkEqUAAMT81UyGClDm/r2N6u1WKhm2oywpDKt4bPMjX/8ALC3HHCVLWSSbm+338adLhuB2O+tChzg4pOdOFDVRRbm31A/EflhiQ1IbS6y4laFaik3HJCkKBBAII4RjMOibIOYCtc/LkZD6tb0W8Zy+0luwVisdzDRX925RMyS4uxMtlD46gUFGKj3NWdY11wajSpbf71bS/qUnErQTpPjXIy2Xk7WZLCv68L0R6R2/KylO+ikK/A4Tom0jL1ZRqHa3bEXQjpPlkBGVXkDa48yj8V4p/c358lEGW/TIaOcOSCtfYG0qxSO5gp6AldczQ+9tbhsBr+NwqxRNDWjygFDjGXmpL4N99nEyVH6K/FGGmGY7SGm20oQgAJSkAJAHMAPeyJ8WEjfJD6EX1XP4DWTioZ1ZRdEBndnmWvgT2DE6tVCoE98SFFPMgGyR2DBN+E8XSq3MpToUyu7ZIK0HUcUmsRapGK46wlfBuknWnk5AOsY3I2YsNmLAagPf1HMFNp+6S68FOD9mjhV+QxUM5THrohJDKNutWHpL8halvOqWo6yokk8fT58inSESI6ylST2EbDtGKRU49VitvtkJI8tOsg7OOJA1nFSzhQKaVIkT21OA23DV3Fdu51Yk6VICCREpzznS4pKPw3WDpXk34KOgD9+fZwxpWB4JNIIG1D1/xTinaSMvylJDy3YyjwDfUXH1pviFPhTGw/FkNuoOpbagofdxU2fHhMqekOBDadus4q+bJcwqahkssfxnrOFKKjckk8iodWcpUxDySS2rgcTfWMMPtvstvNKCkLSFJI5weMzFm6mZfQUvL32UQCiOg+N1q2DFbzlWa2paXHyzGOplolKbfKOtWLnb72FUp9NeD8GU4y4OdBtfr2jGW9JTbqm4tdQlCr2D6fIPzxzYadbdQhxpYUlQBBBuCD7+pVKPTIq5D6uAcCUjWpWwYqtWlVV9Tr6yE6kIHkpHJcl1cqS5TXjfc+O3f7xxedc6IoqTAgEKnqHCdYZB5ztVsGH5D0p5x+Q6px1ZKlKUbknico5zk0J5EWWtTtPWeFOstdKejaMR5TMxhuQw4lbTiQpKkm4UD7151thtbriwlCElSidQAxXaw7VZalXsyglLadg/M8mpstcKbHko1oWDbb0duGXEOtIcQbpUkKB2g8Tm3MSMv0xbySDJduhhB+FtPQMSJD0p5yRIcK3XFFSlK1kni9HealU+UijzFjvZ5X9iVHyHDzdSve5yqqm2kU5pViuynCNnMOUZVld80lgKsVNEtns4QPqPEKNgTjOdbVWq0+tC7xmCWmRzWTrV2njEqUhQUkkEG4Ixk6ue7dFjPuuXeau08Plp5+0cP6VrS22pSiAACSdgGKpMXPnSJK/PWSBsHMOzlGRX/EmsW8koWOs3B4jONTNNoNQkIUUr3ve27awpzxb4PCTxujGpKYqkinKV4klvdJ+e3+nMkjvakS1DWtIb7FcB+7BNyTyjI67S5CDzsqP1EcRpUk
qRTqfFBtvr6l9iE2/nx2V5XeeYKS9/3CEdizuD+OEm4/RnVak0+OhJtd256gm38+U5JTeY+rYyofeniNKyjv8AR0c24f8AxTx1NJTUYKhrD7Z/iGEeSP0Z63Pe8Xc6hur9dxynI7JtNeOqyAO0m/EaVv1mj/Mf/FPHU7/mEL98j8cI8gfozq2pdOZWnmdseopJ5TlKIWKShZFi8tSz2eL/AC4jSsx/Y0qR8FbqD9IA8dQmFSK1S2UjypTQ7N0L4SLJ/RmOOJVIloSk+Ijdjb4nCcEWJB5PDjrlSWWGxdS1hI7TiHHRGjsso8htCUDqSLcRpDppl5ckLABXHUl8DYBwH7jx2juAZeYmXyk7iM2t07L23I/HA/QtIWkpULggjFXgqp8+RHINkrO5O0axyfJlLK3l1F1Pit3S3cecRr7BxMqM3IjusOpCkOoKVjakixGKzTXaTU5cB4HdNOEAnzk6we0cbo3o5g0hU91FnZhCh+7T5PvM6UjfWkTmE3W0LObSnmPZyanQHqjKajMjhUeE2uANpxAhNQYzTDabNtpsOk85PXxWkjLJmRk1mGjdPR0WdA85rb9HjMqUByv1Rtgg97N2W+vYjZ1qww02y2htCQlCEhKUjUAPeLQlxCkLAUlQsQdRBxmKiOUqWopSox1m6FHht0HkjDDsl1DLKCpajYAYoFFRSYw3dlSF8K1bPkji1JCgUkXBxnjJTlJecqVOZvCWbrQn9kT/AEniqVSplYmNQoTRW4s9iRzqUeYDGXaBFoFPbiMC6/KdctYrVt/Ie+qECNMjKjyE7oLHaOkYrVEkUl8hQKmVE7hY1HkUOFInPoYjtla1bMUDLzNKb3xyy5KvKXzDoTxrjaHEKQ4gKSoWIIuCDzYzTo5WlTk2ggEG6lxr6vmH+WHmXWHFtPNqQ4k2UlQIIOwg+/y/lCq19xKm2yzFv4z7g8X6I844oOXoFBiiPDb4TYuOny1kbTxEmOxKaVHebS4hXlA4rWTpEdSnqfdxu5JR5w6tuFtONKKXEFJBsQeOShSzZIvilZTnTShySCwyfhDxj1DFPpcSmtBuM0B8JR4VK6zyCr5apFaQROiJWsCwdT4qx1KGKloseG7XSp4UnmQ+LfxJxJyLmaMoj3OU4n4TakqwrLVfSbGjy/sV4ZyhmN/yKRI+kncf6rYhaM64+QZa2YyOk7tQ7E4o+jyiU0h2SgzHhzu+R2I/PCEIbASgAJAsAOLqFFp84HvphKlkCyhwK4OnZiXkcElUKV9Fz2hh/KdZataPuwfOSoEYXQqog2MJ49Taj/LHuNVPiEj7Jf5Y9xqp8QkfZL/LHuNVPiEj7Jf5Y9xqp8QkfZL/ACx7jVT4hI+yX+WPcaqfEJH2S/yx7jVT4hI+yX+WEUCquaoTw+chQ/EYYyjWHQSpgN9K1C33XOIuR0+VMlfRbH8ziFRKdTwksRkhY89XjK+/VyWwxYf5ef/EADgRAAIBAgMDCQUHBQAAAAAAAAECAwQRAAUgMUFhEhMhIjBAUXGREDJQU6EGFDNCYoGSUnKiwdH/2gAIAQIBAT8A+L37e/wE9zHfj3k90Gk90Gk9ztqPcbd3t3e3b2129qRySGyIScRZY56ZXtwGFoKZfyX8zj7rT/JX0w+X0zbFKngcTZdLHdozyx9cbOg9pbFtENJPNYqlh4nEOWxJYykufQYVFQWRQBw1VVGk4LKAJPHxwysjFWFiNUsscKGSVwqjecVOfgErSxX/AFNhs5r2P4oHkoxHndchHKZXHFf+YpM7gnISYc0/+J0KpYhVFycUtCkQDygM/huHZZjThl59R1l97iNMsqQxvLIbKoucV1dLWykkkRg9VdOUZmyOtLO10PQhO4+Hty6mCrz7jpPu+XZsoZSp2EEYkQxyOh/KSNGf1JAipVO3rNq2EHGW1P3mkikJ6w6reYxGpd0QbyBhVCqFGwC3aV4tUycbHRnLFq+UeAUfTX9nmJhqE3BwfUYoxeqi8+1ryDVPwA0ZwCMwm4hT9Nf2eB5qobcWUfTFM3Inib9Q7QkAEnYMSvzkrv4knRn8BEkVQB0Ecg+Y15RTmCij5Qsz9c/v7KWYTQo28dDefZ5hUBI+aU9Z9vAaamnSqheF9jD0OKmmlpZWilFiNh3Eacqy9quUSSLaFDc8T4YAt7KWpNPJfap94YR1kUOhuD2NTVJTr4vuGHdpHZ3NydVVSQVaciZfIjaMVOR1URJhtKvocNSVSmzU8gP9pxHQVkhASnf9xbFJkJuHq2Fv6F/2cIiRoqIoVQLADRBUSwG6Ho3g7DiLMYX6Huh9RgTwtslT1GOdi+YnqMc7F8xP5DHOxfMT+Qxz0XzE9Rh6ymTbKD5dOJsyY3WFbcThmZiWYkk7z8W//8QAOREAAgECAgYHBwMDBQAAAAAAAQIDAAQFERITICExkQYwQVFSYXEQFCJAQlOBMlChI4KSYnJzsbL/2gAIAQMBAT8A/YCyjiwFa2PxjnWtj8Y51rY/GOda2PxjnWtj8Y51rY/GOda2PxjnWtj8Y51rY/GOda2PxjnWtj8YoMp4EHq5LlV3LvNPNI/FuXW5kcDUdw6cd4pJFkGanbJABJqacvmq7l+RR2Rgy0jiRQw2rmXM6CncOPydq+T6B4HZmfQjJ7eA+UQ6LqfMbN229V/Pyg4j1GzcnOVvlIV0pFH52bgZSt8pbRaC6TcTs3YycHvHyQBJAFQ2+WTyfgbVymlHmOI+Rjt3fe3wio4kj4Df39RNGY38jw60AscgMzSWrHe5yFJEkfBd/f1UiLIpU1JG0ZyPVJE7/pWktRxc/gUqKgyVQOtZVcZMMxUlqw3pvHdRBU5EEbIBO4CktpG3t8IpLeNOzM+fsSN5DkikmosPY75Wy8hS2duv0Z+te7wfaXlT2Nu3BSvoalsJE3xnTH81vG49UVVtzAGjbRH6cq90TxGvdE8RoW0Q7M6Cqu5VA9kVrNLvC5DvNRWEa75CWPIUqqgyVQB5bVzarMCy7n7++mUoxVhkRtW9tPdypBbRNJI3BVFYf0FdlWTErnQP24uP5JqLojgUYyNqznvZ2q46GYLKDq0khPejk/8ArOsU6HX1irTWre8xDeQBk4/FHduPtALEKozJq3skjAaQaT/wOqv4NJdco3jj6bNtby3c8VtAulJIwVRWCYJb4PbKqqGnYDWSdpPcPLZ6V9HEmikxOxjAlQaUqL9Q7x5+2xgCrrmG8/p9OrIDAg8CKkTQd07iRsdBcPV3ucSkX9H9KP1O8naIBBBG410gsBh2K3MCDKNjrE/2tSLpuqDtIFKAqhRwA6y9GVw/mAdjohEEwK2I4u0jH/Lb6exgXljL2tEwP9pq0GdzF69bfHO4fyAGx0ScPgVpl9JkB/yO309cG6w9O0ROeZq3bQnib/UOsJyBJqV9ZI7952Ogl8DDdYezfEra1B5HcdvpTfC+xicoc44QIl/t4/z7LaUTRK3bwPr1d9PoJqlPxN/A2cOvpsNvIbyA/Eh3jvHaDWHYjbYnapdWzgg/qHap7js9JseTDLZreBwbuVSAB9AP1GiSSSeJ9ltcG
B8/pPEUjq6hlOYPU3FykC97dgp3aRi7HMnaw3FbzCptdaSZeJDvVh5isO6aYdcqq3gNvJ25705ikxXDJAGS/gI/5FqfHMIt10pb+H0DBjyGdYr03XRaLCojnw1sg/6FTTSzyPNNIXkc5szHMnYhuJIDmh3doPCo7+F9z5oaE0R4SrzrWR/cXnWsj+4vOtZH9xeYrWx/cXmKe6gTjID6b6lxAnMQrl5mmYsSzEkn92//2Q==" + ], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**texts:** `typing.Optional[typing.Sequence[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`. + +
+
+ +
+
+
+**images:** `typing.Optional[typing.Sequence[str]]`
+
+An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
+
+The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg`, `image/png`, `image/webp`, or `image/gif` format and must be no larger than 5MB.
+
+Images are only supported with Embed v3.0 and newer models.
+
+
+ +
+
+ +**model:** `typing.Optional[str]` — ID of one of the available [Embedding models](https://docs.cohere.com/docs/cohere-embed). + +
+
+ +
+
+ +**input_type:** `typing.Optional[EmbedInputType]` + +
+
+ +
+
+
+**embedding_types:** `typing.Optional[typing.Sequence[EmbeddingType]]`
+
+Specifies the types of embeddings you want to get back. Not required; the default is None, which returns the Embed Floats response type. Can be one or more of the following types.
+
+* `"float"`: Use this when you want to get back the default float embeddings. Supported with all Embed models.
+* `"int8"`: Use this when you want to get back signed int8 embeddings. Supported with Embed v3.0 and newer Embed models.
+* `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Supported with Embed v3.0 and newer Embed models.
+* `"binary"`: Use this when you want to get back signed binary embeddings. Supported with Embed v3.0 and newer Embed models.
+* `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Supported with Embed v3.0 and newer Embed models.
+
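+
+A request sketch asking for two representations at once (attribute names are assumed to mirror the requested types, with `float_` for `"float"`):
+
+```python
+from cohere import Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+response = client.embed(
+    model="embed-v4.0",
+    input_type="search_document",
+    texts=["hello", "goodbye"],
+    embedding_types=["float", "int8"],
+)
+# One list of vectors per requested embedding type.
+print(len(response.embeddings.float_))
+print(len(response.embeddings.int8))
+```
+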
+
+ +
+
+ +**truncate:** `typing.Optional[EmbedRequestTruncate]` + +One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. + +Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + +If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.rerank(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
+```python
+from cohere import Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+client.rerank(
+    documents=[
+        {
+            "text": "Carson City is the capital city of the American state of Nevada."
+        },
+        {
+            "text": "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan."
+        },
+        {
+            "text": "Capitalization or capitalisation in English grammar is the use of a capital letter at the start of a word. English usage varies from capitalization in other languages."
+        },
+        {
+            "text": "Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district."
+        },
+        {
+            "text": "Capital punishment has existed in the United States since before the United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states."
+        },
+    ],
+    query="What is the capital of the United States?",
+    top_n=3,
+    model="rerank-v3.5",
+)
+
+```
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**query:** `str` — The search query + +
+
+ +
+
+
+**documents:** `typing.Sequence[RerankRequestDocumentsItem]`
+
+A list of document objects or strings to rerank.
+If a document object is provided, the `text` field is required and all other fields will be preserved in the response.
+
+The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000.
+
+We recommend a maximum of 1,000 documents for optimal endpoint performance.
+
+
+ +
+
+ +**model:** `typing.Optional[str]` — The identifier of the model to use, eg `rerank-v3.5`. + +
+
+ +
+
+ +**top_n:** `typing.Optional[int]` — The number of most relevant documents or indices to return, defaults to the length of the documents + +
+
+ +
+
+ +**rank_fields:** `typing.Optional[typing.Sequence[str]]` — If a JSON object is provided, you can specify which keys you would like to have considered for reranking. The model will rerank based on order of the fields passed in (i.e. rank_fields=['title','author','text'] will rerank using the values in title, author, text sequentially. If the length of title, author, and text exceeds the context length of the model, the chunking will not re-consider earlier fields). If not provided, the model will use the default text field for ranking. + +
+
+ +
+
+
+**return_documents:** `typing.Optional[bool]`
+
+- If false, returns results without the doc text - the API will return a list of {index, relevance score} where index is inferred from the list passed into the request.
+- If true, returns results with the doc text passed in - the API will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request.
+
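+
+A sketch of reading the ordered results (with `return_documents` left off, each result carries just an index and score):
+
+```python
+from cohere import Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+response = client.rerank(
+    model="rerank-v3.5",
+    query="What is the capital of the United States?",
+    documents=[
+        {"text": "Carson City is the capital city of the American state of Nevada."},
+        {"text": "Washington, D.C. is the capital of the United States."},
+    ],
+    top_n=2,
+)
+# Results are ordered by relevance; `index` points into the input list.
+for result in response.results:
+    print(result.index, round(result.relevance_score, 3))
+```
+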
+
+ +
+
+ +**max_chunks_per_doc:** `typing.Optional[int]` — The maximum number of chunks to produce internally from a document + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.classify(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This endpoint makes a prediction about which label fits the specified text inputs best. To make a prediction, Classify uses the provided `examples` of text + label pairs as a reference. +Note: [Fine-tuned models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import ClassifyExample, Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.classify( + examples=[ + ClassifyExample( + text="Dermatologists don't like her!", + label="Spam", + ), + ClassifyExample( + text="'Hello, open to this?'", + label="Spam", + ), + ClassifyExample( + text="I need help please wire me $1000 right now", + label="Spam", + ), + ClassifyExample( + text="Nice to know you ;)", + label="Spam", + ), + ClassifyExample( + text="Please help me?", + label="Spam", + ), + ClassifyExample( + text="Your parcel will be delivered today", + label="Not spam", + ), + ClassifyExample( + text="Review changes to our Terms and Conditions", + label="Not spam", + ), + ClassifyExample( + text="Weekly sync notes", + label="Not spam", + ), + ClassifyExample( + text="'Re: Follow up from today's meeting'", + label="Not spam", + ), + ClassifyExample( + text="Pre-read for tomorrow", + label="Not spam", + ), + ], + inputs=["Confirm your email address", "hey i need u to send some $"], + model="YOUR-FINE-TUNED-MODEL-ID", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+
+**inputs:** `typing.Sequence[str]`
+
+A list of up to 96 texts to be classified. Each one must be a non-empty string.
+There is, however, no universal limit on how long a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models).
+Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts.
+
+
+ +
+
+ +**examples:** `typing.Optional[typing.Sequence[ClassifyExample]]` + +An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`. +Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly. + +
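+
+A compact sketch of reading predictions back (each label has the required minimum of two examples):
+
+```python
+from cohere import ClassifyExample, Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+response = client.classify(
+    inputs=["Confirm your email address"],
+    examples=[
+        ClassifyExample(text="I need help please wire me $1000 right now", label="Spam"),
+        ClassifyExample(text="Nice to know you ;)", label="Spam"),
+        ClassifyExample(text="Your parcel will be delivered today", label="Not spam"),
+        ClassifyExample(text="Weekly sync notes", label="Not spam"),
+    ],
+)
+for c in response.classifications:
+    print(c.input, "->", c.prediction, c.confidence)
+```
+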
+
+ +
+
+ +**model:** `typing.Optional[str]` — ID of a [Fine-tuned](https://docs.cohere.com/v2/docs/classify-starting-the-training) Classify model + +
+
+ +
+
+ +**preset:** `typing.Optional[str]` — The ID of a custom playground preset. You can create presets in the [playground](https://dashboard.cohere.com/playground). If you use a preset, all other parameters become optional, and any included parameters will override the preset's parameters. + +
+
+ +
+
+ +**truncate:** `typing.Optional[ClassifyRequestTruncate]` + +One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. +Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. +If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.summarize(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ + +This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using the Chat API. + +Generates a summary in English for a given text. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
+```python
+from cohere import Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+client.summarize(
+    text='Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases.\n\nThe meaning of the name "ice cream" varies from one country to another. In some countries, such as the United States, "ice cream" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled "frozen dairy dessert" instead. In other countries, such as Italy and Argentina, one word is used for all variants. Analogues made from dairy alternatives, such as goat\'s or sheep\'s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.',
+)
+
+```
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**text:** `str` — The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English. + +
+
+ +
+
+ +**length:** `typing.Optional[SummarizeRequestLength]` — One of `short`, `medium`, `long`, or `auto` defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text. + +
+
+ +
+
+ +**format:** `typing.Optional[SummarizeRequestFormat]` — One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text. + +
+
+ +
+
+ +**model:** `typing.Optional[str]` — The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. + +
+
+ +
+
+ +**extractiveness:** `typing.Optional[SummarizeRequestExtractiveness]` — One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text. + +
+
+ +
+
+ +**temperature:** `typing.Optional[float]` — Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. + +
+
+ +
+
+
**additional_command:** `typing.Optional[str]` — A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". E.g. "focusing on the next steps" or "written by Yoda".

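+
As a quick sketch, the instruction simply slots into the sentence template above (the `text` value is a placeholder):

```python
from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
# Completes the template: "Generate a summary focusing on the next steps".
response = client.summarize(
    text="<a long meeting transcript, up to 100,000 characters>",
    additional_command="focusing on the next steps",
)
```
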
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.tokenize(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This endpoint splits input text into smaller units called tokens using byte-pair encoding (BPE). To learn more about tokenization and byte pair encoding, see the tokens page. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.tokenize( + text="tokenize me! :D", + model="command", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**text:** `str` — The string to be tokenized, the minimum text length is 1 character, and the maximum text length is 65536 characters. + +
+
+ +
+
+ +**model:** `str` — The input will be tokenized by the tokenizer that is used by this model. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.detokenize(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.detokenize( + tokens=[10002, 2261, 2012, 8, 2792, 43], + model="command", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**tokens:** `typing.Sequence[int]` — The list of tokens to be detokenized. + +
+
+ +
+
+ +**model:** `str` — An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.check_api_key() +
+
+ +#### 📝 Description + +
+
+ +
+
+
Checks that the API key in the Authorization header is valid and active.
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.check_api_key() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## V2 +
client.v2.chat_stream(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
Generates a streamed text response to a user message, token by token. To learn how to use the Chat API with streaming follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).

Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
```python
from cohere import (
    Client,
    ImageUrl,
    ImageUrlContent,
    TextContent,
    UserChatMessageV2,
)

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
response = client.v2.chat_stream(
    model="command-a-vision-07-2025",
    messages=[
        UserChatMessageV2(
            content=[
                TextContent(
                    text="Describe this image",
                ),
                ImageUrlContent(
                    image_url=ImageUrl(
                        url="https://cohere.com/favicon-32x32.png",
                        detail="auto",
                    ),
                ),
            ],
        )
    ],
)
# Consume the stream as chunks arrive (yield is only valid inside a generator).
for chunk in response.data:
    print(chunk)

```
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` — The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models). + +
+
+ +
+
+ +**messages:** `ChatMessages` + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[ToolV2]]` + +A list of tools (functions) available to the model. The model response may contain 'tool_calls' to the specified tools. + +Learn more in the [Tool Use guide](https://docs.cohere.com/docs/tools). + +
+
+ +
+
+ +**strict_tools:** `typing.Optional[bool]` + +When set to `true`, tool calls in the Assistant message will be forced to follow the tool definition strictly. Learn more in the [Structured Outputs (Tools) guide](https://docs.cohere.com/docs/structured-outputs-json#structured-outputs-tools). + +**Note**: The first few requests with a new set of tools will take longer to process. + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. + +
+
+ +
+
+ +**citation_options:** `typing.Optional[CitationOptions]` + +
+
+ +
+
+ +**response_format:** `typing.Optional[ResponseFormatV2]` + +
+
+ +
+
+
**safety_mode:** `typing.Optional[V2ChatStreamRequestSafetyMode]`

Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
When `OFF` is specified, the safety instruction will be omitted.

Safety modes are not yet configurable in combination with the `tools` and `documents` parameters.

**Note**: This parameter is only compatible with newer Cohere models, starting with [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release) and [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release).

**Note**: `command-r7b-12-2024` and newer models only support `"CONTEXTUAL"` and `"STRICT"` modes.

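+
For example, a minimal sketch forcing the strictest safety instruction (the message content is illustrative):

```python
from cohere import Client, UserChatMessageV2

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
# STRICT inserts the strictest safety instruction; OFF would omit it.
response = client.v2.chat_stream(
    model="command-a-03-2025",
    messages=[UserChatMessageV2(content="Summarize safe handling of batteries")],
    safety_mode="STRICT",
)
for chunk in response.data:
    print(chunk)
```
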
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models). + +**Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`. + +**Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit. + +
+
+ +
+
+
**stop_sequences:** `typing.Optional[typing.Sequence[str]]` — A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point, not including the stop sequence.

+
+ +
+
+ +**temperature:** `typing.Optional[float]` + +Defaults to `0.3`. + +A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. + +Randomness can be further maximized by increasing the value of the `p` parameter. + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +If specified, the backend will make a best effort to sample tokens +deterministically, such that repeated requests with the same +seed and parameters should return the same result. However, +determinism cannot be totally guaranteed. + +
+
+ +
+
+ +**frequency_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. +Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + +
+
+ +
+
+ +**presence_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. +Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + +
+
+ +
+
+ +**k:** `typing.Optional[int]` + +Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled. +Defaults to `0`, min value of `0`, max value of `500`. + +
+
+ +
+
+
**p:** `typing.Optional[float]`

Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`, min value of `0.01`, max value of `0.99`.

+
+ +
+
+ +**logprobs:** `typing.Optional[bool]` — Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response. + +
+
+ +
+
+
**tool_choice:** `typing.Optional[V2ChatStreamRequestToolChoice]`

Used to control whether or not the model will be forced to use a tool when answering. When `REQUIRED` is specified, the model will be forced to use at least one of the user-defined tools, and the `tools` parameter must be passed in the request.
When `NONE` is specified, the model will be forced **not** to use one of the specified tools, and will give a direct response.
If `tool_choice` isn't specified, the model is free to choose whether or not to use the specified tools.

**Note**: This parameter is only compatible with models [Command-r7b](https://docs.cohere.com/v2/docs/command-r7b) and newer.

+
+ +
+
+ +**thinking:** `typing.Optional[Thinking]` + +
+
+ +
+
+
**priority:** `typing.Optional[int]`

The priority of the request. A lower value means earlier handling, and the default value of `0` is the highest priority. Higher-priority requests are handled first and are dropped last when the system is under load.

+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.v2.chat(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
Generates a text response to a user message. To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).

Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import ( + Client, + ImageUrl, + ImageUrlContent, + TextContent, + UserChatMessageV2, +) + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.v2.chat( + model="command-a-vision-07-2025", + messages=[ + UserChatMessageV2( + content=[ + TextContent( + text="Describe this image", + ), + ImageUrlContent( + image_url=ImageUrl( + url="https://cohere.com/favicon-32x32.png", + detail="auto", + ), + ), + ], + ) + ], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` — The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models). + +
+
+ +
+
+ +**messages:** `ChatMessages` + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[ToolV2]]` + +A list of tools (functions) available to the model. The model response may contain 'tool_calls' to the specified tools. + +Learn more in the [Tool Use guide](https://docs.cohere.com/docs/tools). + +
+
+ +
+
+ +**strict_tools:** `typing.Optional[bool]` + +When set to `true`, tool calls in the Assistant message will be forced to follow the tool definition strictly. Learn more in the [Structured Outputs (Tools) guide](https://docs.cohere.com/docs/structured-outputs-json#structured-outputs-tools). + +**Note**: The first few requests with a new set of tools will take longer to process. + +
+
+ +
+
+ +**documents:** `typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. + +
+
+ +
+
+ +**citation_options:** `typing.Optional[CitationOptions]` + +
+
+ +
+
+ +**response_format:** `typing.Optional[ResponseFormatV2]` + +
+
+ +
+
+
**safety_mode:** `typing.Optional[V2ChatRequestSafetyMode]`

Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
When `OFF` is specified, the safety instruction will be omitted.

Safety modes are not yet configurable in combination with the `tools` and `documents` parameters.

**Note**: This parameter is only compatible with newer Cohere models, starting with [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release) and [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release).

**Note**: `command-r7b-12-2024` and newer models only support `"CONTEXTUAL"` and `"STRICT"` modes.

+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models). + +**Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`. + +**Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit. + +
+
+ +
+
+
**stop_sequences:** `typing.Optional[typing.Sequence[str]]` — A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point, not including the stop sequence.

+
+ +
+
+ +**temperature:** `typing.Optional[float]` + +Defaults to `0.3`. + +A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. + +Randomness can be further maximized by increasing the value of the `p` parameter. + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` + +If specified, the backend will make a best effort to sample tokens +deterministically, such that repeated requests with the same +seed and parameters should return the same result. However, +determinism cannot be totally guaranteed. + +
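+
A small sketch of best-effort reproducibility, repeating the same request with the same `seed` (the message is illustrative):

```python
from cohere import Client, UserChatMessageV2

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
messages = [UserChatMessageV2(content="Name three colours")]
# Repeated requests with identical seed and parameters should return
# the same tokens, though determinism is not fully guaranteed.
first = client.v2.chat(model="command-a-03-2025", messages=messages, seed=42)
second = client.v2.chat(model="command-a-03-2025", messages=messages, seed=42)
```
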
+
+ +
+
+ +**frequency_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. +Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + +
+
+ +
+
+ +**presence_penalty:** `typing.Optional[float]` + +Defaults to `0.0`, min value of `0.0`, max value of `1.0`. +Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + +
+
+ +
+
+ +**k:** `typing.Optional[int]` + +Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled. +Defaults to `0`, min value of `0`, max value of `500`. + +
+
+ +
+
+
**p:** `typing.Optional[float]`

Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`, min value of `0.01`, max value of `0.99`.

+
+ +
+
+ +**logprobs:** `typing.Optional[bool]` — Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response. + +
+
+ +
+
+
**tool_choice:** `typing.Optional[V2ChatRequestToolChoice]`

Used to control whether or not the model will be forced to use a tool when answering. When `REQUIRED` is specified, the model will be forced to use at least one of the user-defined tools, and the `tools` parameter must be passed in the request.
When `NONE` is specified, the model will be forced **not** to use one of the specified tools, and will give a direct response.
If `tool_choice` isn't specified, the model is free to choose whether or not to use the specified tools.

**Note**: This parameter is only compatible with models [Command-r7b](https://docs.cohere.com/v2/docs/command-r7b) and newer.

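+
A hedged sketch of forcing a tool call; the `get_weather` tool and its schema are invented for illustration:

```python
from cohere import Client, ToolV2, ToolV2Function, UserChatMessageV2

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
weather_tool = ToolV2(
    type="function",
    function=ToolV2Function(
        name="get_weather",  # hypothetical tool, for illustration only
        description="Gets the current weather for a city",
        parameters={
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    ),
)
# REQUIRED forces at least one call to a user-defined tool;
# NONE would force a direct answer instead.
response = client.v2.chat(
    model="command-a-03-2025",
    messages=[UserChatMessageV2(content="What's the weather in Toronto?")],
    tools=[weather_tool],
    tool_choice="REQUIRED",
)
```
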
+
+ +
+
+ +**thinking:** `typing.Optional[Thinking]` + +
+
+ +
+
+
**priority:** `typing.Optional[int]`

The priority of the request. A lower value means earlier handling, and the default value of `0` is the highest priority. Higher-priority requests are handled first and are dropped last when the system is under load.

+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.v2.embed(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents.

Embeddings can be used to create text classifiers as well as to power semantic search. To learn more about embeddings, see the embedding page.

To learn more about how to use the embedding model, have a look at the [Semantic Search Guide](https://docs.cohere.com/docs/semantic-search).
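+
As a sketch of the semantic-search idea: embed a query and some candidate texts, then rank by cosine similarity. This assumes numpy is available and that the response exposes per-type fields such as `embeddings.float_`:

```python
import numpy as np

from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
texts = ["How do I reset my password?", "Our office is closed on Mondays"]
docs = client.v2.embed(
    model="embed-v4.0",
    input_type="search_document",
    embedding_types=["float"],
    texts=texts,
)
query = client.v2.embed(
    model="embed-v4.0",
    input_type="search_query",
    embedding_types=["float"],
    texts=["password recovery steps"],
)
# Cosine similarity between the query vector and each document vector.
doc_vecs = np.array(docs.embeddings.float_)
q_vec = np.array(query.embeddings.float_[0])
scores = doc_vecs @ q_vec / (np.linalg.norm(doc_vecs, axis=1) * np.linalg.norm(q_vec))
print(sorted(zip(scores, texts), reverse=True))
```
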
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.v2.embed( + model="embed-v4.0", + input_type="image", + embedding_types=["float"], + images=[ + "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD//gAfQ29tcHJlc3NlZCBieSBqcGVnLXJlY29tcHJlc3P/2wCEAAQEBAQEBAQEBAQGBgUGBggHBwcHCAwJCQkJCQwTDA4MDA4MExEUEA8QFBEeFxUVFx4iHRsdIiolJSo0MjRERFwBBAQEBAQEBAQEBAYGBQYGCAcHBwcIDAkJCQkJDBMMDgwMDgwTERQQDxAUER4XFRUXHiIdGx0iKiUlKjQyNEREXP/CABEIAZABkAMBIgACEQEDEQH/xAAdAAEAAQQDAQAAAAAAAAAAAAAABwEFBggCAwQJ/9oACAEBAAAAAN/gAAAAAAAAAAAAAAAAAAAAAAAAAAHTg9j6agAAp23/ADjsAAAPFrlAUYeagAAArdZ12uzcAAKax6jWUAAAAO/bna+oAC1aBxAAAAAAbM7rVABYvnRgYAAAAAbwbIABw+cMYAAAAAAvH1CuwA091RAAAAAAbpbPAGJfMXzAAAAAAJk+hdQGlmsQAAAAABk31JqBx+V1iAAAAAALp9W6gRp826AAAAAAGS/UqoGuGjwAAAAAAl76I1A1K1EAAAAAAG5G1ADUHU0AAAAAAu/1Cu4DVbTgAAAAAA3n2JAIG0IAAAAAArt3toAMV+XfEAAAAAL1uzPlQBT5qR2AAAAAenZDbm/AAa06SgAAAAerYra/LQADp+YmIAAAAC77J7Q5KAACIPnjwAAAAzbZzY24gAAGq+m4AAA7Zo2cmaoAAANWdOOAAAMl2N2TysAAAApEOj2HgAOyYtl5w5jw4zZPJyuGQ5H2AAAdes+suDUAVyfYbZTLajG8HxjgD153n3IAABH8QxxiVo4XPKpGlyTKjowvCbUAF4mD3AAACgqCzYPiPQAA900XAACmN4favRk+a9wB0xdiNAAAvU1cgAxeDcUoPdL0s1B44atQAACSs8AEewD0gM72I5jjDFiAAAPfO1QGL6z9IAlGdRgkaAAABMmRANZsSADls7k6kFW8AAAJIz4DHtW6AAk+d1jhUAAAGdyWBFcGgAX/AGnYZFgAAAM4k4CF4hAA9u3FcKi4AAAEiSEBCsRgAe3biuGxWAAACXsoAiKFgALttgs0J0AAAHpnvkBhOt4AGebE1pBtsAAAGeySA4an2wAGwEjGFxaAAAe+c+wAjKBgAyfZ3kUh3HAAAO6Yb+AKQLGgBctmb2HXDNjAAD1yzkQAENRF1gyvYG9AcI2wjgAByyuSveAAWWMcQtnoyOQs8qAPFhVh8HADt999y65gAAKKgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAf/8QAGgEBAAMBAQEAAAAAAAAAAAAAAAEFBgIEA//aAAgBAhAAAAAAAAAAAAABEAAJkBEAAB0CIAABMhyAAA6EQAAA6EQAABMiIAAAmREAAAmQiAABMgOQAEyAHIATIACIBMu7H3fT419eACEnps7DoPFQch889Wd3V2TeWIBV0o+eF8I0OrXVoAIyvBm8uDe2Wp6ADO+Mw9WDV6rSgAzvjMNWA1Op1AARlvmZbOA3NnpfSAK6iHnwfnFttZ9Wh7AeXPcB5cxWd3Wk7Pvb+uR8q+rgAAAAAAAAP//EABsBAQABBQEAAAAAAAAAAAAAAAAEAQIDBQYH/9oACAEDEAAAAAAAAAC20AL6gCNDxAArnn3gpro4AAv2l4QIgAAJWwGLVAAAX7cQYYAAFdyNZgAAAy7UazAAABsZI18UAAE6YEfWgACRNygavCACsmZkALNZjAMkqVcAC2FFoKyJWe+fMyYoMAAUw2L8t0jYzqhE0dAzd70eHj+PK7mcAa7UDN7VvBwXmDb7EAU5uw9C9KCnh2n6WoAaKIey9ODy/jN+ADRRD2fpQeY8P0QAU5zGel+gg8V53oc4AgaYTfcJ45Tx5I31wCPobQ2PpPRYuP8APMZm2kqoxQddQAAAAAAAAP/EAFMQAAEDAgIDCQkMBwUIAwAAAAECAwQFEQAGBzFREhMhMEBBYXGBCBQYIjJCRlDSFSBSVGJygpGTobHREDRDc6LBwiMzU3CyFiQlNVVkdISSlLP/2gAIAQEAAT8A/wAo74nVaBAb32bNYitfDfcS2PrURiZpU0dwVFMjN1OVY8O8u7//APkFYc076LmfSVSvmQpB/ox4QGjH/r7v/wBGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0YH0ge7IMj2ceEBowPpA92QZHs48IDRgfSB7sgyPZx4QGjA+kD3ZBkezjwgNGB9IHuyDI9nHhAaMD6QPdkGR7OPCA0Y89fd7IMj2cN6e9GDpCTmRaOuFI9nEDSlo9qakpj5upoJNgH3d4+50JxGlxpbSH4r7bzSvJW0sLSeop5NWsw0fL8RU2rVGPDjJ4C6+4EAnYnaegYzV3StDhFcfK1LdqDuoSZBLDHWlPlqxXtNmkOulaVVxcFg3/sYA73A+kLrxKnTJrpfmSXX3jrcdWVqPWVYudvJ7nbil16s0R7vikVSVDduCVR3lNk9e5IvjKfdG5rpKmo+Yo7NXi8ALlgxJH0kiysZL0l5Uzsz/AMFn2l7m7kJ8BuSj6PnAbU8ieeZitOPPuoQ22krWtZCUpSkXJJOoDGkHui4MBT1MyW2ibITdJnuA97o/dJ1uHFczFXMyzV1Gu1N+bJV57yr7kbEjUkdA5dGlSYb7UqJIcZfaUFtuNLKFoUNRSocIONF3dBb6tih58eSCQEM1PUOqT7eELS4lK0KCkkAgg3BB4/M2Z6NlKlSKtWJiI8VoWueFS1nUhA85ZxpJ0v13Pj7kNorg0NC7tw0K4XNi3yPKPRqHqLQnpkeoD8XKmZZJVSHCG4klw/qijqQs/wCF/pwDfjc1ZqpOUKNLrVXf3qMyLJSLFbrh8ltA51qxn7P9az9V1z6istxWypMSIhRLbCD+Kj5yvUYJHCMdz7pLXWoByfWJBXUILV4bizwvRk+Z0qa4yoTodKgyZ859DEWO0t11xZslCEC5UrGlHSNOz/XVvBa26RFKkQY+xHO4v5a/UtArU3LlZptbpzm4lQ30ut7DbWk9ChwHGXq5EzHQ6ZWoCv8AdpsdDyRrIKtaFdKTwHi+6I0hrffGRKU/ZloodqSkngW5rQz1I1n1P3M2ZzJpFYyvIXdUJ0SowP8AhP8AAtI6
AvitIWbWclZVqlbWElxpvcRmz+0kOcDaf5nEyXJnypM2Y8p2Q+6t11xRupa1m6lHpJ9T6B6uaVpHo7alEMz0PQnepxN0/wASRgauJ7pTNZmVynZTjuXZpzYkSRtkPDgB6UI9UZMlrgZsy1MQqxZqkRy/QHRfA4iZIaiRX5D6ghpptTi1bEIFycZmrL2YcwVitvk7ubLdfsfNClcCewcHqiiX91qbbX3yz/rGBxGmKse4ujnMz6F2dfjiGj/2VBs/ccE3J9UZOirm5ry3EQm5eqkRu3Qp0YHEd01PLGUqPT0mxk1QLV0oZaPteqdBtKNV0kUIkXah77Md6mkcH8RGBq4jupH7JyXG/wDPcP1tj1T3MuWVMQK5mt9FjJWmDGO1tHjuHqJ4nupEnvrJa+beZ4/jR6ooNGnZhrFOotNa3yXMeS02OvWo9CRwk4ytQIeWKDS6HC/V4TCWgq1itWtSz0rPCeJ7qKNenZSl2/upEtonpcShXqcC+NA+jFeW4H+1NbYKatOaswysWMaOrbscc4rujaYZuj/vzccMCpR3yehwFn+r1MAVGwGNDOhVbK4ubc4xLLFnYMB1PCNjrw/BHF58opzDk7MlHSndOSID28ja6gbtH3jChZRHqShZerOZag1S6JT3pcpzUhsahtUTwJTtJxow0G0vKRYreYS1PrIAUhNrx4yvkA+WsfCONXFnGlTLZytnqvU5KLRlvmTG2Fl/xwB0J1eookOXPkNRYUZ1991W5baaQVrWdiUi5JxkbudKzVCzOzg+abE196NWXKWOnWlvGW8p0DKMEU6g01qKzwFe5F1uEDynFnhUeO7pTJ5n0aBmyK3d+mneJVtZjOnxVfQX6ghwZtRktQ4EV6RJcNkNMoK1qOwJTcnGTe5yr9V3qXmuSKXFNj3uizkpY/0oxlbIOVslRt6oVKaZdIst9XjyHPnOK4ezkFVgw6vAmU2ewHYsllbDiFaloWNyoYz1lKZknMtRoEu6gyvdMO8zrC/IXy2j0Cs5glpg0WmyJkk+YwgrIG1WwdJxk7uap75amZyqQit6zChkLe6lueSnGWcl5ayjGEegUliKCAFuAbp5z57irqPI9NOjVOdqB31T2x7tU5KlxNryNa2CenWnDra2XFtOoUhaFFKkqFiCOAgg8qyro7zdnJwCh0Z5xi9lSVje46etarA22DGUe5spEPe5ebqgue78Ui3aj9Sl+WvFIodHoMREGj02PDjJ1NMNhAJ2m2s8m07aIHJi5WdMsxSZFiuoxG08LoGt9sDz/hjGrkzLD0hxDLDSluLISlKQSpRPMAMZU0C54zFvcidHTR4Sv2k24dI+SyPG+u2MqaBskZc3qRLimrzEftZoBaB+S0PFw0y2y2hppCUIQAEpSAAAOYAauU6XtBJmuycy5LjASVXcl05sWDu1bGxe1GHWnGXFtOoUhxCilSVAghSTYgg6iOR5eyfmXNT/AHvQKNJmKBspTaLNo+es2SntOMq9zNIc3uTm+sBoazEgWWvtdWLDGWchZTyk2E0KiR4zlrKkEbt9XW4u6uW6SNDNAzwHZ7BTTq3YkSm0XS7sS+ka/na8ZuyJmbJMwxK9T1NJJs1IR47D3S2vj2mXXlobabUtaiAlKRcknUAMZV0F56zJvT8iEKVCVY77PuhZHyWvLxlTuesl0Te3qqlysy08JMnxI4PQ0n+onEWDFhMNxokdphhsWQ20gIQkbEpFgPeyqnBg/rMhCCBfc3ur6hw4lZ1hNbpMdlbpGokhKT+OHs7zVf3EdpHzgVfzGDnGqnnbHUkYGcqqOZo/OT+VsMZ5eBG/w0K2lJKPaxDzfTJBCXFLZUTbxk3+q2GJTEhAcYdQtB1KSoEckqdLp1ThvQqnEZkxXU7lbLyAtCusKxnPubKVNU9NyhOMB03Pekm7kfsXwqRjM+jfOWUVLNZochEcapLY31gj56LgduLHZxNjjL+TM0ZpcDdCokuWL2LiEWaSflOKskYyt3M8t0tSM31hLCNZiwbLc7XVCwxljR9lHKDaRQ6Kww6BZUlQ32Qr6a7nAAHvFLSkEqUAAMT81UyGClDm/r2N6u1WKhm2oywpDKt4bPMjX/8ALC3HHCVLWSSbm+338adLhuB2O+tChzg4pOdOFDVRRbm31A/EflhiQ1IbS6y4laFaik3HJCkKBBAII4RjMOibIOYCtc/LkZD6tb0W8Zy+0luwVisdzDRX925RMyS4uxMtlD46gUFGKj3NWdY11wajSpbf71bS/qUnErQTpPjXIy2Xk7WZLCv68L0R6R2/KylO+ikK/A4Tom0jL1ZRqHa3bEXQjpPlkBGVXkDa48yj8V4p/c358lEGW/TIaOcOSCtfYG0qxSO5gp6AldczQ+9tbhsBr+NwqxRNDWjygFDjGXmpL4N99nEyVH6K/FGGmGY7SGm20oQgAJSkAJAHMAPeyJ8WEjfJD6EX1XP4DWTioZ1ZRdEBndnmWvgT2DE6tVCoE98SFFPMgGyR2DBN+E8XSq3MpToUyu7ZIK0HUcUmsRapGK46wlfBuknWnk5AOsY3I2YsNmLAagPf1HMFNp+6S68FOD9mjhV+QxUM5THrohJDKNutWHpL8halvOqWo6yokk8fT58inSESI6ylST2EbDtGKRU49VitvtkJI8tOsg7OOJA1nFSzhQKaVIkT21OA23DV3Fdu51Yk6VICCREpzznS4pKPw3WDpXk34KOgD9+fZwxpWB4JNIIG1D1/xTinaSMvylJDy3YyjwDfUXH1pviFPhTGw/FkNuoOpbagofdxU2fHhMqekOBDadus4q+bJcwqahkssfxnrOFKKjckk8iodWcpUxDySS2rgcTfWMMPtvstvNKCkLSFJI5weMzFm6mZfQUvL32UQCiOg+N1q2DFbzlWa2paXHyzGOplolKbfKOtWLnb72FUp9NeD8GU4y4OdBtfr2jGW9JTbqm4tdQlCr2D6fIPzxzYadbdQhxpYUlQBBBuCD7+pVKPTIq5D6uAcCUjWpWwYqtWlVV9Tr6yE6kIHkpHJcl1cqS5TXjfc+O3f7xxedc6IoqTAgEKnqHCdYZB5ztVsGH5D0p5x+Q6px1ZKlKUbknico5zk0J5EWWtTtPWeFOstdKejaMR5TMxhuQw4lbTiQpKkm4UD7151thtbriwlCElSidQAxXaw7VZalXsyglLadg/M8mpstcKbHko1oWDbb0duGXEOtIcQbpUkKB2g8Tm3MSMv0xbySDJduhhB+FtPQMSJD0p5yRIcK3XFFSlK1kni9HealU+UijzFjvZ5X9iVHyHDzdSve5yqqm2kU5pViuynCNnMOUZVld80lgKsVNEtns4QPqPEKNgTjOdbVWq0+tC7xmCWmRzWTrV2njEqUhQUkkEG4Ixk6ue7dFjPuuXeau08Plp5+0cP6VrS22pSiAACSdgGKpMXPnSJK/PWSBsHMOzlGRX/EmsW8koWOs3B4jONTNNoNQkIUUr3ve27awpzxb4PCTxujGpKYqkinKV4klvdJ+e3+nMkjvakS1DWtIb7FcB+7BNyTyjI67S5CDzsqP1EcR
pUkqRTqfFBtvr6l9iE2/nx2V5XeeYKS9/3CEdizuD+OEm4/RnVak0+OhJtd256gm38+U5JTeY+rYyofeniNKyjv8AR0c24f8AxTx1NJTUYKhrD7Z/iGEeSP0Z63Pe8Xc6hur9dxynI7JtNeOqyAO0m/EaVv1mj/Mf/FPHU7/mEL98j8cI8gfozq2pdOZWnmdseopJ5TlKIWKShZFi8tSz2eL/AC4jSsx/Y0qR8FbqD9IA8dQmFSK1S2UjypTQ7N0L4SLJ/RmOOJVIloSk+Ijdjb4nCcEWJB5PDjrlSWWGxdS1hI7TiHHRGjsso8htCUDqSLcRpDppl5ckLABXHUl8DYBwH7jx2juAZeYmXyk7iM2t07L23I/HA/QtIWkpULggjFXgqp8+RHINkrO5O0axyfJlLK3l1F1Pit3S3cecRr7BxMqM3IjusOpCkOoKVjakixGKzTXaTU5cB4HdNOEAnzk6we0cbo3o5g0hU91FnZhCh+7T5PvM6UjfWkTmE3W0LObSnmPZyanQHqjKajMjhUeE2uANpxAhNQYzTDabNtpsOk85PXxWkjLJmRk1mGjdPR0WdA85rb9HjMqUByv1Rtgg97N2W+vYjZ1qww02y2htCQlCEhKUjUAPeLQlxCkLAUlQsQdRBxmKiOUqWopSox1m6FHht0HkjDDsl1DLKCpajYAYoFFRSYw3dlSF8K1bPkji1JCgUkXBxnjJTlJecqVOZvCWbrQn9kT/AEniqVSplYmNQoTRW4s9iRzqUeYDGXaBFoFPbiMC6/KdctYrVt/Ie+qECNMjKjyE7oLHaOkYrVEkUl8hQKmVE7hY1HkUOFInPoYjtla1bMUDLzNKb3xyy5KvKXzDoTxrjaHEKQ4gKSoWIIuCDzYzTo5WlTk2ggEG6lxr6vmH+WHmXWHFtPNqQ4k2UlQIIOwg+/y/lCq19xKm2yzFv4z7g8X6I844oOXoFBiiPDb4TYuOny1kbTxEmOxKaVHebS4hXlA4rWTpEdSnqfdxu5JR5w6tuFtONKKXEFJBsQeOShSzZIvilZTnTShySCwyfhDxj1DFPpcSmtBuM0B8JR4VK6zyCr5apFaQROiJWsCwdT4qx1KGKloseG7XSp4UnmQ+LfxJxJyLmaMoj3OU4n4TakqwrLVfSbGjy/sV4ZyhmN/yKRI+kncf6rYhaM64+QZa2YyOk7tQ7E4o+jyiU0h2SgzHhzu+R2I/PCEIbASgAJAsAOLqFFp84HvphKlkCyhwK4OnZiXkcElUKV9Fz2hh/KdZataPuwfOSoEYXQqog2MJ49Taj/LHuNVPiEj7Jf5Y9xqp8QkfZL/LHuNVPiEj7Jf5Y9xqp8QkfZL/ACx7jVT4hI+yX+WPcaqfEJH2S/yx7jVT4hI+yX+WEUCquaoTw+chQ/EYYyjWHQSpgN9K1C33XOIuR0+VMlfRbH8ziFRKdTwksRkhY89XjK+/VyWwxYf5ef/EADgRAAIBAgMDCQUHBQAAAAAAAAECAwQRAAUgMUFhEhMhIjBAUXGREDJQU6EGFDNCYoGSUnKiwdH/2gAIAQIBAT8A+L37e/wE9zHfj3k90Gk90Gk9ztqPcbd3t3e3b2129qRySGyIScRZY56ZXtwGFoKZfyX8zj7rT/JX0w+X0zbFKngcTZdLHdozyx9cbOg9pbFtENJPNYqlh4nEOWxJYykufQYVFQWRQBw1VVGk4LKAJPHxwysjFWFiNUsscKGSVwqjecVOfgErSxX/AFNhs5r2P4oHkoxHndchHKZXHFf+YpM7gnISYc0/+J0KpYhVFycUtCkQDygM/huHZZjThl59R1l97iNMsqQxvLIbKoucV1dLWykkkRg9VdOUZmyOtLO10PQhO4+Hty6mCrz7jpPu+XZsoZSp2EEYkQxyOh/KSNGf1JAipVO3rNq2EHGW1P3mkikJ6w6reYxGpd0QbyBhVCqFGwC3aV4tUycbHRnLFq+UeAUfTX9nmJhqE3BwfUYoxeqi8+1ryDVPwA0ZwCMwm4hT9Nf2eB5qobcWUfTFM3Inib9Q7QkAEnYMSvzkrv4knRn8BEkVQB0Ecg+Y15RTmCij5Qsz9c/v7KWYTQo28dDefZ5hUBI+aU9Z9vAaamnSqheF9jD0OKmmlpZWilFiNh3Eacqy9quUSSLaFDc8T4YAt7KWpNPJfap94YR1kUOhuD2NTVJTr4vuGHdpHZ3NydVVSQVaciZfIjaMVOR1URJhtKvocNSVSmzU8gP9pxHQVkhASnf9xbFJkJuHq2Fv6F/2cIiRoqIoVQLADRBUSwG6Ho3g7DiLMYX6Huh9RgTwtslT1GOdi+YnqMc7F8xP5DHOxfMT+Qxz0XzE9Rh6ymTbKD5dOJsyY3WFbcThmZiWYkk7z8W//8QAOREAAgECAgYHBwMDBQAAAAAAAQIDAAQFERITICExkQYwQVFSYXEQFCJAQlOBMlChI4KSYnJzsbL/2gAIAQMBAT8A/YCyjiwFa2PxjnWtj8Y51rY/GOda2PxjnWtj8Y51rY/GOda2PxjnWtj8Y51rY/GOda2PxjnWtj8YoMp4EHq5LlV3LvNPNI/FuXW5kcDUdw6cd4pJFkGanbJABJqacvmq7l+RR2Rgy0jiRQw2rmXM6CncOPydq+T6B4HZmfQjJ7eA+UQ6LqfMbN229V/Pyg4j1GzcnOVvlIV0pFH52bgZSt8pbRaC6TcTs3YycHvHyQBJAFQ2+WTyfgbVymlHmOI+Rjt3fe3wio4kj4Df39RNGY38jw60AscgMzSWrHe5yFJEkfBd/f1UiLIpU1JG0ZyPVJE7/pWktRxc/gUqKgyVQOtZVcZMMxUlqw3pvHdRBU5EEbIBO4CktpG3t8IpLeNOzM+fsSN5DkikmosPY75Wy8hS2duv0Z+te7wfaXlT2Nu3BSvoalsJE3xnTH81vG49UVVtzAGjbRH6cq90TxGvdE8RoW0Q7M6Cqu5VA9kVrNLvC5DvNRWEa75CWPIUqqgyVQB5bVzarMCy7n7++mUoxVhkRtW9tPdypBbRNJI3BVFYf0FdlWTErnQP24uP5JqLojgUYyNqznvZ2q46GYLKDq0khPejk/8ArOsU6HX1irTWre8xDeQBk4/FHduPtALEKozJq3skjAaQaT/wOqv4NJdco3jj6bNtby3c8VtAulJIwVRWCYJb4PbKqqGnYDWSdpPcPLZ6V9HEmikxOxjAlQaUqL9Q7x5+2xgCrrmG8/p9OrIDAg8CKkTQd07iRsdBcPV3ucSkX9H9KP1O8naIBBBG410gsBh2K3MCDKNjrE/2tSLpuqDtIFKAqhRwA6y9GVw/mAdjohEEwK2I4u0jH/Lb6exgXljL2tEwP9pq0GdzF69bfHO4fyAGx0ScPgVpl9JkB/yO309cG6w9O0ROeZq3bQnib/UOsJyBJqV9ZI7952Ogl8DDdYezfEra1B5HcdvpTfC+xicoc44QIl/t4/z7LaUTRK3bwPr1d9PoJqlPxN/A2cOvpsNvIbyA/Eh3jvHaDWHYjbYnapdWzgg/qHap7js9JseTDLZreBwbuVSAB9AP1GiSSSeJ9l
tcGB8/pPEUjq6hlOYPU3FykC97dgp3aRi7HMnaw3FbzCptdaSZeJDvVh5isO6aYdcqq3gNvJ25705ikxXDJAGS/gI/5FqfHMIt10pb+H0DBjyGdYr03XRaLCojnw1sg/6FTTSzyPNNIXkc5szHMnYhuJIDmh3doPCo7+F9z5oaE0R4SrzrWR/cXnWsj+4vOtZH9xeYrWx/cXmKe6gTjID6b6lxAnMQrl5mmYsSzEkn92//2Q==" + ], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` — ID of one of the available [Embedding models](https://docs.cohere.com/docs/cohere-embed). + +
+
+ +
+
+ +**input_type:** `EmbedInputType` + +
+
+ +
+
+ +**texts:** `typing.Optional[typing.Sequence[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`. + +
+
+ +
+
+ +**images:** `typing.Optional[typing.Sequence[str]]` + +An array of image data URIs for the model to embed. Maximum number of images per call is `1`. + +The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg`, `image/png`, `image/webp`, or `image/gif` format and has a maximum size of 5MB. + +Image embeddings are supported with Embed v3.0 and newer models. + +
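+
A small sketch of building such a data URI from a local JPEG with the standard library (the file path is illustrative):

```python
import base64

with open("photo.jpg", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")
# The file must use a supported MIME type and stay under the 5MB limit.
image_uri = f"data:image/jpeg;base64,{encoded}"
```
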
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Sequence[EmbedInput]]` — An array of inputs for the model to embed. Maximum number of inputs per call is `96`. An input can contain a mix of text and image components. + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to embed per input. If the input text is longer than this, it will be truncated according to the `truncate` parameter. + +
+
+ +
+
+ +**output_dimension:** `typing.Optional[int]` + +The number of dimensions of the output embedding. This is only available for `embed-v4` and newer models. +Possible values are `256`, `512`, `1024`, and `1536`. The default is `1536`. + +
+
+ +
+
+ +**embedding_types:** `typing.Optional[typing.Sequence[EmbeddingType]]` + +Specifies the types of embeddings you want to get back. Can be one or more of the following types. + +* `"float"`: Use this when you want to get back the default float embeddings. Supported with all Embed models. +* `"int8"`: Use this when you want to get back signed int8 embeddings. Supported with Embed v3.0 and newer Embed models. +* `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Supported with Embed v3.0 and newer Embed models. +* `"binary"`: Use this when you want to get back signed binary embeddings. Supported with Embed v3.0 and newer Embed models. +* `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Supported with Embed v3.0 and newer Embed models. +* `"base64"`: Use this when you want to get back base64 embeddings. Supported with Embed v3.0 and newer Embed models. + +
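+
For instance, a hedged sketch requesting two types at once, assuming the response exposes matching per-type fields (`float_`, `int8`):

```python
from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
response = client.v2.embed(
    model="embed-v4.0",
    input_type="search_document",
    texts=["hello world"],
    embedding_types=["float", "int8"],  # one entry per requested type
)
# Each requested type comes back as its own field on the response.
print(len(response.embeddings.float_[0]))
print(len(response.embeddings.int8[0]))
```
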
+
+ +
+
+ +**truncate:** `typing.Optional[V2EmbedRequestTruncate]` + +One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. + +Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + +If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + +
+
+ +
+
+
**priority:** `typing.Optional[int]`

The priority of the request. A lower value means earlier handling, and the default value of `0` is the highest priority. Higher-priority requests are handled first and are dropped last when the system is under load.

+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.v2.rerank(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
```python
from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
client.v2.rerank(
    documents=[
        "Carson City is the capital city of the American state of Nevada.",
        "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.",
        "Capitalization or capitalisation in English grammar is the use of a capital letter at the start of a word. English usage varies from capitalization in other languages.",
        "Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district.",
        "Capital punishment has existed in the United States since before the United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states.",
    ],
    query="What is the capital of the United States?",
    top_n=3,
    model="rerank-v3.5",
)

```
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` — The identifier of the model to use, eg `rerank-v3.5`. + +
+
+ +
+
+ +**query:** `str` — The search query + +
+
+ +
+
+ +**documents:** `typing.Sequence[str]` + +A list of texts that will be compared to the `query`. +For optimal performance we recommend against sending more than 1,000 documents in a single request. + +**Note**: long documents will automatically be truncated to the value of `max_tokens_per_doc`. + +**Note**: structured data should be formatted as YAML strings for best performance. + +
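+
Per the YAML note above, a hedged sketch that flattens structured records into YAML strings before reranking (PyYAML is assumed to be installed):

```python
import yaml

from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
records = [
    {"title": "Carson City", "text": "Capital of Nevada"},
    {"title": "Washington, D.C.", "text": "Capital of the United States"},
]
# Serialize each record to a YAML string, as recommended for structured data.
documents = [yaml.dump(record, sort_keys=False) for record in records]
response = client.v2.rerank(
    model="rerank-v3.5",
    query="What is the capital of the United States?",
    documents=documents,
    top_n=1,
)
```
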
+
+ +
+
+ +**top_n:** `typing.Optional[int]` — Limits the number of returned rerank results to the specified value. If not passed, all the rerank results will be returned. + +
+
+ +
+
+ +**max_tokens_per_doc:** `typing.Optional[int]` — Defaults to `4096`. Long documents will be automatically truncated to the specified number of tokens. + +
+
+ +
+
+
**priority:** `typing.Optional[int]`

The priority of the request. A lower value means earlier handling, and the default value of `0` is the highest priority. Higher-priority requests are handled first and are dropped last when the system is under load.

+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Batches +
client.batches.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List the batches for the current user +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.batches.list( + page_size=1, + page_token="page_token", + order_by="order_by", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +The maximum number of batches to return. The service may return fewer than +this value. +If unspecified, at most 50 batches will be returned. +The maximum value is 1000; values above 1000 will be coerced to 1000. + +
+
+ +
+
+ +**page_token:** `typing.Optional[str]` + +A page token, received from a previous `ListBatches` call. +Provide this to retrieve the subsequent page. + +
+
+ +
+
+ +**order_by:** `typing.Optional[str]` + +Batches can be ordered by creation time or last updated time. +Use `created_at` for creation time or `updated_at` for last updated time. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.batches.create(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Creates and executes a batch from an uploaded dataset of requests +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client +from cohere.batches import Batch + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.batches.create( + request=Batch( + name="name", + input_dataset_id="input_dataset_id", + model="model", + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Batch` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.batches.retrieve(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Retrieves a batch +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.batches.retrieve( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The batch ID. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.batches.cancel(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Cancels an in-progress batch +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.batches.cancel( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The batch ID. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## EmbedJobs +
client.embed_jobs.list() +
+
+ +#### 📝 Description + +
+
+ +
+
+
The list embed jobs endpoint allows users to view the embed job history for that specific user.
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.embed_jobs.list() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.embed_jobs.create(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
This API launches an async Embed job for a [Dataset](https://docs.cohere.com/docs/datasets) of type `embed-input`. The result of a completed embed job is a new Dataset of type `embed-output`, which contains the original text entries and the corresponding embeddings.
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.embed_jobs.create( + model="model", + dataset_id="dataset_id", + input_type="search_document", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` + +ID of the embedding model. + +Available models and corresponding embedding dimensions: + +- `embed-english-v3.0` : 1024 +- `embed-multilingual-v3.0` : 1024 +- `embed-english-light-v3.0` : 384 +- `embed-multilingual-light-v3.0` : 384 + +
+
+ +
+
+
**dataset_id:** `str` — ID of a [Dataset](https://docs.cohere.com/docs/datasets). The Dataset must be of type `embed-input` and must have a validation status of `Validated`.

+
+ +
+
+ +**input_type:** `EmbedInputType` + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — The name of the embed job. + +
+
+ +
+
+ +**embedding_types:** `typing.Optional[typing.Sequence[EmbeddingType]]` + +Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. + +* `"float"`: Use this when you want to get back the default float embeddings. Valid for all models. +* `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for v3 and newer model versions. +* `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for v3 and newer model versions. +* `"binary"`: Use this when you want to get back signed binary embeddings. Valid for v3 and newer model versions. +* `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for v3 and newer model versions. + +
+
+ +
+
+ +**truncate:** `typing.Optional[CreateEmbedJobRequestTruncate]` + +One of `START|END` to specify how the API will handle inputs longer than the maximum token length. + +Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.embed_jobs.get(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This API retrieves the details about an embed job started by the same user. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.embed_jobs.get( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The ID of the embed job to retrieve. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.embed_jobs.cancel(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This API allows users to cancel an active embed job. Once invoked, the embedding process will be terminated, and users will be charged for the embeddings processed up to the cancellation point. It's important to note that partial results will not be available to users after cancellation. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.embed_jobs.cancel( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The ID of the embed job to cancel. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Datasets +
client.datasets.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List datasets that have been created. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +import datetime + +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.datasets.list( + dataset_type="datasetType", + before=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + after=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + limit=1.1, + offset=1.1, + validation_status="unknown", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**dataset_type:** `typing.Optional[str]` — optional filter by dataset type + +
+
+ +
+
+ +**before:** `typing.Optional[dt.datetime]` — optional filter before a date + +
+
+ +
+
+ +**after:** `typing.Optional[dt.datetime]` — optional filter after a date + +
+
+ +
+
+ +**limit:** `typing.Optional[float]` — optional limit to number of results + +
+
+ +
+
+ +**offset:** `typing.Optional[float]` — optional offset to start of results + +
+
+ +
+
+ +**validation_status:** `typing.Optional[DatasetValidationStatus]` — optional filter by validation status + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.create(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a dataset by uploading a file. See ['Dataset Creation'](https://docs.cohere.com/docs/datasets#dataset-creation) for more information. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
```python
from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
client.datasets.create(
    name="name",
    type="embed-input",
    keep_original_file=True,
    skip_malformed_input=True,
    text_separator="text_separator",
    csv_delimiter="csv_delimiter",
    # The required file upload; the path here is illustrative.
    data=open("path/to/dataset.jsonl", "rb"),
)

```
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**name:** `str` — The name of the uploaded dataset. + +
+
+ +
+
+ +**type:** `DatasetType` — The dataset type, which is used to validate the data. The only valid type is `embed-input` used in conjunction with the Embed Jobs API. + +
+
+ +
+
+
**data:** `core.File` — See core.File for more documentation

+
+ +
+
+ +**keep_original_file:** `typing.Optional[bool]` — Indicates if the original file should be stored. + +
+
+ +
+
+ +**skip_malformed_input:** `typing.Optional[bool]` — Indicates whether rows with malformed input should be dropped (instead of failing the validation check). Dropped rows will be returned in the warnings field. + +
+
+ +
+
+ +**keep_fields:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — List of names of fields that will be persisted in the Dataset. By default the Dataset will retain only the required fields indicated in the [schema for the corresponding Dataset type](https://docs.cohere.com/docs/datasets#dataset-types). For example, datasets of type `embed-input` will drop all fields other than the required `text` field. If any of the fields in `keep_fields` are missing from the uploaded file, Dataset validation will fail. + +
+
+ +
+
+ +**optional_fields:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — List of names of fields that will be persisted in the Dataset. By default the Dataset will retain only the required fields indicated in the [schema for the corresponding Dataset type](https://docs.cohere.com/docs/datasets#dataset-types). For example, Datasets of type `embed-input` will drop all fields other than the required `text` field. If any of the fields in `optional_fields` are missing from the uploaded file, Dataset validation will pass. + +
+
+ +
+
+ +**text_separator:** `typing.Optional[str]` — Raw .txt uploads will be split into entries using the text_separator value. + +
+
+ +
+
+ +**csv_delimiter:** `typing.Optional[str]` — The delimiter used for .csv uploads. + +
+
+ +
+
+
**eval_data:** `typing.Optional[core.File]` — See core.File for more documentation

+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.get_usage() +
+
+ +#### 📝 Description + +
+
+ +
+
+
View the dataset storage usage for your Organization. Each Organization can have up to 10GB of storage across all of its users.
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.datasets.get_usage() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.get(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Retrieve a dataset by ID. See ['Datasets'](https://docs.cohere.com/docs/datasets) for more information. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.datasets.get( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.delete(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a dataset by ID. Datasets are automatically deleted after 30 days, but they can also be deleted manually. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.datasets.delete( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Connectors +
client.connectors.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Returns a list of connectors ordered by descending creation date (newer first). See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.connectors.list( + limit=1.1, + offset=1.1, +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**limit:** `typing.Optional[float]` — Maximum number of connectors to return [0, 100]. + +
+
+ +
+
+ +**offset:** `typing.Optional[float]` — Number of connectors to skip before returning results [0, inf]. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.connectors.create(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
Creates a new connector. The connector is tested during registration, and registration will be cancelled if the test is unsuccessful. See ['Creating and Deploying a Connector'](https://docs.cohere.com/v1/docs/creating-and-deploying-a-connector) for more information.
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.connectors.create( + name="name", + url="url", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**name:** `str` — A human-readable name for the connector. + +
+
+ +
+
+ +**url:** `str` — The URL of the connector that will be used to search for documents. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — A description of the connector. + +
+
+ +
+
+ +**excludes:** `typing.Optional[typing.Sequence[str]]` — A list of fields to exclude from the prompt (fields remain in the document). + +
+
+ +
+
+ +**oauth:** `typing.Optional[CreateConnectorOAuth]` — The OAuth 2.0 configuration for the connector. Cannot be specified if service_auth is specified. + +
+
+ +
+
+ +**active:** `typing.Optional[bool]` — Whether the connector is active or not. + +
+
+ +
+
+ +**continue_on_failure:** `typing.Optional[bool]` — Whether a chat request should continue or not if the request to this connector fails. + +
+
+ +
+
+ +**service_auth:** `typing.Optional[CreateConnectorServiceAuth]` — The service to service authentication configuration for the connector. Cannot be specified if oauth is specified. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.connectors.get(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Retrieve a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.connectors.get( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The ID of the connector to retrieve. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.connectors.delete(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.connectors.delete( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The ID of the connector to delete. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.connectors.update(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update a connector by ID. Omitted fields will not be updated. See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.connectors.update( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The ID of the connector to update. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — A human-readable name for the connector. + +
+
+ +
+
+ +**url:** `typing.Optional[str]` — The URL of the connector that will be used to search for documents. + +
+
+ +
+
+ +**excludes:** `typing.Optional[typing.Sequence[str]]` — A list of fields to exclude from the prompt (fields remain in the document). + +
+
+ +
+
+ +**oauth:** `typing.Optional[CreateConnectorOAuth]` — The OAuth 2.0 configuration for the connector. Cannot be specified if service_auth is specified. + +
+
+ +
+
+ +**active:** `typing.Optional[bool]` + +
+
+ +
+
+ +**continue_on_failure:** `typing.Optional[bool]` + +
+
+ +
+
+ +**service_auth:** `typing.Optional[CreateConnectorServiceAuth]` — The service to service authentication configuration for the connector. Cannot be specified if oauth is specified. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.connectors.o_auth_authorize(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
Authorize the connector with the given ID for the connector OAuth app. See ['Connector Authentication'](https://docs.cohere.com/docs/connector-authentication) for more information.
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.connectors.o_auth_authorize( + id="id", + after_token_redirect="after_token_redirect", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The ID of the connector to authorize. + +
+
+ +
+
+ +**after_token_redirect:** `typing.Optional[str]` — The URL to redirect to after the connector has been authorized. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Models +
client.models.get(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Returns the details of a model, provided its name. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.models.get( + model="command-a-03-2025", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.models.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Returns a list of models available for use. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.models.list( + page_size=1.1, + page_token="page_token", + endpoint="chat", + default_only=True, +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page_size:** `typing.Optional[float]` + +Maximum number of models to include in a page +Defaults to `20`, min value of `1`, max value of `1000`. + +
+
+ +
+
+ +**page_token:** `typing.Optional[str]` — Page token provided in the `next_page_token` field of a previous response. + +
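+
A minimal pagination sketch built on the `next_page_token` field described above; the `models` attribute on the response is assumed for illustration:

```python
from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
page_token = None
while True:
    page = client.models.list(page_size=20, page_token=page_token)
    for model in page.models:
        print(model.name)
    page_token = page.next_page_token
    if not page_token:  # no further pages
        break
```
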
+
+ +
+
+ +**endpoint:** `typing.Optional[CompatibleEndpoint]` — When provided, filters the list of models to only those that are compatible with the specified endpoint. + +
+
+ +
+
+ +**default_only:** `typing.Optional[bool]` — When provided, filters the list of models to only the default model to the endpoint. This parameter is only valid when `endpoint` is provided. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## /finetuning +
client.finetuning.list_finetuned_models(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Returns a list of fine-tuned models that the user has access to. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.finetuning.list_finetuned_models( + page_size=1, + page_token="page_token", + order_by="order_by", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Maximum number of results to be returned by the server. If 0, defaults to +50. + +
+
+ +
+
+ +**page_token:** `typing.Optional[str]` — Request a specific page of the list results. + +
+
+ +
+
+
+**order_by:** `typing.Optional[str]`
+
+Comma-separated list of fields. For example: "created_at,name". The default
+sorting order is ascending. To specify descending order for a field, append
+" desc" to the field name. For example: "created_at desc,name".
+
+Supported sorting fields:
+  - created_at (default)
+
+
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
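+For example, to list the most recently created models first, append `" desc"` to the sort field. A minimal sketch, assuming the response exposes a `finetuned_models` field and each model carries `name` and `status`:
+
+```python
+from cohere import Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+
+# Newest models first; the response field names are assumptions.
+response = client.finetuning.list_finetuned_models(
+    page_size=50,
+    order_by="created_at desc",
+)
+for finetuned_model in response.finetuned_models:
+    print(finetuned_model.name, finetuned_model.status)
+```
+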
client.finetuning.create_finetuned_model(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Creates a new fine-tuned model. The model will be trained on the dataset specified in the request body. The training process may take some time, and the model will be available once the training is complete. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client +from cohere.finetuning.finetuning import BaseModel, FinetunedModel, Settings + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.finetuning.create_finetuned_model( + request=FinetunedModel( + name="name", + settings=Settings( + base_model=BaseModel( + base_type="BASE_TYPE_UNSPECIFIED", + ), + dataset_id="dataset_id", + ), + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `FinetunedModel` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
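+Because training is asynchronous, callers typically poll until the model reaches a terminal state. A minimal sketch, assuming the create and get responses wrap the model in a `finetuned_model` field and that `STATUS_READY`/`STATUS_FAILED` are terminal `status` values (both assumptions, not a confirmed contract):
+
+```python
+import time
+
+from cohere import Client
+from cohere.finetuning.finetuning import BaseModel, FinetunedModel, Settings
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+
+created = client.finetuning.create_finetuned_model(
+    request=FinetunedModel(
+        name="name",
+        settings=Settings(
+            base_model=BaseModel(base_type="BASE_TYPE_UNSPECIFIED"),
+            dataset_id="dataset_id",
+        ),
+    ),
+)
+
+# Poll until training reaches a terminal state. The field names and
+# status values used here are assumptions about the response shape.
+model_id = created.finetuned_model.id
+while True:
+    current = client.finetuning.get_finetuned_model(id=model_id).finetuned_model
+    if current.status in ("STATUS_READY", "STATUS_FAILED"):
+        break
+    time.sleep(60)
+print(current.status)
+```
+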
client.finetuning.get_finetuned_model(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Retrieve a fine-tuned model by its ID. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.finetuning.get_finetuned_model( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The fine-tuned model ID. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.finetuning.delete_finetuned_model(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Deletes a fine-tuned model. The model will be removed from the system and will no longer be available for use. +This operation is irreversible. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.finetuning.delete_finetuned_model( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — The fine-tuned model ID. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.finetuning.update_finetuned_model(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Updates the fine-tuned model with the given ID. The model will be updated with the new settings and name provided in the request body. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client +from cohere.finetuning.finetuning import BaseModel, Settings + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.finetuning.update_finetuned_model( + id="id", + name="name", + settings=Settings( + base_model=BaseModel( + base_type="BASE_TYPE_UNSPECIFIED", + ), + dataset_id="dataset_id", + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — FinetunedModel ID. + +
+
+ +
+
+ +**name:** `str` — FinetunedModel name (e.g. `foobar`). + +
+
+ +
+
+
+**settings:** `Settings` — FinetunedModel settings, such as the dataset and hyperparameters.
+
+
+
+ +
+
+ +**status:** `typing.Optional[Status]` — Current stage in the life-cycle of the fine-tuned model. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.finetuning.list_events(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Returns a list of events that occurred during the life-cycle of the fine-tuned model. +The events are ordered by creation time, with the most recent event first. +The list can be paginated using `page_size` and `page_token` parameters. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.finetuning.list_events( + finetuned_model_id="finetuned_model_id", + page_size=1, + page_token="page_token", + order_by="order_by", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**finetuned_model_id:** `str` — The parent fine-tuned model ID. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Maximum number of results to be returned by the server. If 0, defaults to +50. + +
+
+ +
+
+ +**page_token:** `typing.Optional[str]` — Request a specific page of the list results. + +
+
+ +
+
+
+**order_by:** `typing.Optional[str]`
+
+Comma-separated list of fields. For example: "created_at,name". The default
+sorting order is ascending. To specify descending order for a field, append
+" desc" to the field name. For example: "created_at desc,name".
+
+Supported sorting fields:
+  - created_at (default)
+
+
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.finetuning.list_training_step_metrics(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Returns a list of metrics measured during the training of a fine-tuned model. +The metrics are ordered by step number, with the most recent step first. +The list can be paginated using `page_size` and `page_token` parameters. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.finetuning.list_training_step_metrics( + finetuned_model_id="finetuned_model_id", + page_size=1, + page_token="page_token", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**finetuned_model_id:** `str` — The parent fine-tuned model ID. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Maximum number of results to be returned by the server. If 0, defaults to +50. + +
+
+ +
+
+ +**page_token:** `typing.Optional[str]` — Request a specific page of the list results. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
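+To collect the full training curve, page through the results and accumulate the steps. A minimal sketch, assuming the response exposes `step_metrics` and `next_page_token` fields, and that each step carries `step_number` and a `metrics` map:
+
+```python
+from cohere import Client
+
+client = Client(
+    client_name="YOUR_CLIENT_NAME",
+    token="YOUR_TOKEN",
+)
+
+# Accumulate metrics across all pages; the field names are assumptions.
+steps = []
+page_token = None
+while True:
+    page = client.finetuning.list_training_step_metrics(
+        finetuned_model_id="finetuned_model_id",
+        page_size=50,
+        page_token=page_token,
+    )
+    steps.extend(page.step_metrics)
+    page_token = page.next_page_token
+    if not page_token:
+        break
+
+for step in steps:
+    print(step.step_number, step.metrics)
+```
+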
+ diff --git a/requirements.txt b/requirements.txt index d4af08535..f01a8da5a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ fastavro==1.9.4 httpx>=0.21.2 httpx-sse==0.4.0 pydantic>= 1.9.2 -pydantic-core==2.18.2 +pydantic-core>=2.18.2 requests==2.0.0 tokenizers>=0.15,<1 types-requests==2.0.0 diff --git a/src/cohere/__init__.py b/src/cohere/__init__.py index a9308a07f..e325bfecb 100644 --- a/src/cohere/__init__.py +++ b/src/cohere/__init__.py @@ -2,303 +2,645 @@ # isort: skip_file -from .types import ( - ApiMeta, - ApiMetaApiVersion, - ApiMetaBilledUnits, - ApiMetaTokens, - AssistantChatMessageV2, - AssistantMessage, - AssistantMessageResponse, - AssistantMessageResponseContentItem, - AssistantMessageV2Content, - AssistantMessageV2ContentItem, - AuthTokenType, - ChatCitation, - ChatCitationGenerationEvent, - ChatCitationType, - ChatConnector, - ChatContentDeltaEvent, - ChatContentDeltaEventDelta, - ChatContentDeltaEventDeltaMessage, - ChatContentDeltaEventDeltaMessageContent, - ChatContentEndEvent, - ChatContentStartEvent, - ChatContentStartEventDelta, - ChatContentStartEventDeltaMessage, - ChatContentStartEventDeltaMessageContent, - ChatContentStartEventDeltaMessageContentType, - ChatDataMetrics, - ChatDebugEvent, - ChatDocument, - ChatDocumentSource, - ChatFinishReason, - ChatMessage, - ChatMessageEndEvent, - ChatMessageEndEventDelta, - ChatMessageStartEvent, - ChatMessageStartEventDelta, - ChatMessageStartEventDeltaMessage, - ChatMessageV2, - ChatMessages, - ChatRequestCitationQuality, - ChatRequestPromptTruncation, - ChatRequestSafetyMode, - ChatSearchQueriesGenerationEvent, - ChatSearchQuery, - ChatSearchResult, - ChatSearchResultConnector, - ChatSearchResultsEvent, - ChatStreamEndEvent, - ChatStreamEndEventFinishReason, - ChatStreamEvent, - ChatStreamEventType, - ChatStreamRequestCitationQuality, - ChatStreamRequestPromptTruncation, - ChatStreamRequestSafetyMode, - ChatStreamStartEvent, - ChatTextContent, - ChatTextGenerationEvent, - ChatTextResponseFormat, - ChatTextResponseFormatV2, - ChatThinkingContent, - ChatToolCallDeltaEvent, - ChatToolCallDeltaEventDelta, - ChatToolCallDeltaEventDeltaMessage, - ChatToolCallDeltaEventDeltaMessageToolCalls, - ChatToolCallDeltaEventDeltaMessageToolCallsFunction, - ChatToolCallEndEvent, - ChatToolCallStartEvent, - ChatToolCallStartEventDelta, - ChatToolCallStartEventDeltaMessage, - ChatToolCallsChunkEvent, - ChatToolCallsGenerationEvent, - ChatToolMessage, - ChatToolPlanDeltaEvent, - ChatToolPlanDeltaEventDelta, - ChatToolPlanDeltaEventDeltaMessage, - ChatToolSource, - ChatbotMessage, - CheckApiKeyResponse, - Citation, - CitationEndEvent, - CitationGenerationStreamedChatResponse, - CitationOptions, - CitationOptionsMode, - CitationStartEvent, - CitationStartEventDelta, - CitationStartEventDeltaMessage, - CitationType, - ClassifyDataMetrics, - ClassifyExample, - ClassifyRequestTruncate, - ClassifyResponse, - ClassifyResponseClassificationsItem, - ClassifyResponseClassificationsItemClassificationType, - ClassifyResponseClassificationsItemLabelsValue, - CompatibleEndpoint, - Connector, - ConnectorAuthStatus, - ConnectorOAuth, - Content, - CreateConnectorOAuth, - CreateConnectorResponse, - CreateConnectorServiceAuth, - CreateEmbedJobResponse, - Dataset, - DatasetPart, - DatasetType, - DatasetValidationStatus, - DebugStreamedChatResponse, - DeleteConnectorResponse, - DetokenizeResponse, - Document, - DocumentContent, - DocumentSource, - DocumentToolContent, - EmbedByTypeResponse, - EmbedByTypeResponseEmbeddings, - EmbedContent, - 
EmbedFloatsResponse, - EmbedImage, - EmbedImageUrl, - EmbedInput, - EmbedInputType, - EmbedJob, - EmbedJobStatus, - EmbedJobTruncate, - EmbedRequestTruncate, - EmbedResponse, - EmbedText, - EmbeddingType, - EmbeddingsByTypeEmbedResponse, - EmbeddingsFloatsEmbedResponse, - FinetuneDatasetMetrics, - FinishReason, - GenerateRequestReturnLikelihoods, - GenerateRequestTruncate, - GenerateStreamEnd, - GenerateStreamEndResponse, - GenerateStreamError, - GenerateStreamEvent, - GenerateStreamRequestReturnLikelihoods, - GenerateStreamRequestTruncate, - GenerateStreamText, - GenerateStreamedResponse, - Generation, - GetConnectorResponse, - GetModelResponse, - Image, - ImageContent, - ImageUrl, - ImageUrlContent, - ImageUrlDetail, - ImageUrlEmbedContent, - JsonObjectResponseFormat, - JsonObjectResponseFormatV2, - JsonResponseFormat, - JsonResponseFormatV2, - LabelMetric, - ListConnectorsResponse, - ListEmbedJobResponse, - ListModelsResponse, - LogprobItem, - Message, - Metrics, - NonStreamedChatResponse, - OAuthAuthorizeResponse, - ParseInfo, - RerankDocument, - RerankRequestDocumentsItem, - RerankResponse, - RerankResponseResultsItem, - RerankResponseResultsItemDocument, - RerankerDataMetrics, - ResponseFormat, - ResponseFormatV2, - SearchQueriesGenerationStreamedChatResponse, - SearchResultsStreamedChatResponse, - SingleGeneration, - SingleGenerationInStream, - SingleGenerationTokenLikelihoodsItem, - Source, - StreamEndGenerateStreamedResponse, - StreamEndStreamedChatResponse, - StreamErrorGenerateStreamedResponse, - StreamStartStreamedChatResponse, - StreamedChatResponse, - SummarizeRequestExtractiveness, - SummarizeRequestFormat, - SummarizeRequestLength, - SummarizeResponse, - SystemChatMessageV2, - SystemMessage, - SystemMessageV2, - SystemMessageV2Content, - SystemMessageV2ContentItem, - TextAssistantMessageResponseContentItem, - TextAssistantMessageV2ContentItem, - TextContent, - TextEmbedContent, - TextGenerationGenerateStreamedResponse, - TextGenerationStreamedChatResponse, - TextResponseFormat, - TextResponseFormatV2, - TextSystemMessageV2ContentItem, - TextToolContent, - Thinking, - ThinkingAssistantMessageResponseContentItem, - ThinkingAssistantMessageV2ContentItem, - ThinkingType, - TokenizeResponse, - Tool, - ToolCall, - ToolCallDelta, - ToolCallV2, - ToolCallV2Function, - ToolCallsChunkStreamedChatResponse, - ToolCallsGenerationStreamedChatResponse, - ToolChatMessageV2, - ToolContent, - ToolMessage, - ToolMessageV2, - ToolMessageV2Content, - ToolParameterDefinitionsValue, - ToolResult, - ToolSource, - ToolV2, - ToolV2Function, - UpdateConnectorResponse, - Usage, - UsageBilledUnits, - UsageTokens, - UserChatMessageV2, - UserMessage, - UserMessageV2, - UserMessageV2Content, -) -from .errors import ( - BadRequestError, - ClientClosedRequestError, - ForbiddenError, - GatewayTimeoutError, - InternalServerError, - InvalidTokenError, - NotFoundError, - NotImplementedError, - ServiceUnavailableError, - TooManyRequestsError, - UnauthorizedError, - UnprocessableEntityError, -) -from . 
import batches, connectors, datasets, embed_jobs, finetuning, models, v2 -from .aliases import ( - ChatResponse, - ContentDeltaStreamedChatResponseV2, - ContentEndStreamedChatResponseV2, - ContentStartStreamedChatResponseV2, - MessageEndStreamedChatResponseV2, - MessageStartStreamedChatResponseV2, - StreamedChatResponseV2, - ToolCallDeltaStreamedChatResponseV2, - ToolCallEndStreamedChatResponseV2, - ToolCallStartStreamedChatResponseV2, -) -from .aws_client import AwsClient -from .batches import Batch, BatchStatus, CancelBatchResponse, CreateBatchResponse, GetBatchResponse, ListBatchesResponse -from .bedrock_client import BedrockClient, BedrockClientV2 -from .client import AsyncClient, Client -from .client_v2 import AsyncClientV2, ClientV2 -from .datasets import DatasetsCreateResponse, DatasetsGetResponse, DatasetsGetUsageResponse, DatasetsListResponse -from .embed_jobs import CreateEmbedJobRequestTruncate -from .environment import ClientEnvironment -from .sagemaker_client import SagemakerClient, SagemakerClientV2 -from .v2 import ( - CitationEndV2ChatStreamResponse, - CitationStartV2ChatStreamResponse, - ContentDeltaV2ChatStreamResponse, - ContentEndV2ChatStreamResponse, - ContentStartV2ChatStreamResponse, - DebugV2ChatStreamResponse, - MessageEndV2ChatStreamResponse, - MessageStartV2ChatStreamResponse, - ToolCallDeltaV2ChatStreamResponse, - ToolCallEndV2ChatStreamResponse, - ToolCallStartV2ChatStreamResponse, - ToolPlanDeltaV2ChatStreamResponse, - V2ChatRequestDocumentsItem, - V2ChatRequestSafetyMode, - V2ChatRequestToolChoice, - V2ChatResponse, - V2ChatStreamRequestDocumentsItem, - V2ChatStreamRequestSafetyMode, - V2ChatStreamRequestToolChoice, - V2ChatStreamResponse, - V2EmbedRequestTruncate, - V2RerankResponse, - V2RerankResponseResultsItem, -) -from .version import __version__ +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .types import ( + ApiMeta, + ApiMetaApiVersion, + ApiMetaBilledUnits, + ApiMetaTokens, + AssistantChatMessageV2, + AssistantMessage, + AssistantMessageResponse, + AssistantMessageResponseContentItem, + AssistantMessageV2Content, + AssistantMessageV2ContentItem, + AuthTokenType, + ChatCitation, + ChatCitationGenerationEvent, + ChatCitationType, + ChatConnector, + ChatContentDeltaEvent, + ChatContentDeltaEventDelta, + ChatContentDeltaEventDeltaMessage, + ChatContentDeltaEventDeltaMessageContent, + ChatContentEndEvent, + ChatContentStartEvent, + ChatContentStartEventDelta, + ChatContentStartEventDeltaMessage, + ChatContentStartEventDeltaMessageContent, + ChatContentStartEventDeltaMessageContentType, + ChatDataMetrics, + ChatDebugEvent, + ChatDocument, + ChatDocumentSource, + ChatFinishReason, + ChatMessage, + ChatMessageEndEvent, + ChatMessageEndEventDelta, + ChatMessageStartEvent, + ChatMessageStartEventDelta, + ChatMessageStartEventDeltaMessage, + ChatMessageV2, + ChatMessages, + ChatRequestCitationQuality, + ChatRequestPromptTruncation, + ChatRequestSafetyMode, + ChatSearchQueriesGenerationEvent, + ChatSearchQuery, + ChatSearchResult, + ChatSearchResultConnector, + ChatSearchResultsEvent, + ChatStreamEndEvent, + ChatStreamEndEventFinishReason, + ChatStreamEvent, + ChatStreamEventType, + ChatStreamRequestCitationQuality, + ChatStreamRequestPromptTruncation, + ChatStreamRequestSafetyMode, + ChatStreamStartEvent, + ChatTextContent, + ChatTextGenerationEvent, + ChatTextResponseFormat, + ChatTextResponseFormatV2, + ChatThinkingContent, + ChatToolCallDeltaEvent, + ChatToolCallDeltaEventDelta, + ChatToolCallDeltaEventDeltaMessage, + 
ChatToolCallDeltaEventDeltaMessageToolCalls, + ChatToolCallDeltaEventDeltaMessageToolCallsFunction, + ChatToolCallEndEvent, + ChatToolCallStartEvent, + ChatToolCallStartEventDelta, + ChatToolCallStartEventDeltaMessage, + ChatToolCallsChunkEvent, + ChatToolCallsGenerationEvent, + ChatToolMessage, + ChatToolPlanDeltaEvent, + ChatToolPlanDeltaEventDelta, + ChatToolPlanDeltaEventDeltaMessage, + ChatToolSource, + ChatbotMessage, + CheckApiKeyResponse, + Citation, + CitationEndEvent, + CitationGenerationStreamedChatResponse, + CitationOptions, + CitationOptionsMode, + CitationStartEvent, + CitationStartEventDelta, + CitationStartEventDeltaMessage, + CitationType, + ClassifyDataMetrics, + ClassifyExample, + ClassifyRequestTruncate, + ClassifyResponse, + ClassifyResponseClassificationsItem, + ClassifyResponseClassificationsItemClassificationType, + ClassifyResponseClassificationsItemLabelsValue, + CompatibleEndpoint, + Connector, + ConnectorAuthStatus, + ConnectorOAuth, + Content, + CreateConnectorOAuth, + CreateConnectorResponse, + CreateConnectorServiceAuth, + CreateEmbedJobResponse, + Dataset, + DatasetPart, + DatasetType, + DatasetValidationStatus, + DebugStreamedChatResponse, + DeleteConnectorResponse, + DetokenizeResponse, + Document, + DocumentContent, + DocumentSource, + DocumentToolContent, + EmbedByTypeResponse, + EmbedByTypeResponseEmbeddings, + EmbedContent, + EmbedFloatsResponse, + EmbedImage, + EmbedImageUrl, + EmbedInput, + EmbedInputType, + EmbedJob, + EmbedJobStatus, + EmbedJobTruncate, + EmbedRequestTruncate, + EmbedResponse, + EmbedText, + EmbeddingType, + EmbeddingsByTypeEmbedResponse, + EmbeddingsFloatsEmbedResponse, + FinetuneDatasetMetrics, + FinishReason, + GenerateRequestReturnLikelihoods, + GenerateRequestTruncate, + GenerateStreamEnd, + GenerateStreamEndResponse, + GenerateStreamError, + GenerateStreamEvent, + GenerateStreamRequestReturnLikelihoods, + GenerateStreamRequestTruncate, + GenerateStreamText, + GenerateStreamedResponse, + Generation, + GetConnectorResponse, + GetModelResponse, + Image, + ImageContent, + ImageUrl, + ImageUrlContent, + ImageUrlDetail, + ImageUrlEmbedContent, + JsonObjectResponseFormat, + JsonObjectResponseFormatV2, + JsonResponseFormat, + JsonResponseFormatV2, + LabelMetric, + ListConnectorsResponse, + ListEmbedJobResponse, + ListModelsResponse, + LogprobItem, + Message, + Metrics, + NonStreamedChatResponse, + OAuthAuthorizeResponse, + ParseInfo, + RerankDocument, + RerankRequestDocumentsItem, + RerankResponse, + RerankResponseResultsItem, + RerankResponseResultsItemDocument, + RerankerDataMetrics, + ResponseFormat, + ResponseFormatV2, + SearchQueriesGenerationStreamedChatResponse, + SearchResultsStreamedChatResponse, + SingleGeneration, + SingleGenerationInStream, + SingleGenerationTokenLikelihoodsItem, + Source, + StreamEndGenerateStreamedResponse, + StreamEndStreamedChatResponse, + StreamErrorGenerateStreamedResponse, + StreamStartStreamedChatResponse, + StreamedChatResponse, + SummarizeRequestExtractiveness, + SummarizeRequestFormat, + SummarizeRequestLength, + SummarizeResponse, + SystemChatMessageV2, + SystemMessage, + SystemMessageV2, + SystemMessageV2Content, + SystemMessageV2ContentItem, + TextAssistantMessageResponseContentItem, + TextAssistantMessageV2ContentItem, + TextContent, + TextEmbedContent, + TextGenerationGenerateStreamedResponse, + TextGenerationStreamedChatResponse, + TextResponseFormat, + TextResponseFormatV2, + TextSystemMessageV2ContentItem, + TextToolContent, + Thinking, + ThinkingAssistantMessageResponseContentItem, + 
ThinkingAssistantMessageV2ContentItem, + ThinkingType, + TokenizeResponse, + Tool, + ToolCall, + ToolCallDelta, + ToolCallV2, + ToolCallV2Function, + ToolCallsChunkStreamedChatResponse, + ToolCallsGenerationStreamedChatResponse, + ToolChatMessageV2, + ToolContent, + ToolMessage, + ToolMessageV2, + ToolMessageV2Content, + ToolParameterDefinitionsValue, + ToolResult, + ToolSource, + ToolV2, + ToolV2Function, + UpdateConnectorResponse, + Usage, + UsageBilledUnits, + UsageTokens, + UserChatMessageV2, + UserMessage, + UserMessageV2, + UserMessageV2Content, + ) + from .errors import ( + BadRequestError, + ClientClosedRequestError, + ForbiddenError, + GatewayTimeoutError, + InternalServerError, + InvalidTokenError, + NotFoundError, + NotImplementedError, + ServiceUnavailableError, + TooManyRequestsError, + UnauthorizedError, + UnprocessableEntityError, + ) + from . import batches, connectors, datasets, embed_jobs, finetuning, models, v2 + from .aliases import ( + ChatResponse, + ContentDeltaStreamedChatResponseV2, + ContentEndStreamedChatResponseV2, + ContentStartStreamedChatResponseV2, + MessageEndStreamedChatResponseV2, + MessageStartStreamedChatResponseV2, + StreamedChatResponseV2, + ToolCallDeltaStreamedChatResponseV2, + ToolCallEndStreamedChatResponseV2, + ToolCallStartStreamedChatResponseV2, + ) + from .aws_client import AwsClient + from .batches import ( + Batch, + BatchStatus, + CancelBatchResponse, + CreateBatchResponse, + GetBatchResponse, + ListBatchesResponse, + ) + from .bedrock_client import BedrockClient, BedrockClientV2 + from .client import AsyncClient, Client + from .client_v2 import AsyncClientV2, ClientV2 + from .datasets import DatasetsCreateResponse, DatasetsGetResponse, DatasetsGetUsageResponse, DatasetsListResponse + from .embed_jobs import CreateEmbedJobRequestTruncate + from .environment import ClientEnvironment + from .sagemaker_client import SagemakerClient, SagemakerClientV2 + from .v2 import ( + CitationEndV2ChatStreamResponse, + CitationStartV2ChatStreamResponse, + ContentDeltaV2ChatStreamResponse, + ContentEndV2ChatStreamResponse, + ContentStartV2ChatStreamResponse, + DebugV2ChatStreamResponse, + MessageEndV2ChatStreamResponse, + MessageStartV2ChatStreamResponse, + ToolCallDeltaV2ChatStreamResponse, + ToolCallEndV2ChatStreamResponse, + ToolCallStartV2ChatStreamResponse, + ToolPlanDeltaV2ChatStreamResponse, + V2ChatRequestDocumentsItem, + V2ChatRequestSafetyMode, + V2ChatRequestToolChoice, + V2ChatResponse, + V2ChatStreamRequestDocumentsItem, + V2ChatStreamRequestSafetyMode, + V2ChatStreamRequestToolChoice, + V2ChatStreamResponse, + V2EmbedRequestTruncate, + V2RerankResponse, + V2RerankResponseResultsItem, + ) + from .version import __version__ +_dynamic_imports: typing.Dict[str, str] = { + "ApiMeta": ".types", + "ApiMetaApiVersion": ".types", + "ApiMetaBilledUnits": ".types", + "ApiMetaTokens": ".types", + "AssistantChatMessageV2": ".types", + "AssistantMessage": ".types", + "AssistantMessageResponse": ".types", + "AssistantMessageResponseContentItem": ".types", + "AssistantMessageV2Content": ".types", + "AssistantMessageV2ContentItem": ".types", + "AsyncClient": ".client", + "AsyncClientV2": ".client_v2", + "AuthTokenType": ".types", + "AwsClient": ".aws_client", + "BadRequestError": ".errors", + "Batch": ".batches", + "BatchStatus": ".batches", + "BedrockClient": ".bedrock_client", + "BedrockClientV2": ".bedrock_client", + "CancelBatchResponse": ".batches", + "ChatCitation": ".types", + "ChatCitationGenerationEvent": ".types", + "ChatCitationType": ".types", + 
"ChatConnector": ".types", + "ChatContentDeltaEvent": ".types", + "ChatContentDeltaEventDelta": ".types", + "ChatContentDeltaEventDeltaMessage": ".types", + "ChatContentDeltaEventDeltaMessageContent": ".types", + "ChatContentEndEvent": ".types", + "ChatContentStartEvent": ".types", + "ChatContentStartEventDelta": ".types", + "ChatContentStartEventDeltaMessage": ".types", + "ChatContentStartEventDeltaMessageContent": ".types", + "ChatContentStartEventDeltaMessageContentType": ".types", + "ChatDataMetrics": ".types", + "ChatDebugEvent": ".types", + "ChatDocument": ".types", + "ChatDocumentSource": ".types", + "ChatFinishReason": ".types", + "ChatMessage": ".types", + "ChatMessageEndEvent": ".types", + "ChatMessageEndEventDelta": ".types", + "ChatMessageStartEvent": ".types", + "ChatMessageStartEventDelta": ".types", + "ChatMessageStartEventDeltaMessage": ".types", + "ChatMessageV2": ".types", + "ChatMessages": ".types", + "ChatRequestCitationQuality": ".types", + "ChatRequestPromptTruncation": ".types", + "ChatRequestSafetyMode": ".types", + "ChatResponse": ".aliases", + "ChatSearchQueriesGenerationEvent": ".types", + "ChatSearchQuery": ".types", + "ChatSearchResult": ".types", + "ChatSearchResultConnector": ".types", + "ChatSearchResultsEvent": ".types", + "ChatStreamEndEvent": ".types", + "ChatStreamEndEventFinishReason": ".types", + "ChatStreamEvent": ".types", + "ChatStreamEventType": ".types", + "ChatStreamRequestCitationQuality": ".types", + "ChatStreamRequestPromptTruncation": ".types", + "ChatStreamRequestSafetyMode": ".types", + "ChatStreamStartEvent": ".types", + "ChatTextContent": ".types", + "ChatTextGenerationEvent": ".types", + "ChatTextResponseFormat": ".types", + "ChatTextResponseFormatV2": ".types", + "ChatThinkingContent": ".types", + "ChatToolCallDeltaEvent": ".types", + "ChatToolCallDeltaEventDelta": ".types", + "ChatToolCallDeltaEventDeltaMessage": ".types", + "ChatToolCallDeltaEventDeltaMessageToolCalls": ".types", + "ChatToolCallDeltaEventDeltaMessageToolCallsFunction": ".types", + "ChatToolCallEndEvent": ".types", + "ChatToolCallStartEvent": ".types", + "ChatToolCallStartEventDelta": ".types", + "ChatToolCallStartEventDeltaMessage": ".types", + "ChatToolCallsChunkEvent": ".types", + "ChatToolCallsGenerationEvent": ".types", + "ChatToolMessage": ".types", + "ChatToolPlanDeltaEvent": ".types", + "ChatToolPlanDeltaEventDelta": ".types", + "ChatToolPlanDeltaEventDeltaMessage": ".types", + "ChatToolSource": ".types", + "ChatbotMessage": ".types", + "CheckApiKeyResponse": ".types", + "Citation": ".types", + "CitationEndEvent": ".types", + "CitationEndV2ChatStreamResponse": ".v2", + "CitationGenerationStreamedChatResponse": ".types", + "CitationOptions": ".types", + "CitationOptionsMode": ".types", + "CitationStartEvent": ".types", + "CitationStartEventDelta": ".types", + "CitationStartEventDeltaMessage": ".types", + "CitationStartV2ChatStreamResponse": ".v2", + "CitationType": ".types", + "ClassifyDataMetrics": ".types", + "ClassifyExample": ".types", + "ClassifyRequestTruncate": ".types", + "ClassifyResponse": ".types", + "ClassifyResponseClassificationsItem": ".types", + "ClassifyResponseClassificationsItemClassificationType": ".types", + "ClassifyResponseClassificationsItemLabelsValue": ".types", + "Client": ".client", + "ClientClosedRequestError": ".errors", + "ClientEnvironment": ".environment", + "ClientV2": ".client_v2", + "CompatibleEndpoint": ".types", + "Connector": ".types", + "ConnectorAuthStatus": ".types", + "ConnectorOAuth": ".types", + "Content": ".types", + 
"ContentDeltaStreamedChatResponseV2": ".aliases", + "ContentDeltaV2ChatStreamResponse": ".v2", + "ContentEndStreamedChatResponseV2": ".aliases", + "ContentEndV2ChatStreamResponse": ".v2", + "ContentStartStreamedChatResponseV2": ".aliases", + "ContentStartV2ChatStreamResponse": ".v2", + "CreateBatchResponse": ".batches", + "CreateConnectorOAuth": ".types", + "CreateConnectorResponse": ".types", + "CreateConnectorServiceAuth": ".types", + "CreateEmbedJobRequestTruncate": ".embed_jobs", + "CreateEmbedJobResponse": ".types", + "Dataset": ".types", + "DatasetPart": ".types", + "DatasetType": ".types", + "DatasetValidationStatus": ".types", + "DatasetsCreateResponse": ".datasets", + "DatasetsGetResponse": ".datasets", + "DatasetsGetUsageResponse": ".datasets", + "DatasetsListResponse": ".datasets", + "DebugStreamedChatResponse": ".types", + "DebugV2ChatStreamResponse": ".v2", + "DeleteConnectorResponse": ".types", + "DetokenizeResponse": ".types", + "Document": ".types", + "DocumentContent": ".types", + "DocumentSource": ".types", + "DocumentToolContent": ".types", + "EmbedByTypeResponse": ".types", + "EmbedByTypeResponseEmbeddings": ".types", + "EmbedContent": ".types", + "EmbedFloatsResponse": ".types", + "EmbedImage": ".types", + "EmbedImageUrl": ".types", + "EmbedInput": ".types", + "EmbedInputType": ".types", + "EmbedJob": ".types", + "EmbedJobStatus": ".types", + "EmbedJobTruncate": ".types", + "EmbedRequestTruncate": ".types", + "EmbedResponse": ".types", + "EmbedText": ".types", + "EmbeddingType": ".types", + "EmbeddingsByTypeEmbedResponse": ".types", + "EmbeddingsFloatsEmbedResponse": ".types", + "FinetuneDatasetMetrics": ".types", + "FinishReason": ".types", + "ForbiddenError": ".errors", + "GatewayTimeoutError": ".errors", + "GenerateRequestReturnLikelihoods": ".types", + "GenerateRequestTruncate": ".types", + "GenerateStreamEnd": ".types", + "GenerateStreamEndResponse": ".types", + "GenerateStreamError": ".types", + "GenerateStreamEvent": ".types", + "GenerateStreamRequestReturnLikelihoods": ".types", + "GenerateStreamRequestTruncate": ".types", + "GenerateStreamText": ".types", + "GenerateStreamedResponse": ".types", + "Generation": ".types", + "GetBatchResponse": ".batches", + "GetConnectorResponse": ".types", + "GetModelResponse": ".types", + "Image": ".types", + "ImageContent": ".types", + "ImageUrl": ".types", + "ImageUrlContent": ".types", + "ImageUrlDetail": ".types", + "ImageUrlEmbedContent": ".types", + "InternalServerError": ".errors", + "InvalidTokenError": ".errors", + "JsonObjectResponseFormat": ".types", + "JsonObjectResponseFormatV2": ".types", + "JsonResponseFormat": ".types", + "JsonResponseFormatV2": ".types", + "LabelMetric": ".types", + "ListBatchesResponse": ".batches", + "ListConnectorsResponse": ".types", + "ListEmbedJobResponse": ".types", + "ListModelsResponse": ".types", + "LogprobItem": ".types", + "Message": ".types", + "MessageEndStreamedChatResponseV2": ".aliases", + "MessageEndV2ChatStreamResponse": ".v2", + "MessageStartStreamedChatResponseV2": ".aliases", + "MessageStartV2ChatStreamResponse": ".v2", + "Metrics": ".types", + "NonStreamedChatResponse": ".types", + "NotFoundError": ".errors", + "NotImplementedError": ".errors", + "OAuthAuthorizeResponse": ".types", + "ParseInfo": ".types", + "RerankDocument": ".types", + "RerankRequestDocumentsItem": ".types", + "RerankResponse": ".types", + "RerankResponseResultsItem": ".types", + "RerankResponseResultsItemDocument": ".types", + "RerankerDataMetrics": ".types", + "ResponseFormat": ".types", + 
"ResponseFormatV2": ".types", + "SagemakerClient": ".sagemaker_client", + "SagemakerClientV2": ".sagemaker_client", + "SearchQueriesGenerationStreamedChatResponse": ".types", + "SearchResultsStreamedChatResponse": ".types", + "ServiceUnavailableError": ".errors", + "SingleGeneration": ".types", + "SingleGenerationInStream": ".types", + "SingleGenerationTokenLikelihoodsItem": ".types", + "Source": ".types", + "StreamEndGenerateStreamedResponse": ".types", + "StreamEndStreamedChatResponse": ".types", + "StreamErrorGenerateStreamedResponse": ".types", + "StreamStartStreamedChatResponse": ".types", + "StreamedChatResponse": ".types", + "StreamedChatResponseV2": ".aliases", + "SummarizeRequestExtractiveness": ".types", + "SummarizeRequestFormat": ".types", + "SummarizeRequestLength": ".types", + "SummarizeResponse": ".types", + "SystemChatMessageV2": ".types", + "SystemMessage": ".types", + "SystemMessageV2": ".types", + "SystemMessageV2Content": ".types", + "SystemMessageV2ContentItem": ".types", + "TextAssistantMessageResponseContentItem": ".types", + "TextAssistantMessageV2ContentItem": ".types", + "TextContent": ".types", + "TextEmbedContent": ".types", + "TextGenerationGenerateStreamedResponse": ".types", + "TextGenerationStreamedChatResponse": ".types", + "TextResponseFormat": ".types", + "TextResponseFormatV2": ".types", + "TextSystemMessageV2ContentItem": ".types", + "TextToolContent": ".types", + "Thinking": ".types", + "ThinkingAssistantMessageResponseContentItem": ".types", + "ThinkingAssistantMessageV2ContentItem": ".types", + "ThinkingType": ".types", + "TokenizeResponse": ".types", + "TooManyRequestsError": ".errors", + "Tool": ".types", + "ToolCall": ".types", + "ToolCallDelta": ".types", + "ToolCallDeltaStreamedChatResponseV2": ".aliases", + "ToolCallDeltaV2ChatStreamResponse": ".v2", + "ToolCallEndStreamedChatResponseV2": ".aliases", + "ToolCallEndV2ChatStreamResponse": ".v2", + "ToolCallStartStreamedChatResponseV2": ".aliases", + "ToolCallStartV2ChatStreamResponse": ".v2", + "ToolCallV2": ".types", + "ToolCallV2Function": ".types", + "ToolCallsChunkStreamedChatResponse": ".types", + "ToolCallsGenerationStreamedChatResponse": ".types", + "ToolChatMessageV2": ".types", + "ToolContent": ".types", + "ToolMessage": ".types", + "ToolMessageV2": ".types", + "ToolMessageV2Content": ".types", + "ToolParameterDefinitionsValue": ".types", + "ToolPlanDeltaV2ChatStreamResponse": ".v2", + "ToolResult": ".types", + "ToolSource": ".types", + "ToolV2": ".types", + "ToolV2Function": ".types", + "UnauthorizedError": ".errors", + "UnprocessableEntityError": ".errors", + "UpdateConnectorResponse": ".types", + "Usage": ".types", + "UsageBilledUnits": ".types", + "UsageTokens": ".types", + "UserChatMessageV2": ".types", + "UserMessage": ".types", + "UserMessageV2": ".types", + "UserMessageV2Content": ".types", + "V2ChatRequestDocumentsItem": ".v2", + "V2ChatRequestSafetyMode": ".v2", + "V2ChatRequestToolChoice": ".v2", + "V2ChatResponse": ".v2", + "V2ChatStreamRequestDocumentsItem": ".v2", + "V2ChatStreamRequestSafetyMode": ".v2", + "V2ChatStreamRequestToolChoice": ".v2", + "V2ChatStreamResponse": ".v2", + "V2EmbedRequestTruncate": ".v2", + "V2RerankResponse": ".v2", + "V2RerankResponseResultsItem": ".v2", + "__version__": ".version", + "batches": ".batches", + "connectors": ".connectors", + "datasets": ".datasets", + "embed_jobs": ".embed_jobs", + "finetuning": ".finetuning", + "models": ".models", + "v2": ".v2", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = 
_dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = [ "ApiMeta", diff --git a/src/cohere/base_client.py b/src/cohere/base_client.py index 5a306fb5f..ea606da17 100644 --- a/src/cohere/base_client.py +++ b/src/cohere/base_client.py @@ -1,19 +1,14 @@ # This file was auto-generated by Fern from our API Definition. +from __future__ import annotations + import os import typing import httpx -from .batches.client import AsyncBatchesClient, BatchesClient -from .connectors.client import AsyncConnectorsClient, ConnectorsClient -from .core.api_error import ApiError from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from .core.request_options import RequestOptions -from .datasets.client import AsyncDatasetsClient, DatasetsClient -from .embed_jobs.client import AsyncEmbedJobsClient, EmbedJobsClient from .environment import ClientEnvironment -from .finetuning.client import AsyncFinetuningClient, FinetuningClient -from .models.client import AsyncModelsClient, ModelsClient from .raw_base_client import AsyncRawBaseCohere, RawBaseCohere from .types.chat_connector import ChatConnector from .types.chat_document import ChatDocument @@ -51,8 +46,15 @@ from .types.tokenize_response import TokenizeResponse from .types.tool import Tool from .types.tool_result import ToolResult -from .v2.client import AsyncV2Client, V2Client +if typing.TYPE_CHECKING: + from .batches.client import AsyncBatchesClient, BatchesClient + from .connectors.client import AsyncConnectorsClient, ConnectorsClient + from .datasets.client import AsyncDatasetsClient, DatasetsClient + from .embed_jobs.client import AsyncEmbedJobsClient, EmbedJobsClient + from .finetuning.client import AsyncFinetuningClient, FinetuningClient + from .models.client import AsyncModelsClient, ModelsClient + from .v2.client import AsyncV2Client, V2Client # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -77,6 +79,9 @@ class BaseCohere: client_name : typing.Optional[str] token : typing.Optional[typing.Union[str, typing.Callable[[], str]]] + headers : typing.Optional[typing.Dict[str, str]] + Additional headers to send with every request. + timeout : typing.Optional[float] The timeout to be used, in seconds, for requests. By default the timeout is 300 seconds, unless a custom httpx client is used, in which case this default is not enforced. 
@@ -103,6 +108,7 @@ def __init__( environment: ClientEnvironment = ClientEnvironment.PRODUCTION, client_name: typing.Optional[str] = None, token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("CO_API_KEY"), + headers: typing.Optional[typing.Dict[str, str]] = None, timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.Client] = None, @@ -110,12 +116,11 @@ def __init__( _defaulted_timeout = ( timeout if timeout is not None else 300 if httpx_client is None else httpx_client.timeout.read ) - if token is None: - raise ApiError(body="The client must be instantiated be either passing in token or setting CO_API_KEY") self._client_wrapper = SyncClientWrapper( base_url=_get_base_url(base_url=base_url, environment=environment), client_name=client_name, token=token, + headers=headers, httpx_client=httpx_client if httpx_client is not None else httpx.Client(timeout=_defaulted_timeout, follow_redirects=follow_redirects) @@ -124,13 +129,13 @@ def __init__( timeout=_defaulted_timeout, ) self._raw_client = RawBaseCohere(client_wrapper=self._client_wrapper) - self.v2 = V2Client(client_wrapper=self._client_wrapper) - self.batches = BatchesClient(client_wrapper=self._client_wrapper) - self.embed_jobs = EmbedJobsClient(client_wrapper=self._client_wrapper) - self.datasets = DatasetsClient(client_wrapper=self._client_wrapper) - self.connectors = ConnectorsClient(client_wrapper=self._client_wrapper) - self.models = ModelsClient(client_wrapper=self._client_wrapper) - self.finetuning = FinetuningClient(client_wrapper=self._client_wrapper) + self._v2: typing.Optional[V2Client] = None + self._batches: typing.Optional[BatchesClient] = None + self._embed_jobs: typing.Optional[EmbedJobsClient] = None + self._datasets: typing.Optional[DatasetsClient] = None + self._connectors: typing.Optional[ConnectorsClient] = None + self._models: typing.Optional[ModelsClient] = None + self._finetuning: typing.Optional[FinetuningClient] = None @property def with_raw_response(self) -> RawBaseCohere: @@ -1500,6 +1505,62 @@ def check_api_key(self, *, request_options: typing.Optional[RequestOptions] = No _response = self._raw_client.check_api_key(request_options=request_options) return _response.data + @property + def v2(self): + if self._v2 is None: + from .v2.client import V2Client # noqa: E402 + + self._v2 = V2Client(client_wrapper=self._client_wrapper) + return self._v2 + + @property + def batches(self): + if self._batches is None: + from .batches.client import BatchesClient # noqa: E402 + + self._batches = BatchesClient(client_wrapper=self._client_wrapper) + return self._batches + + @property + def embed_jobs(self): + if self._embed_jobs is None: + from .embed_jobs.client import EmbedJobsClient # noqa: E402 + + self._embed_jobs = EmbedJobsClient(client_wrapper=self._client_wrapper) + return self._embed_jobs + + @property + def datasets(self): + if self._datasets is None: + from .datasets.client import DatasetsClient # noqa: E402 + + self._datasets = DatasetsClient(client_wrapper=self._client_wrapper) + return self._datasets + + @property + def connectors(self): + if self._connectors is None: + from .connectors.client import ConnectorsClient # noqa: E402 + + self._connectors = ConnectorsClient(client_wrapper=self._client_wrapper) + return self._connectors + + @property + def models(self): + if self._models is None: + from .models.client import ModelsClient # noqa: E402 + + self._models = ModelsClient(client_wrapper=self._client_wrapper) + 
return self._models + + @property + def finetuning(self): + if self._finetuning is None: + from .finetuning.client import FinetuningClient # noqa: E402 + + self._finetuning = FinetuningClient(client_wrapper=self._client_wrapper) + return self._finetuning + class AsyncBaseCohere: """ @@ -1521,6 +1582,9 @@ class AsyncBaseCohere: client_name : typing.Optional[str] token : typing.Optional[typing.Union[str, typing.Callable[[], str]]] + headers : typing.Optional[typing.Dict[str, str]] + Additional headers to send with every request. + timeout : typing.Optional[float] The timeout to be used, in seconds, for requests. By default the timeout is 300 seconds, unless a custom httpx client is used, in which case this default is not enforced. @@ -1547,6 +1611,7 @@ def __init__( environment: ClientEnvironment = ClientEnvironment.PRODUCTION, client_name: typing.Optional[str] = None, token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("CO_API_KEY"), + headers: typing.Optional[typing.Dict[str, str]] = None, timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.AsyncClient] = None, @@ -1554,12 +1619,11 @@ def __init__( _defaulted_timeout = ( timeout if timeout is not None else 300 if httpx_client is None else httpx_client.timeout.read ) - if token is None: - raise ApiError(body="The client must be instantiated be either passing in token or setting CO_API_KEY") self._client_wrapper = AsyncClientWrapper( base_url=_get_base_url(base_url=base_url, environment=environment), client_name=client_name, token=token, + headers=headers, httpx_client=httpx_client if httpx_client is not None else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) @@ -1568,13 +1632,13 @@ def __init__( timeout=_defaulted_timeout, ) self._raw_client = AsyncRawBaseCohere(client_wrapper=self._client_wrapper) - self.v2 = AsyncV2Client(client_wrapper=self._client_wrapper) - self.batches = AsyncBatchesClient(client_wrapper=self._client_wrapper) - self.embed_jobs = AsyncEmbedJobsClient(client_wrapper=self._client_wrapper) - self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper) - self.connectors = AsyncConnectorsClient(client_wrapper=self._client_wrapper) - self.models = AsyncModelsClient(client_wrapper=self._client_wrapper) - self.finetuning = AsyncFinetuningClient(client_wrapper=self._client_wrapper) + self._v2: typing.Optional[AsyncV2Client] = None + self._batches: typing.Optional[AsyncBatchesClient] = None + self._embed_jobs: typing.Optional[AsyncEmbedJobsClient] = None + self._datasets: typing.Optional[AsyncDatasetsClient] = None + self._connectors: typing.Optional[AsyncConnectorsClient] = None + self._models: typing.Optional[AsyncModelsClient] = None + self._finetuning: typing.Optional[AsyncFinetuningClient] = None @property def with_raw_response(self) -> AsyncRawBaseCohere: @@ -3034,6 +3098,62 @@ async def main() -> None: _response = await self._raw_client.check_api_key(request_options=request_options) return _response.data + @property + def v2(self): + if self._v2 is None: + from .v2.client import AsyncV2Client # noqa: E402 + + self._v2 = AsyncV2Client(client_wrapper=self._client_wrapper) + return self._v2 + + @property + def batches(self): + if self._batches is None: + from .batches.client import AsyncBatchesClient # noqa: E402 + + self._batches = AsyncBatchesClient(client_wrapper=self._client_wrapper) + return self._batches + + @property + def embed_jobs(self): + if self._embed_jobs is None: + 
from .embed_jobs.client import AsyncEmbedJobsClient # noqa: E402 + + self._embed_jobs = AsyncEmbedJobsClient(client_wrapper=self._client_wrapper) + return self._embed_jobs + + @property + def datasets(self): + if self._datasets is None: + from .datasets.client import AsyncDatasetsClient # noqa: E402 + + self._datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper) + return self._datasets + + @property + def connectors(self): + if self._connectors is None: + from .connectors.client import AsyncConnectorsClient # noqa: E402 + + self._connectors = AsyncConnectorsClient(client_wrapper=self._client_wrapper) + return self._connectors + + @property + def models(self): + if self._models is None: + from .models.client import AsyncModelsClient # noqa: E402 + + self._models = AsyncModelsClient(client_wrapper=self._client_wrapper) + return self._models + + @property + def finetuning(self): + if self._finetuning is None: + from .finetuning.client import AsyncFinetuningClient # noqa: E402 + + self._finetuning = AsyncFinetuningClient(client_wrapper=self._client_wrapper) + return self._finetuning + def _get_base_url(*, base_url: typing.Optional[str] = None, environment: ClientEnvironment) -> str: if base_url is not None: diff --git a/src/cohere/batches/__init__.py b/src/cohere/batches/__init__.py index 4f70ca193..2044e347e 100644 --- a/src/cohere/batches/__init__.py +++ b/src/cohere/batches/__init__.py @@ -2,7 +2,48 @@ # isort: skip_file -from .types import Batch, BatchStatus, CancelBatchResponse, CreateBatchResponse, GetBatchResponse, ListBatchesResponse +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .types import ( + Batch, + BatchStatus, + CancelBatchResponse, + CreateBatchResponse, + GetBatchResponse, + ListBatchesResponse, + ) +_dynamic_imports: typing.Dict[str, str] = { + "Batch": ".types", + "BatchStatus": ".types", + "CancelBatchResponse": ".types", + "CreateBatchResponse": ".types", + "GetBatchResponse": ".types", + "ListBatchesResponse": ".types", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = [ "Batch", diff --git a/src/cohere/batches/client.py b/src/cohere/batches/client.py index d454f50bb..981907918 100644 --- a/src/cohere/batches/client.py +++ b/src/cohere/batches/client.py @@ -73,7 +73,11 @@ def list( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) - client.batches.list() + client.batches.list( + page_size=1, + page_token="page_token", + order_by="order_by", + ) """ _response = self._raw_client.list( page_size=page_size, page_token=page_token, order_by=order_by, request_options=request_options @@ -244,7 +248,11 @@ async def list( async def main() -> None: - await client.batches.list() + await client.batches.list( + page_size=1, + page_token="page_token", + order_by="order_by", + ) asyncio.run(main()) diff --git a/src/cohere/batches/types/__init__.py 
b/src/cohere/batches/types/__init__.py index e6443e7ae..5a46b8a3e 100644 --- a/src/cohere/batches/types/__init__.py +++ b/src/cohere/batches/types/__init__.py @@ -2,12 +2,46 @@ # isort: skip_file -from .batch import Batch -from .batch_status import BatchStatus -from .cancel_batch_response import CancelBatchResponse -from .create_batch_response import CreateBatchResponse -from .get_batch_response import GetBatchResponse -from .list_batches_response import ListBatchesResponse +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .batch import Batch + from .batch_status import BatchStatus + from .cancel_batch_response import CancelBatchResponse + from .create_batch_response import CreateBatchResponse + from .get_batch_response import GetBatchResponse + from .list_batches_response import ListBatchesResponse +_dynamic_imports: typing.Dict[str, str] = { + "Batch": ".batch", + "BatchStatus": ".batch_status", + "CancelBatchResponse": ".cancel_batch_response", + "CreateBatchResponse": ".create_batch_response", + "GetBatchResponse": ".get_batch_response", + "ListBatchesResponse": ".list_batches_response", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = [ "Batch", diff --git a/src/cohere/connectors/client.py b/src/cohere/connectors/client.py index adc3be2cf..a09a72f8a 100644 --- a/src/cohere/connectors/client.py +++ b/src/cohere/connectors/client.py @@ -67,7 +67,10 @@ def list( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) - client.connectors.list() + client.connectors.list( + limit=1.1, + offset=1.1, + ) """ _response = self._raw_client.list(limit=limit, offset=offset, request_options=request_options) return _response.data @@ -321,6 +324,7 @@ def o_auth_authorize( ) client.connectors.o_auth_authorize( id="id", + after_token_redirect="after_token_redirect", ) """ _response = self._raw_client.o_auth_authorize( @@ -383,7 +387,10 @@ async def list( async def main() -> None: - await client.connectors.list() + await client.connectors.list( + limit=1.1, + offset=1.1, + ) asyncio.run(main()) @@ -679,6 +686,7 @@ async def o_auth_authorize( async def main() -> None: await client.connectors.o_auth_authorize( id="id", + after_token_redirect="after_token_redirect", ) diff --git a/src/cohere/core/__init__.py b/src/cohere/core/__init__.py index 869898d76..a7e9f0334 100644 --- a/src/cohere/core/__init__.py +++ b/src/cohere/core/__init__.py @@ -2,27 +2,83 @@ # isort: skip_file -from .api_error import ApiError -from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper -from .datetime_utils import serialize_datetime -from .file import File, convert_file_dict_to_httpx_tuples, with_content_type -from .http_client import AsyncHttpClient, HttpClient -from .http_response import AsyncHttpResponse, HttpResponse -from .jsonable_encoder import jsonable_encoder -from .pydantic_utilities import ( - 
IS_PYDANTIC_V2, - UniversalBaseModel, - UniversalRootModel, - parse_obj_as, - universal_field_validator, - universal_root_validator, - update_forward_refs, -) -from .query_encoder import encode_query -from .remove_none_from_dict import remove_none_from_dict -from .request_options import RequestOptions -from .serialization import FieldMetadata, convert_and_respect_annotation_metadata -from .unchecked_base_model import UncheckedBaseModel, UnionMetadata, construct_type +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .api_error import ApiError + from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper + from .datetime_utils import serialize_datetime + from .file import File, convert_file_dict_to_httpx_tuples, with_content_type + from .http_client import AsyncHttpClient, HttpClient + from .http_response import AsyncHttpResponse, HttpResponse + from .jsonable_encoder import jsonable_encoder + from .pydantic_utilities import ( + IS_PYDANTIC_V2, + UniversalBaseModel, + UniversalRootModel, + parse_obj_as, + universal_field_validator, + universal_root_validator, + update_forward_refs, + ) + from .query_encoder import encode_query + from .remove_none_from_dict import remove_none_from_dict + from .request_options import RequestOptions + from .serialization import FieldMetadata, convert_and_respect_annotation_metadata + from .unchecked_base_model import UncheckedBaseModel, UnionMetadata, construct_type +_dynamic_imports: typing.Dict[str, str] = { + "ApiError": ".api_error", + "AsyncClientWrapper": ".client_wrapper", + "AsyncHttpClient": ".http_client", + "AsyncHttpResponse": ".http_response", + "BaseClientWrapper": ".client_wrapper", + "FieldMetadata": ".serialization", + "File": ".file", + "HttpClient": ".http_client", + "HttpResponse": ".http_response", + "IS_PYDANTIC_V2": ".pydantic_utilities", + "RequestOptions": ".request_options", + "SyncClientWrapper": ".client_wrapper", + "UncheckedBaseModel": ".unchecked_base_model", + "UnionMetadata": ".unchecked_base_model", + "UniversalBaseModel": ".pydantic_utilities", + "UniversalRootModel": ".pydantic_utilities", + "construct_type": ".unchecked_base_model", + "convert_and_respect_annotation_metadata": ".serialization", + "convert_file_dict_to_httpx_tuples": ".file", + "encode_query": ".query_encoder", + "jsonable_encoder": ".jsonable_encoder", + "parse_obj_as": ".pydantic_utilities", + "remove_none_from_dict": ".remove_none_from_dict", + "serialize_datetime": ".datetime_utils", + "universal_field_validator": ".pydantic_utilities", + "universal_root_validator": ".pydantic_utilities", + "update_forward_refs": ".pydantic_utilities", + "with_content_type": ".file", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = [ "ApiError", diff --git a/src/cohere/core/client_wrapper.py b/src/cohere/core/client_wrapper.py index ea006cfd2..d6d338dbe 100644 
--- a/src/cohere/core/client_wrapper.py +++ b/src/cohere/core/client_wrapper.py @@ -11,33 +11,41 @@ def __init__( self, *, client_name: typing.Optional[str] = None, - token: typing.Union[str, typing.Callable[[], str]], + token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None, + headers: typing.Optional[typing.Dict[str, str]] = None, base_url: str, timeout: typing.Optional[float] = None, ): self._client_name = client_name self._token = token + self._headers = headers self._base_url = base_url self._timeout = timeout def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { - "User-Agent": "cohere/5.20.0", + "User-Agent": "cohere/5.20.1", "X-Fern-Language": "Python", "X-Fern-SDK-Name": "cohere", - "X-Fern-SDK-Version": "5.20.0", + "X-Fern-SDK-Version": "5.20.1", + **(self.get_custom_headers() or {}), } if self._client_name is not None: headers["X-Client-Name"] = self._client_name - headers["Authorization"] = f"Bearer {self._get_token()}" + token = self._get_token() + if token is not None: + headers["Authorization"] = f"Bearer {token}" return headers - def _get_token(self) -> str: - if isinstance(self._token, str): + def _get_token(self) -> typing.Optional[str]: + if isinstance(self._token, str) or self._token is None: return self._token else: return self._token() + def get_custom_headers(self) -> typing.Optional[typing.Dict[str, str]]: + return self._headers + def get_base_url(self) -> str: return self._base_url @@ -50,12 +58,13 @@ def __init__( self, *, client_name: typing.Optional[str] = None, - token: typing.Union[str, typing.Callable[[], str]], + token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None, + headers: typing.Optional[typing.Dict[str, str]] = None, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.Client, ): - super().__init__(client_name=client_name, token=token, base_url=base_url, timeout=timeout) + super().__init__(client_name=client_name, token=token, headers=headers, base_url=base_url, timeout=timeout) self.httpx_client = HttpClient( httpx_client=httpx_client, base_headers=self.get_headers, @@ -69,12 +78,13 @@ def __init__( self, *, client_name: typing.Optional[str] = None, - token: typing.Union[str, typing.Callable[[], str]], + token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None, + headers: typing.Optional[typing.Dict[str, str]] = None, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.AsyncClient, ): - super().__init__(client_name=client_name, token=token, base_url=base_url, timeout=timeout) + super().__init__(client_name=client_name, token=token, headers=headers, base_url=base_url, timeout=timeout) self.httpx_client = AsyncHttpClient( httpx_client=httpx_client, base_headers=self.get_headers, diff --git a/src/cohere/core/force_multipart.py b/src/cohere/core/force_multipart.py index ae24ccff1..5440913fd 100644 --- a/src/cohere/core/force_multipart.py +++ b/src/cohere/core/force_multipart.py @@ -1,7 +1,9 @@ # This file was auto-generated by Fern from our API Definition. +from typing import Any, Dict -class ForceMultipartDict(dict): + +class ForceMultipartDict(Dict[str, Any]): """ A dictionary subclass that always evaluates to True in boolean contexts. @@ -9,7 +11,7 @@ class ForceMultipartDict(dict): the dictionary is empty, which would normally evaluate to False. 
""" - def __bool__(self): + def __bool__(self) -> bool: return True diff --git a/src/cohere/core/http_response.py b/src/cohere/core/http_response.py index 48a1798a5..2479747e8 100644 --- a/src/cohere/core/http_response.py +++ b/src/cohere/core/http_response.py @@ -4,8 +4,8 @@ import httpx +# Generic to represent the underlying type of the data wrapped by the HTTP response. T = TypeVar("T") -"""Generic to represent the underlying type of the data wrapped by the HTTP response.""" class BaseHttpResponse: diff --git a/src/cohere/core/http_sse/__init__.py b/src/cohere/core/http_sse/__init__.py new file mode 100644 index 000000000..730e5a338 --- /dev/null +++ b/src/cohere/core/http_sse/__init__.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from ._api import EventSource, aconnect_sse, connect_sse + from ._exceptions import SSEError + from ._models import ServerSentEvent +_dynamic_imports: typing.Dict[str, str] = { + "EventSource": "._api", + "SSEError": "._exceptions", + "ServerSentEvent": "._models", + "aconnect_sse": "._api", + "connect_sse": "._api", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + + +__all__ = ["EventSource", "SSEError", "ServerSentEvent", "aconnect_sse", "connect_sse"] diff --git a/src/cohere/core/http_sse/_api.py b/src/cohere/core/http_sse/_api.py new file mode 100644 index 000000000..f900b3b68 --- /dev/null +++ b/src/cohere/core/http_sse/_api.py @@ -0,0 +1,112 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import re +from contextlib import asynccontextmanager, contextmanager +from typing import Any, AsyncGenerator, AsyncIterator, Iterator, cast + +import httpx +from ._decoders import SSEDecoder +from ._exceptions import SSEError +from ._models import ServerSentEvent + + +class EventSource: + def __init__(self, response: httpx.Response) -> None: + self._response = response + + def _check_content_type(self) -> None: + content_type = self._response.headers.get("content-type", "").partition(";")[0] + if "text/event-stream" not in content_type: + raise SSEError( + f"Expected response header Content-Type to contain 'text/event-stream', got {content_type!r}" + ) + + def _get_charset(self) -> str: + """Extract charset from Content-Type header, fallback to UTF-8.""" + content_type = self._response.headers.get("content-type", "") + + # Parse charset parameter using regex + charset_match = re.search(r"charset=([^;\s]+)", content_type, re.IGNORECASE) + if charset_match: + charset = charset_match.group(1).strip("\"'") + # Validate that it's a known encoding + try: + # Test if the charset is valid by trying to encode/decode + "test".encode(charset).decode(charset) + return charset + except (LookupError, UnicodeError): + # If charset is invalid, fall back to UTF-8 + pass + + # Default to UTF-8 if no charset specified or invalid charset + return "utf-8" + + @property + def response(self) -> httpx.Response: + return self._response + + def iter_sse(self) -> Iterator[ServerSentEvent]: + self._check_content_type() + decoder = SSEDecoder() + charset = self._get_charset() + + buffer = "" + for chunk in self._response.iter_bytes(): + # Decode chunk using detected charset + text_chunk = chunk.decode(charset, errors="replace") + buffer += text_chunk + + # Process complete lines + while "\n" in buffer: + line, buffer = buffer.split("\n", 1) + line = line.rstrip("\r") + sse = decoder.decode(line) + # when we reach a "\n\n" => line = '' + # => decoder will attempt to return an SSE Event + if sse is not None: + yield sse + + # Process any remaining data in buffer + if buffer.strip(): + line = buffer.rstrip("\r") + sse = decoder.decode(line) + if sse is not None: + yield sse + + async def aiter_sse(self) -> AsyncGenerator[ServerSentEvent, None]: + self._check_content_type() + decoder = SSEDecoder() + lines = cast(AsyncGenerator[str, None], self._response.aiter_lines()) + try: + async for line in lines: + line = line.rstrip("\n") + sse = decoder.decode(line) + if sse is not None: + yield sse + finally: + await lines.aclose() + + +@contextmanager +def connect_sse(client: httpx.Client, method: str, url: str, **kwargs: Any) -> Iterator[EventSource]: + headers = kwargs.pop("headers", {}) + headers["Accept"] = "text/event-stream" + headers["Cache-Control"] = "no-store" + + with client.stream(method, url, headers=headers, **kwargs) as response: + yield EventSource(response) + + +@asynccontextmanager +async def aconnect_sse( + client: httpx.AsyncClient, + method: str, + url: str, + **kwargs: Any, +) -> AsyncIterator[EventSource]: + headers = kwargs.pop("headers", {}) + headers["Accept"] = "text/event-stream" + headers["Cache-Control"] = "no-store" + + async with client.stream(method, url, headers=headers, **kwargs) as response: + yield EventSource(response) diff --git a/src/cohere/core/http_sse/_decoders.py b/src/cohere/core/http_sse/_decoders.py new file mode 100644 index 000000000..339b08901 --- /dev/null +++ b/src/cohere/core/http_sse/_decoders.py @@ -0,0 +1,61 @@ +# This file was auto-generated by Fern from our API 
Definition. + +from typing import List, Optional + +from ._models import ServerSentEvent + + +class SSEDecoder: + def __init__(self) -> None: + self._event = "" + self._data: List[str] = [] + self._last_event_id = "" + self._retry: Optional[int] = None + + def decode(self, line: str) -> Optional[ServerSentEvent]: + # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation # noqa: E501 + + if not line: + if not self._event and not self._data and not self._last_event_id and self._retry is None: + return None + + sse = ServerSentEvent( + event=self._event, + data="\n".join(self._data), + id=self._last_event_id, + retry=self._retry, + ) + + # NOTE: as per the SSE spec, do not reset last_event_id. + self._event = "" + self._data = [] + self._retry = None + + return sse + + if line.startswith(":"): + return None + + fieldname, _, value = line.partition(":") + + if value.startswith(" "): + value = value[1:] + + if fieldname == "event": + self._event = value + elif fieldname == "data": + self._data.append(value) + elif fieldname == "id": + if "\0" in value: + pass + else: + self._last_event_id = value + elif fieldname == "retry": + try: + self._retry = int(value) + except (TypeError, ValueError): + pass + else: + pass # Field is ignored. + + return None diff --git a/src/cohere/core/http_sse/_exceptions.py b/src/cohere/core/http_sse/_exceptions.py new file mode 100644 index 000000000..81605a8a6 --- /dev/null +++ b/src/cohere/core/http_sse/_exceptions.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import httpx + + +class SSEError(httpx.TransportError): + pass diff --git a/src/cohere/core/http_sse/_models.py b/src/cohere/core/http_sse/_models.py new file mode 100644 index 000000000..1af57f8fd --- /dev/null +++ b/src/cohere/core/http_sse/_models.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. 
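# --- Illustrative usage (not part of the generated diff) ---------------------
# Hedged sketch of the http_sse helpers in this package: connect_sse() wraps an
# httpx streaming request, EventSource.iter_sse() feeds each line through
# SSEDecoder, and ServerSentEvent.json() parses the data payload. The URL is a
# placeholder assumption, not a documented Cohere endpoint.

import httpx
from cohere.core.http_sse import connect_sse

with httpx.Client() as client:
    with connect_sse(client, "POST", "https://example.com/stream") as source:
        for sse in source.iter_sse():
            print(sse.event, sse.id, sse.retry)
            if sse.data:
                payload = sse.json()  # equivalent to json.loads(sse.data)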
+
+import json
+from dataclasses import dataclass
+from typing import Any, Optional
+
+
+@dataclass(frozen=True)
+class ServerSentEvent:
+    event: str = "message"
+    data: str = ""
+    id: str = ""
+    retry: Optional[int] = None
+
+    def json(self) -> Any:
+        """Parse the data field as JSON."""
+        return json.loads(self.data)
diff --git a/src/cohere/core/pydantic_utilities.py b/src/cohere/core/pydantic_utilities.py
index 0360ef49a..185e5c4f6 100644
--- a/src/cohere/core/pydantic_utilities.py
+++ b/src/cohere/core/pydantic_utilities.py
@@ -59,9 +59,9 @@ class UniversalBaseModel(pydantic.BaseModel):
         protected_namespaces=(),
     )
 
-    @pydantic.model_serializer(mode="wrap", when_used="json")  # type: ignore[attr-defined]
-    def serialize_model(self, handler: pydantic.SerializerFunctionWrapHandler) -> Any:  # type: ignore[name-defined]
-        serialized = handler(self)
+    @pydantic.model_serializer(mode="plain", when_used="json")  # type: ignore[attr-defined]
+    def serialize_model(self) -> Any:  # type: ignore[name-defined]
+        serialized = self.dict()  # type: ignore[attr-defined]
         data = {k: serialize_datetime(v) if isinstance(v, dt.datetime) else v for k, v in serialized.items()}
         return data
 
@@ -147,7 +147,10 @@ def dict(self, **kwargs: Any) -> Dict[str, Any]:
 
         dict_dump = super().dict(**kwargs_with_defaults_exclude_unset_include_fields)
 
-        return convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write")
+        return cast(
+            Dict[str, Any],
+            convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write"),
+        )
 
 
 def _union_list_of_pydantic_dicts(source: List[Any], destination: List[Any]) -> List[Any]:
@@ -217,7 +220,9 @@ def universal_root_validator(
 ) -> Callable[[AnyCallable], AnyCallable]:
     def decorator(func: AnyCallable) -> AnyCallable:
         if IS_PYDANTIC_V2:
-            return cast(AnyCallable, pydantic.model_validator(mode="before" if pre else "after")(func))  # type: ignore[attr-defined]
+            # In Pydantic v2, for RootModel we always use "before" mode
+            # The custom validators transform the input value before the model is created
+            return cast(AnyCallable, pydantic.model_validator(mode="before")(func))  # type: ignore[attr-defined]
         return cast(AnyCallable, pydantic.root_validator(pre=pre)(func))  # type: ignore[call-overload]
 
     return decorator
diff --git a/src/cohere/core/unchecked_base_model.py b/src/cohere/core/unchecked_base_model.py
index 2c2d92a7b..9ea71ca68 100644
--- a/src/cohere/core/unchecked_base_model.py
+++ b/src/cohere/core/unchecked_base_model.py
@@ -124,12 +124,50 @@ def construct(
         return m
 
 
+def _validate_collection_items_compatible(collection: typing.Any, target_type: typing.Type[typing.Any]) -> bool:
+    """
+    Validate that all items in a collection are compatible with the target type.
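# --- Illustrative usage (not part of the generated diff) ---------------------
# Hedged sketch of the undiscriminated-union handling in unchecked_base_model.py
# below: construct_type() first tries a validated parse of each union member,
# and the new pass prefers members whose Literal field defaults match the
# payload before falling back to the first successful cast. Cat and Dog are
# hypothetical models, not SDK types.

import typing
import pydantic
from cohere.core.unchecked_base_model import construct_type

class Cat(pydantic.BaseModel):
    kind: typing.Literal["cat"] = "cat"

class Dog(pydantic.BaseModel):
    kind: typing.Literal["dog"] = "dog"

pet = construct_type(type_=typing.Union[Cat, Dog], object_={"kind": "dog"})
assert isinstance(pet, Dog)  # resolves to Dog: its Literal "kind" accepts the payload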
+ + Args: + collection: The collection to validate (list, set, or dict values) + target_type: The target type to validate against + + Returns: + True if all items are compatible, False otherwise + """ + if inspect.isclass(target_type) and issubclass(target_type, pydantic.BaseModel): + for item in collection: + try: + # Try to validate the item against the target type + if isinstance(item, dict): + parse_obj_as(target_type, item) + else: + # If it's not a dict, it might already be the right type + if not isinstance(item, target_type): + return False + except Exception: + return False + return True + + def _convert_undiscriminated_union_type(union_type: typing.Type[typing.Any], object_: typing.Any) -> typing.Any: inner_types = get_args(union_type) if typing.Any in inner_types: return object_ for inner_type in inner_types: + # Handle lists of objects that need parsing + if get_origin(inner_type) is list and isinstance(object_, list): + list_inner_type = get_args(inner_type)[0] + try: + if inspect.isclass(list_inner_type) and issubclass(list_inner_type, pydantic.BaseModel): + # Validate that all items in the list are compatible with the target type + if _validate_collection_items_compatible(object_, list_inner_type): + parsed_list = [parse_obj_as(object_=item, type_=list_inner_type) for item in object_] + return parsed_list + except Exception: + pass + try: if inspect.isclass(inner_type) and issubclass(inner_type, pydantic.BaseModel): # Attempt a validated parse until one works @@ -137,7 +175,42 @@ def _convert_undiscriminated_union_type(union_type: typing.Type[typing.Any], obj except Exception: continue - # If none of the types work, just return the first successful cast + # If none of the types work, try matching literal fields first, then fall back + # First pass: try types where all literal fields match the object's values + for inner_type in inner_types: + if inspect.isclass(inner_type) and issubclass(inner_type, pydantic.BaseModel): + fields = _get_model_fields(inner_type) + literal_fields_match = True + + for field_name, field in fields.items(): + # Check if this field has a Literal type + if IS_PYDANTIC_V2: + field_type = field.annotation # type: ignore # Pydantic v2 + else: + field_type = field.outer_type_ # type: ignore # Pydantic v1 + + if is_literal_type(field_type): # type: ignore[arg-type] + field_default = _get_field_default(field) + name_or_alias = get_field_to_alias_mapping(inner_type).get(field_name, field_name) + # Get the value from the object + if isinstance(object_, dict): + object_value = object_.get(name_or_alias) + else: + object_value = getattr(object_, name_or_alias, None) + + # If the literal field value doesn't match, this type is not a match + if object_value is not None and field_default != object_value: + literal_fields_match = False + break + + # If all literal fields match, try to construct this type + if literal_fields_match: + try: + return construct_type(object_=object_, type_=inner_type) + except Exception: + continue + + # Second pass: if no literal matches, just return the first successful cast for inner_type in inner_types: try: return construct_type(object_=object_, type_=inner_type) @@ -148,7 +221,7 @@ def _convert_undiscriminated_union_type(union_type: typing.Type[typing.Any], obj def _convert_union_type(type_: typing.Type[typing.Any], object_: typing.Any) -> typing.Any: base_type = get_origin(type_) or type_ union_type = type_ - if base_type == typing_extensions.Annotated: + if base_type == typing_extensions.Annotated: # type: 
ignore[comparison-overlap] union_type = get_args(type_)[0] annotated_metadata = get_args(type_)[1:] for metadata in annotated_metadata: @@ -179,11 +252,11 @@ def construct_type(*, type_: typing.Type[typing.Any], object_: typing.Any) -> ty return None base_type = get_origin(type_) or type_ - is_annotated = base_type == typing_extensions.Annotated + is_annotated = base_type == typing_extensions.Annotated # type: ignore[comparison-overlap] maybe_annotation_members = get_args(type_) is_annotated_union = is_annotated and is_union(get_origin(maybe_annotation_members[0])) - if base_type == typing.Any: + if base_type == typing.Any: # type: ignore[comparison-overlap] return object_ if base_type == dict: diff --git a/src/cohere/datasets/__init__.py b/src/cohere/datasets/__init__.py index c1bbcaa4b..e81b54b14 100644 --- a/src/cohere/datasets/__init__.py +++ b/src/cohere/datasets/__init__.py @@ -2,6 +2,38 @@ # isort: skip_file -from .types import DatasetsCreateResponse, DatasetsGetResponse, DatasetsGetUsageResponse, DatasetsListResponse +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .types import DatasetsCreateResponse, DatasetsGetResponse, DatasetsGetUsageResponse, DatasetsListResponse +_dynamic_imports: typing.Dict[str, str] = { + "DatasetsCreateResponse": ".types", + "DatasetsGetResponse": ".types", + "DatasetsGetUsageResponse": ".types", + "DatasetsListResponse": ".types", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = ["DatasetsCreateResponse", "DatasetsGetResponse", "DatasetsGetUsageResponse", "DatasetsListResponse"] diff --git a/src/cohere/datasets/client.py b/src/cohere/datasets/client.py index e00e404d8..3b6a56d03 100644 --- a/src/cohere/datasets/client.py +++ b/src/cohere/datasets/client.py @@ -77,13 +77,26 @@ def list( Examples -------- + import datetime + from cohere import Client client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) - client.datasets.list() + client.datasets.list( + dataset_type="datasetType", + before=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + after=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + limit=1.1, + offset=1.1, + validation_status="unknown", + ) """ _response = self._raw_client.list( dataset_type=dataset_type, @@ -165,6 +178,10 @@ def create( client.datasets.create( name="name", type="embed-input", + keep_original_file=True, + skip_malformed_input=True, + text_separator="text_separator", + csv_delimiter="csv_delimiter", ) """ _response = self._raw_client.create( @@ -334,6 +351,7 @@ async def list( Examples -------- import asyncio + import datetime from cohere import AsyncClient @@ -344,7 +362,18 @@ async def list( async def main() -> None: - await client.datasets.list() + await client.datasets.list( + dataset_type="datasetType", + before=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + 
), + after=datetime.datetime.fromisoformat( + "2024-01-15 09:30:00+00:00", + ), + limit=1.1, + offset=1.1, + validation_status="unknown", + ) asyncio.run(main()) @@ -434,6 +463,10 @@ async def main() -> None: await client.datasets.create( name="name", type="embed-input", + keep_original_file=True, + skip_malformed_input=True, + text_separator="text_separator", + csv_delimiter="csv_delimiter", ) diff --git a/src/cohere/datasets/types/__init__.py b/src/cohere/datasets/types/__init__.py index 6ab852406..44406153d 100644 --- a/src/cohere/datasets/types/__init__.py +++ b/src/cohere/datasets/types/__init__.py @@ -2,9 +2,41 @@ # isort: skip_file -from .datasets_create_response import DatasetsCreateResponse -from .datasets_get_response import DatasetsGetResponse -from .datasets_get_usage_response import DatasetsGetUsageResponse -from .datasets_list_response import DatasetsListResponse +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .datasets_create_response import DatasetsCreateResponse + from .datasets_get_response import DatasetsGetResponse + from .datasets_get_usage_response import DatasetsGetUsageResponse + from .datasets_list_response import DatasetsListResponse +_dynamic_imports: typing.Dict[str, str] = { + "DatasetsCreateResponse": ".datasets_create_response", + "DatasetsGetResponse": ".datasets_get_response", + "DatasetsGetUsageResponse": ".datasets_get_usage_response", + "DatasetsListResponse": ".datasets_list_response", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = ["DatasetsCreateResponse", "DatasetsGetResponse", "DatasetsGetUsageResponse", "DatasetsListResponse"] diff --git a/src/cohere/embed_jobs/__init__.py b/src/cohere/embed_jobs/__init__.py index 4e3669f0b..e4995ab08 100644 --- a/src/cohere/embed_jobs/__init__.py +++ b/src/cohere/embed_jobs/__init__.py @@ -2,6 +2,33 @@ # isort: skip_file -from .types import CreateEmbedJobRequestTruncate +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .types import CreateEmbedJobRequestTruncate +_dynamic_imports: typing.Dict[str, str] = {"CreateEmbedJobRequestTruncate": ".types"} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = ["CreateEmbedJobRequestTruncate"] diff --git 
a/src/cohere/embed_jobs/types/__init__.py b/src/cohere/embed_jobs/types/__init__.py index c918300d5..1142ba881 100644 --- a/src/cohere/embed_jobs/types/__init__.py +++ b/src/cohere/embed_jobs/types/__init__.py @@ -2,6 +2,33 @@ # isort: skip_file -from .create_embed_job_request_truncate import CreateEmbedJobRequestTruncate +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .create_embed_job_request_truncate import CreateEmbedJobRequestTruncate +_dynamic_imports: typing.Dict[str, str] = {"CreateEmbedJobRequestTruncate": ".create_embed_job_request_truncate"} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = ["CreateEmbedJobRequestTruncate"] diff --git a/src/cohere/errors/__init__.py b/src/cohere/errors/__init__.py index 13858f501..9c314d756 100644 --- a/src/cohere/errors/__init__.py +++ b/src/cohere/errors/__init__.py @@ -2,18 +2,58 @@ # isort: skip_file -from .bad_request_error import BadRequestError -from .client_closed_request_error import ClientClosedRequestError -from .forbidden_error import ForbiddenError -from .gateway_timeout_error import GatewayTimeoutError -from .internal_server_error import InternalServerError -from .invalid_token_error import InvalidTokenError -from .not_found_error import NotFoundError -from .not_implemented_error import NotImplementedError -from .service_unavailable_error import ServiceUnavailableError -from .too_many_requests_error import TooManyRequestsError -from .unauthorized_error import UnauthorizedError -from .unprocessable_entity_error import UnprocessableEntityError +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .bad_request_error import BadRequestError + from .client_closed_request_error import ClientClosedRequestError + from .forbidden_error import ForbiddenError + from .gateway_timeout_error import GatewayTimeoutError + from .internal_server_error import InternalServerError + from .invalid_token_error import InvalidTokenError + from .not_found_error import NotFoundError + from .not_implemented_error import NotImplementedError + from .service_unavailable_error import ServiceUnavailableError + from .too_many_requests_error import TooManyRequestsError + from .unauthorized_error import UnauthorizedError + from .unprocessable_entity_error import UnprocessableEntityError +_dynamic_imports: typing.Dict[str, str] = { + "BadRequestError": ".bad_request_error", + "ClientClosedRequestError": ".client_closed_request_error", + "ForbiddenError": ".forbidden_error", + "GatewayTimeoutError": ".gateway_timeout_error", + "InternalServerError": ".internal_server_error", + "InvalidTokenError": ".invalid_token_error", + "NotFoundError": ".not_found_error", + "NotImplementedError": ".not_implemented_error", + "ServiceUnavailableError": ".service_unavailable_error", + "TooManyRequestsError": ".too_many_requests_error", + "UnauthorizedError": 
".unauthorized_error", + "UnprocessableEntityError": ".unprocessable_entity_error", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = [ "BadRequestError", diff --git a/src/cohere/finetuning/__init__.py b/src/cohere/finetuning/__init__.py index b87e4ec1a..5cd9d4724 100644 --- a/src/cohere/finetuning/__init__.py +++ b/src/cohere/finetuning/__init__.py @@ -2,27 +2,74 @@ # isort: skip_file -from . import finetuning -from .finetuning import ( - BaseModel, - BaseType, - CreateFinetunedModelResponse, - DeleteFinetunedModelResponse, - Event, - FinetunedModel, - GetFinetunedModelResponse, - Hyperparameters, - ListEventsResponse, - ListFinetunedModelsResponse, - ListTrainingStepMetricsResponse, - LoraTargetModules, - Settings, - Status, - Strategy, - TrainingStepMetrics, - UpdateFinetunedModelResponse, - WandbConfig, -) +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from . import finetuning + from .finetuning import ( + BaseModel, + BaseType, + CreateFinetunedModelResponse, + DeleteFinetunedModelResponse, + Event, + FinetunedModel, + GetFinetunedModelResponse, + Hyperparameters, + ListEventsResponse, + ListFinetunedModelsResponse, + ListTrainingStepMetricsResponse, + LoraTargetModules, + Settings, + Status, + Strategy, + TrainingStepMetrics, + UpdateFinetunedModelResponse, + WandbConfig, + ) +_dynamic_imports: typing.Dict[str, str] = { + "BaseModel": ".finetuning", + "BaseType": ".finetuning", + "CreateFinetunedModelResponse": ".finetuning", + "DeleteFinetunedModelResponse": ".finetuning", + "Event": ".finetuning", + "FinetunedModel": ".finetuning", + "GetFinetunedModelResponse": ".finetuning", + "Hyperparameters": ".finetuning", + "ListEventsResponse": ".finetuning", + "ListFinetunedModelsResponse": ".finetuning", + "ListTrainingStepMetricsResponse": ".finetuning", + "LoraTargetModules": ".finetuning", + "Settings": ".finetuning", + "Status": ".finetuning", + "Strategy": ".finetuning", + "TrainingStepMetrics": ".finetuning", + "UpdateFinetunedModelResponse": ".finetuning", + "WandbConfig": ".finetuning", + "finetuning": ".finetuning", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = [ "BaseModel", diff --git a/src/cohere/finetuning/client.py 
b/src/cohere/finetuning/client.py index aa4a8d066..4a128ccbe 100644 --- a/src/cohere/finetuning/client.py +++ b/src/cohere/finetuning/client.py @@ -79,7 +79,11 @@ def list_finetuned_models( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) - client.finetuning.list_finetuned_models() + client.finetuning.list_finetuned_models( + page_size=1, + page_token="page_token", + order_by="order_by", + ) """ _response = self._raw_client.list_finetuned_models( page_size=page_size, page_token=page_token, order_by=order_by, request_options=request_options @@ -308,6 +312,9 @@ def list_events( ) client.finetuning.list_events( finetuned_model_id="finetuned_model_id", + page_size=1, + page_token="page_token", + order_by="order_by", ) """ _response = self._raw_client.list_events( @@ -362,6 +369,8 @@ def list_training_step_metrics( ) client.finetuning.list_training_step_metrics( finetuned_model_id="finetuned_model_id", + page_size=1, + page_token="page_token", ) """ _response = self._raw_client.list_training_step_metrics( @@ -434,7 +443,11 @@ async def list_finetuned_models( async def main() -> None: - await client.finetuning.list_finetuned_models() + await client.finetuning.list_finetuned_models( + page_size=1, + page_token="page_token", + order_by="order_by", + ) asyncio.run(main()) @@ -703,6 +716,9 @@ async def list_events( async def main() -> None: await client.finetuning.list_events( finetuned_model_id="finetuned_model_id", + page_size=1, + page_token="page_token", + order_by="order_by", ) @@ -765,6 +781,8 @@ async def list_training_step_metrics( async def main() -> None: await client.finetuning.list_training_step_metrics( finetuned_model_id="finetuned_model_id", + page_size=1, + page_token="page_token", ) diff --git a/src/cohere/finetuning/finetuning/__init__.py b/src/cohere/finetuning/finetuning/__init__.py index 5fff105fa..1091c0e34 100644 --- a/src/cohere/finetuning/finetuning/__init__.py +++ b/src/cohere/finetuning/finetuning/__init__.py @@ -2,26 +2,72 @@ # isort: skip_file -from .types import ( - BaseModel, - BaseType, - CreateFinetunedModelResponse, - DeleteFinetunedModelResponse, - Event, - FinetunedModel, - GetFinetunedModelResponse, - Hyperparameters, - ListEventsResponse, - ListFinetunedModelsResponse, - ListTrainingStepMetricsResponse, - LoraTargetModules, - Settings, - Status, - Strategy, - TrainingStepMetrics, - UpdateFinetunedModelResponse, - WandbConfig, -) +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .types import ( + BaseModel, + BaseType, + CreateFinetunedModelResponse, + DeleteFinetunedModelResponse, + Event, + FinetunedModel, + GetFinetunedModelResponse, + Hyperparameters, + ListEventsResponse, + ListFinetunedModelsResponse, + ListTrainingStepMetricsResponse, + LoraTargetModules, + Settings, + Status, + Strategy, + TrainingStepMetrics, + UpdateFinetunedModelResponse, + WandbConfig, + ) +_dynamic_imports: typing.Dict[str, str] = { + "BaseModel": ".types", + "BaseType": ".types", + "CreateFinetunedModelResponse": ".types", + "DeleteFinetunedModelResponse": ".types", + "Event": ".types", + "FinetunedModel": ".types", + "GetFinetunedModelResponse": ".types", + "Hyperparameters": ".types", + "ListEventsResponse": ".types", + "ListFinetunedModelsResponse": ".types", + "ListTrainingStepMetricsResponse": ".types", + "LoraTargetModules": ".types", + "Settings": ".types", + "Status": ".types", + "Strategy": ".types", + "TrainingStepMetrics": ".types", + "UpdateFinetunedModelResponse": ".types", + "WandbConfig": ".types", +} + + +def 
__getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = [ "BaseModel", diff --git a/src/cohere/finetuning/finetuning/types/__init__.py b/src/cohere/finetuning/finetuning/types/__init__.py index 09731cd48..a96470875 100644 --- a/src/cohere/finetuning/finetuning/types/__init__.py +++ b/src/cohere/finetuning/finetuning/types/__init__.py @@ -2,24 +2,70 @@ # isort: skip_file -from .base_model import BaseModel -from .base_type import BaseType -from .create_finetuned_model_response import CreateFinetunedModelResponse -from .delete_finetuned_model_response import DeleteFinetunedModelResponse -from .event import Event -from .finetuned_model import FinetunedModel -from .get_finetuned_model_response import GetFinetunedModelResponse -from .hyperparameters import Hyperparameters -from .list_events_response import ListEventsResponse -from .list_finetuned_models_response import ListFinetunedModelsResponse -from .list_training_step_metrics_response import ListTrainingStepMetricsResponse -from .lora_target_modules import LoraTargetModules -from .settings import Settings -from .status import Status -from .strategy import Strategy -from .training_step_metrics import TrainingStepMetrics -from .update_finetuned_model_response import UpdateFinetunedModelResponse -from .wandb_config import WandbConfig +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .base_model import BaseModel + from .base_type import BaseType + from .create_finetuned_model_response import CreateFinetunedModelResponse + from .delete_finetuned_model_response import DeleteFinetunedModelResponse + from .event import Event + from .finetuned_model import FinetunedModel + from .get_finetuned_model_response import GetFinetunedModelResponse + from .hyperparameters import Hyperparameters + from .list_events_response import ListEventsResponse + from .list_finetuned_models_response import ListFinetunedModelsResponse + from .list_training_step_metrics_response import ListTrainingStepMetricsResponse + from .lora_target_modules import LoraTargetModules + from .settings import Settings + from .status import Status + from .strategy import Strategy + from .training_step_metrics import TrainingStepMetrics + from .update_finetuned_model_response import UpdateFinetunedModelResponse + from .wandb_config import WandbConfig +_dynamic_imports: typing.Dict[str, str] = { + "BaseModel": ".base_model", + "BaseType": ".base_type", + "CreateFinetunedModelResponse": ".create_finetuned_model_response", + "DeleteFinetunedModelResponse": ".delete_finetuned_model_response", + "Event": ".event", + "FinetunedModel": ".finetuned_model", + "GetFinetunedModelResponse": ".get_finetuned_model_response", + "Hyperparameters": ".hyperparameters", + "ListEventsResponse": ".list_events_response", + "ListFinetunedModelsResponse": ".list_finetuned_models_response", + "ListTrainingStepMetricsResponse": 
".list_training_step_metrics_response", + "LoraTargetModules": ".lora_target_modules", + "Settings": ".settings", + "Status": ".status", + "Strategy": ".strategy", + "TrainingStepMetrics": ".training_step_metrics", + "UpdateFinetunedModelResponse": ".update_finetuned_model_response", + "WandbConfig": ".wandb_config", +} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + __all__ = [ "BaseModel", diff --git a/src/cohere/models/client.py b/src/cohere/models/client.py index 61bd15f60..1637805dd 100644 --- a/src/cohere/models/client.py +++ b/src/cohere/models/client.py @@ -99,7 +99,12 @@ def list( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) - client.models.list() + client.models.list( + page_size=1.1, + page_token="page_token", + endpoint="chat", + default_only=True, + ) """ _response = self._raw_client.list( page_size=page_size, @@ -213,7 +218,12 @@ async def list( async def main() -> None: - await client.models.list() + await client.models.list( + page_size=1.1, + page_token="page_token", + endpoint="chat", + default_only=True, + ) asyncio.run(main()) diff --git a/src/cohere/types/__init__.py b/src/cohere/types/__init__.py index 5e5b9bc68..54bd3509c 100644 --- a/src/cohere/types/__init__.py +++ b/src/cohere/types/__init__.py @@ -2,234 +2,495 @@ # isort: skip_file -from .api_meta import ApiMeta -from .api_meta_api_version import ApiMetaApiVersion -from .api_meta_billed_units import ApiMetaBilledUnits -from .api_meta_tokens import ApiMetaTokens -from .assistant_message import AssistantMessage -from .assistant_message_response import AssistantMessageResponse -from .assistant_message_response_content_item import ( - AssistantMessageResponseContentItem, - TextAssistantMessageResponseContentItem, - ThinkingAssistantMessageResponseContentItem, -) -from .assistant_message_v2content import AssistantMessageV2Content -from .assistant_message_v2content_item import ( - AssistantMessageV2ContentItem, - TextAssistantMessageV2ContentItem, - ThinkingAssistantMessageV2ContentItem, -) -from .auth_token_type import AuthTokenType -from .chat_citation import ChatCitation -from .chat_citation_generation_event import ChatCitationGenerationEvent -from .chat_citation_type import ChatCitationType -from .chat_connector import ChatConnector -from .chat_content_delta_event import ChatContentDeltaEvent -from .chat_content_delta_event_delta import ChatContentDeltaEventDelta -from .chat_content_delta_event_delta_message import ChatContentDeltaEventDeltaMessage -from .chat_content_delta_event_delta_message_content import ChatContentDeltaEventDeltaMessageContent -from .chat_content_end_event import ChatContentEndEvent -from .chat_content_start_event import ChatContentStartEvent -from .chat_content_start_event_delta import ChatContentStartEventDelta -from .chat_content_start_event_delta_message import ChatContentStartEventDeltaMessage -from 
.chat_content_start_event_delta_message_content import ChatContentStartEventDeltaMessageContent -from .chat_content_start_event_delta_message_content_type import ChatContentStartEventDeltaMessageContentType -from .chat_data_metrics import ChatDataMetrics -from .chat_debug_event import ChatDebugEvent -from .chat_document import ChatDocument -from .chat_document_source import ChatDocumentSource -from .chat_finish_reason import ChatFinishReason -from .chat_message import ChatMessage -from .chat_message_end_event import ChatMessageEndEvent -from .chat_message_end_event_delta import ChatMessageEndEventDelta -from .chat_message_start_event import ChatMessageStartEvent -from .chat_message_start_event_delta import ChatMessageStartEventDelta -from .chat_message_start_event_delta_message import ChatMessageStartEventDeltaMessage -from .chat_message_v2 import ( - AssistantChatMessageV2, - ChatMessageV2, - SystemChatMessageV2, - ToolChatMessageV2, - UserChatMessageV2, -) -from .chat_messages import ChatMessages -from .chat_request_citation_quality import ChatRequestCitationQuality -from .chat_request_prompt_truncation import ChatRequestPromptTruncation -from .chat_request_safety_mode import ChatRequestSafetyMode -from .chat_search_queries_generation_event import ChatSearchQueriesGenerationEvent -from .chat_search_query import ChatSearchQuery -from .chat_search_result import ChatSearchResult -from .chat_search_result_connector import ChatSearchResultConnector -from .chat_search_results_event import ChatSearchResultsEvent -from .chat_stream_end_event import ChatStreamEndEvent -from .chat_stream_end_event_finish_reason import ChatStreamEndEventFinishReason -from .chat_stream_event import ChatStreamEvent -from .chat_stream_event_type import ChatStreamEventType -from .chat_stream_request_citation_quality import ChatStreamRequestCitationQuality -from .chat_stream_request_prompt_truncation import ChatStreamRequestPromptTruncation -from .chat_stream_request_safety_mode import ChatStreamRequestSafetyMode -from .chat_stream_start_event import ChatStreamStartEvent -from .chat_text_content import ChatTextContent -from .chat_text_generation_event import ChatTextGenerationEvent -from .chat_text_response_format import ChatTextResponseFormat -from .chat_text_response_format_v2 import ChatTextResponseFormatV2 -from .chat_thinking_content import ChatThinkingContent -from .chat_tool_call_delta_event import ChatToolCallDeltaEvent -from .chat_tool_call_delta_event_delta import ChatToolCallDeltaEventDelta -from .chat_tool_call_delta_event_delta_message import ChatToolCallDeltaEventDeltaMessage -from .chat_tool_call_delta_event_delta_message_tool_calls import ChatToolCallDeltaEventDeltaMessageToolCalls -from .chat_tool_call_delta_event_delta_message_tool_calls_function import ( - ChatToolCallDeltaEventDeltaMessageToolCallsFunction, -) -from .chat_tool_call_end_event import ChatToolCallEndEvent -from .chat_tool_call_start_event import ChatToolCallStartEvent -from .chat_tool_call_start_event_delta import ChatToolCallStartEventDelta -from .chat_tool_call_start_event_delta_message import ChatToolCallStartEventDeltaMessage -from .chat_tool_calls_chunk_event import ChatToolCallsChunkEvent -from .chat_tool_calls_generation_event import ChatToolCallsGenerationEvent -from .chat_tool_message import ChatToolMessage -from .chat_tool_plan_delta_event import ChatToolPlanDeltaEvent -from .chat_tool_plan_delta_event_delta import ChatToolPlanDeltaEventDelta -from .chat_tool_plan_delta_event_delta_message import 
ChatToolPlanDeltaEventDeltaMessage -from .chat_tool_source import ChatToolSource -from .check_api_key_response import CheckApiKeyResponse -from .citation import Citation -from .citation_end_event import CitationEndEvent -from .citation_options import CitationOptions -from .citation_options_mode import CitationOptionsMode -from .citation_start_event import CitationStartEvent -from .citation_start_event_delta import CitationStartEventDelta -from .citation_start_event_delta_message import CitationStartEventDeltaMessage -from .citation_type import CitationType -from .classify_data_metrics import ClassifyDataMetrics -from .classify_example import ClassifyExample -from .classify_request_truncate import ClassifyRequestTruncate -from .classify_response import ClassifyResponse -from .classify_response_classifications_item import ClassifyResponseClassificationsItem -from .classify_response_classifications_item_classification_type import ( - ClassifyResponseClassificationsItemClassificationType, -) -from .classify_response_classifications_item_labels_value import ClassifyResponseClassificationsItemLabelsValue -from .compatible_endpoint import CompatibleEndpoint -from .connector import Connector -from .connector_auth_status import ConnectorAuthStatus -from .connector_o_auth import ConnectorOAuth -from .content import Content, ImageUrlContent, TextContent -from .create_connector_o_auth import CreateConnectorOAuth -from .create_connector_response import CreateConnectorResponse -from .create_connector_service_auth import CreateConnectorServiceAuth -from .create_embed_job_response import CreateEmbedJobResponse -from .dataset import Dataset -from .dataset_part import DatasetPart -from .dataset_type import DatasetType -from .dataset_validation_status import DatasetValidationStatus -from .delete_connector_response import DeleteConnectorResponse -from .detokenize_response import DetokenizeResponse -from .document import Document -from .document_content import DocumentContent -from .embed_by_type_response import EmbedByTypeResponse -from .embed_by_type_response_embeddings import EmbedByTypeResponseEmbeddings -from .embed_content import EmbedContent, ImageUrlEmbedContent, TextEmbedContent -from .embed_floats_response import EmbedFloatsResponse -from .embed_image import EmbedImage -from .embed_image_url import EmbedImageUrl -from .embed_input import EmbedInput -from .embed_input_type import EmbedInputType -from .embed_job import EmbedJob -from .embed_job_status import EmbedJobStatus -from .embed_job_truncate import EmbedJobTruncate -from .embed_request_truncate import EmbedRequestTruncate -from .embed_response import EmbedResponse, EmbeddingsByTypeEmbedResponse, EmbeddingsFloatsEmbedResponse -from .embed_text import EmbedText -from .embedding_type import EmbeddingType -from .finetune_dataset_metrics import FinetuneDatasetMetrics -from .finish_reason import FinishReason -from .generate_request_return_likelihoods import GenerateRequestReturnLikelihoods -from .generate_request_truncate import GenerateRequestTruncate -from .generate_stream_end import GenerateStreamEnd -from .generate_stream_end_response import GenerateStreamEndResponse -from .generate_stream_error import GenerateStreamError -from .generate_stream_event import GenerateStreamEvent -from .generate_stream_request_return_likelihoods import GenerateStreamRequestReturnLikelihoods -from .generate_stream_request_truncate import GenerateStreamRequestTruncate -from .generate_stream_text import GenerateStreamText -from .generate_streamed_response import ( - 
GenerateStreamedResponse, - StreamEndGenerateStreamedResponse, - StreamErrorGenerateStreamedResponse, - TextGenerationGenerateStreamedResponse, -) -from .generation import Generation -from .get_connector_response import GetConnectorResponse -from .get_model_response import GetModelResponse -from .image import Image -from .image_content import ImageContent -from .image_url import ImageUrl -from .image_url_detail import ImageUrlDetail -from .json_response_format import JsonResponseFormat -from .json_response_format_v2 import JsonResponseFormatV2 -from .label_metric import LabelMetric -from .list_connectors_response import ListConnectorsResponse -from .list_embed_job_response import ListEmbedJobResponse -from .list_models_response import ListModelsResponse -from .logprob_item import LogprobItem -from .message import ChatbotMessage, Message, SystemMessage, ToolMessage, UserMessage -from .metrics import Metrics -from .non_streamed_chat_response import NonStreamedChatResponse -from .o_auth_authorize_response import OAuthAuthorizeResponse -from .parse_info import ParseInfo -from .rerank_document import RerankDocument -from .rerank_request_documents_item import RerankRequestDocumentsItem -from .rerank_response import RerankResponse -from .rerank_response_results_item import RerankResponseResultsItem -from .rerank_response_results_item_document import RerankResponseResultsItemDocument -from .reranker_data_metrics import RerankerDataMetrics -from .response_format import JsonObjectResponseFormat, ResponseFormat, TextResponseFormat -from .response_format_v2 import JsonObjectResponseFormatV2, ResponseFormatV2, TextResponseFormatV2 -from .single_generation import SingleGeneration -from .single_generation_in_stream import SingleGenerationInStream -from .single_generation_token_likelihoods_item import SingleGenerationTokenLikelihoodsItem -from .source import DocumentSource, Source, ToolSource -from .streamed_chat_response import ( - CitationGenerationStreamedChatResponse, - DebugStreamedChatResponse, - SearchQueriesGenerationStreamedChatResponse, - SearchResultsStreamedChatResponse, - StreamEndStreamedChatResponse, - StreamStartStreamedChatResponse, - StreamedChatResponse, - TextGenerationStreamedChatResponse, - ToolCallsChunkStreamedChatResponse, - ToolCallsGenerationStreamedChatResponse, -) -from .summarize_request_extractiveness import SummarizeRequestExtractiveness -from .summarize_request_format import SummarizeRequestFormat -from .summarize_request_length import SummarizeRequestLength -from .summarize_response import SummarizeResponse -from .system_message_v2 import SystemMessageV2 -from .system_message_v2content import SystemMessageV2Content -from .system_message_v2content_item import SystemMessageV2ContentItem, TextSystemMessageV2ContentItem -from .thinking import Thinking -from .thinking_type import ThinkingType -from .tokenize_response import TokenizeResponse -from .tool import Tool -from .tool_call import ToolCall -from .tool_call_delta import ToolCallDelta -from .tool_call_v2 import ToolCallV2 -from .tool_call_v2function import ToolCallV2Function -from .tool_content import DocumentToolContent, TextToolContent, ToolContent -from .tool_message_v2 import ToolMessageV2 -from .tool_message_v2content import ToolMessageV2Content -from .tool_parameter_definitions_value import ToolParameterDefinitionsValue -from .tool_result import ToolResult -from .tool_v2 import ToolV2 -from .tool_v2function import ToolV2Function -from .update_connector_response import UpdateConnectorResponse -from .usage import 
Usage -from .usage_billed_units import UsageBilledUnits -from .usage_tokens import UsageTokens -from .user_message_v2 import UserMessageV2 -from .user_message_v2content import UserMessageV2Content +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from .api_meta import ApiMeta + from .api_meta_api_version import ApiMetaApiVersion + from .api_meta_billed_units import ApiMetaBilledUnits + from .api_meta_tokens import ApiMetaTokens + from .assistant_message import AssistantMessage + from .assistant_message_response import AssistantMessageResponse + from .assistant_message_response_content_item import ( + AssistantMessageResponseContentItem, + TextAssistantMessageResponseContentItem, + ThinkingAssistantMessageResponseContentItem, + ) + from .assistant_message_v2content import AssistantMessageV2Content + from .assistant_message_v2content_item import ( + AssistantMessageV2ContentItem, + TextAssistantMessageV2ContentItem, + ThinkingAssistantMessageV2ContentItem, + ) + from .auth_token_type import AuthTokenType + from .chat_citation import ChatCitation + from .chat_citation_generation_event import ChatCitationGenerationEvent + from .chat_citation_type import ChatCitationType + from .chat_connector import ChatConnector + from .chat_content_delta_event import ChatContentDeltaEvent + from .chat_content_delta_event_delta import ChatContentDeltaEventDelta + from .chat_content_delta_event_delta_message import ChatContentDeltaEventDeltaMessage + from .chat_content_delta_event_delta_message_content import ChatContentDeltaEventDeltaMessageContent + from .chat_content_end_event import ChatContentEndEvent + from .chat_content_start_event import ChatContentStartEvent + from .chat_content_start_event_delta import ChatContentStartEventDelta + from .chat_content_start_event_delta_message import ChatContentStartEventDeltaMessage + from .chat_content_start_event_delta_message_content import ChatContentStartEventDeltaMessageContent + from .chat_content_start_event_delta_message_content_type import ChatContentStartEventDeltaMessageContentType + from .chat_data_metrics import ChatDataMetrics + from .chat_debug_event import ChatDebugEvent + from .chat_document import ChatDocument + from .chat_document_source import ChatDocumentSource + from .chat_finish_reason import ChatFinishReason + from .chat_message import ChatMessage + from .chat_message_end_event import ChatMessageEndEvent + from .chat_message_end_event_delta import ChatMessageEndEventDelta + from .chat_message_start_event import ChatMessageStartEvent + from .chat_message_start_event_delta import ChatMessageStartEventDelta + from .chat_message_start_event_delta_message import ChatMessageStartEventDeltaMessage + from .chat_message_v2 import ( + AssistantChatMessageV2, + ChatMessageV2, + SystemChatMessageV2, + ToolChatMessageV2, + UserChatMessageV2, + ) + from .chat_messages import ChatMessages + from .chat_request_citation_quality import ChatRequestCitationQuality + from .chat_request_prompt_truncation import ChatRequestPromptTruncation + from .chat_request_safety_mode import ChatRequestSafetyMode + from .chat_search_queries_generation_event import ChatSearchQueriesGenerationEvent + from .chat_search_query import ChatSearchQuery + from .chat_search_result import ChatSearchResult + from .chat_search_result_connector import ChatSearchResultConnector + from .chat_search_results_event import ChatSearchResultsEvent + from .chat_stream_end_event import ChatStreamEndEvent + from .chat_stream_end_event_finish_reason import 
ChatStreamEndEventFinishReason
+    from .chat_stream_event import ChatStreamEvent
+    from .chat_stream_event_type import ChatStreamEventType
+    from .chat_stream_request_citation_quality import ChatStreamRequestCitationQuality
+    from .chat_stream_request_prompt_truncation import ChatStreamRequestPromptTruncation
+    from .chat_stream_request_safety_mode import ChatStreamRequestSafetyMode
+    from .chat_stream_start_event import ChatStreamStartEvent
+    from .chat_text_content import ChatTextContent
+    from .chat_text_generation_event import ChatTextGenerationEvent
+    from .chat_text_response_format import ChatTextResponseFormat
+    from .chat_text_response_format_v2 import ChatTextResponseFormatV2
+    from .chat_thinking_content import ChatThinkingContent
+    from .chat_tool_call_delta_event import ChatToolCallDeltaEvent
+    from .chat_tool_call_delta_event_delta import ChatToolCallDeltaEventDelta
+    from .chat_tool_call_delta_event_delta_message import ChatToolCallDeltaEventDeltaMessage
+    from .chat_tool_call_delta_event_delta_message_tool_calls import ChatToolCallDeltaEventDeltaMessageToolCalls
+    from .chat_tool_call_delta_event_delta_message_tool_calls_function import (
+        ChatToolCallDeltaEventDeltaMessageToolCallsFunction,
+    )
+    from .chat_tool_call_end_event import ChatToolCallEndEvent
+    from .chat_tool_call_start_event import ChatToolCallStartEvent
+    from .chat_tool_call_start_event_delta import ChatToolCallStartEventDelta
+    from .chat_tool_call_start_event_delta_message import ChatToolCallStartEventDeltaMessage
+    from .chat_tool_calls_chunk_event import ChatToolCallsChunkEvent
+    from .chat_tool_calls_generation_event import ChatToolCallsGenerationEvent
+    from .chat_tool_message import ChatToolMessage
+    from .chat_tool_plan_delta_event import ChatToolPlanDeltaEvent
+    from .chat_tool_plan_delta_event_delta import ChatToolPlanDeltaEventDelta
+    from .chat_tool_plan_delta_event_delta_message import ChatToolPlanDeltaEventDeltaMessage
+    from .chat_tool_source import ChatToolSource
+    from .check_api_key_response import CheckApiKeyResponse
+    from .citation import Citation
+    from .citation_end_event import CitationEndEvent
+    from .citation_options import CitationOptions
+    from .citation_options_mode import CitationOptionsMode
+    from .citation_start_event import CitationStartEvent
+    from .citation_start_event_delta import CitationStartEventDelta
+    from .citation_start_event_delta_message import CitationStartEventDeltaMessage
+    from .citation_type import CitationType
+    from .classify_data_metrics import ClassifyDataMetrics
+    from .classify_example import ClassifyExample
+    from .classify_request_truncate import ClassifyRequestTruncate
+    from .classify_response import ClassifyResponse
+    from .classify_response_classifications_item import ClassifyResponseClassificationsItem
+    from .classify_response_classifications_item_classification_type import (
+        ClassifyResponseClassificationsItemClassificationType,
+    )
+    from .classify_response_classifications_item_labels_value import ClassifyResponseClassificationsItemLabelsValue
+    from .compatible_endpoint import CompatibleEndpoint
+    from .connector import Connector
+    from .connector_auth_status import ConnectorAuthStatus
+    from .connector_o_auth import ConnectorOAuth
+    from .content import Content, ImageUrlContent, TextContent
+    from .create_connector_o_auth import CreateConnectorOAuth
+    from .create_connector_response import CreateConnectorResponse
+    from .create_connector_service_auth import CreateConnectorServiceAuth
+    from .create_embed_job_response import CreateEmbedJobResponse
+    from .dataset import Dataset
+    from .dataset_part import DatasetPart
+    from .dataset_type import DatasetType
+    from .dataset_validation_status import DatasetValidationStatus
+    from .delete_connector_response import DeleteConnectorResponse
+    from .detokenize_response import DetokenizeResponse
+    from .document import Document
+    from .document_content import DocumentContent
+    from .embed_by_type_response import EmbedByTypeResponse
+    from .embed_by_type_response_embeddings import EmbedByTypeResponseEmbeddings
+    from .embed_content import EmbedContent, ImageUrlEmbedContent, TextEmbedContent
+    from .embed_floats_response import EmbedFloatsResponse
+    from .embed_image import EmbedImage
+    from .embed_image_url import EmbedImageUrl
+    from .embed_input import EmbedInput
+    from .embed_input_type import EmbedInputType
+    from .embed_job import EmbedJob
+    from .embed_job_status import EmbedJobStatus
+    from .embed_job_truncate import EmbedJobTruncate
+    from .embed_request_truncate import EmbedRequestTruncate
+    from .embed_response import EmbedResponse, EmbeddingsByTypeEmbedResponse, EmbeddingsFloatsEmbedResponse
+    from .embed_text import EmbedText
+    from .embedding_type import EmbeddingType
+    from .finetune_dataset_metrics import FinetuneDatasetMetrics
+    from .finish_reason import FinishReason
+    from .generate_request_return_likelihoods import GenerateRequestReturnLikelihoods
+    from .generate_request_truncate import GenerateRequestTruncate
+    from .generate_stream_end import GenerateStreamEnd
+    from .generate_stream_end_response import GenerateStreamEndResponse
+    from .generate_stream_error import GenerateStreamError
+    from .generate_stream_event import GenerateStreamEvent
+    from .generate_stream_request_return_likelihoods import GenerateStreamRequestReturnLikelihoods
+    from .generate_stream_request_truncate import GenerateStreamRequestTruncate
+    from .generate_stream_text import GenerateStreamText
+    from .generate_streamed_response import (
+        GenerateStreamedResponse,
+        StreamEndGenerateStreamedResponse,
+        StreamErrorGenerateStreamedResponse,
+        TextGenerationGenerateStreamedResponse,
+    )
+    from .generation import Generation
+    from .get_connector_response import GetConnectorResponse
+    from .get_model_response import GetModelResponse
+    from .image import Image
+    from .image_content import ImageContent
+    from .image_url import ImageUrl
+    from .image_url_detail import ImageUrlDetail
+    from .json_response_format import JsonResponseFormat
+    from .json_response_format_v2 import JsonResponseFormatV2
+    from .label_metric import LabelMetric
+    from .list_connectors_response import ListConnectorsResponse
+    from .list_embed_job_response import ListEmbedJobResponse
+    from .list_models_response import ListModelsResponse
+    from .logprob_item import LogprobItem
+    from .message import ChatbotMessage, Message, SystemMessage, ToolMessage, UserMessage
+    from .metrics import Metrics
+    from .non_streamed_chat_response import NonStreamedChatResponse
+    from .o_auth_authorize_response import OAuthAuthorizeResponse
+    from .parse_info import ParseInfo
+    from .rerank_document import RerankDocument
+    from .rerank_request_documents_item import RerankRequestDocumentsItem
+    from .rerank_response import RerankResponse
+    from .rerank_response_results_item import RerankResponseResultsItem
+    from .rerank_response_results_item_document import RerankResponseResultsItemDocument
+    from .reranker_data_metrics import RerankerDataMetrics
+    from .response_format import JsonObjectResponseFormat, ResponseFormat, TextResponseFormat
+    from .response_format_v2 import JsonObjectResponseFormatV2, ResponseFormatV2, TextResponseFormatV2
+    from .single_generation import SingleGeneration
+    from .single_generation_in_stream import SingleGenerationInStream
+    from .single_generation_token_likelihoods_item import SingleGenerationTokenLikelihoodsItem
+    from .source import DocumentSource, Source, ToolSource
+    from .streamed_chat_response import (
+        CitationGenerationStreamedChatResponse,
+        DebugStreamedChatResponse,
+        SearchQueriesGenerationStreamedChatResponse,
+        SearchResultsStreamedChatResponse,
+        StreamEndStreamedChatResponse,
+        StreamStartStreamedChatResponse,
+        StreamedChatResponse,
+        TextGenerationStreamedChatResponse,
+        ToolCallsChunkStreamedChatResponse,
+        ToolCallsGenerationStreamedChatResponse,
+    )
+    from .summarize_request_extractiveness import SummarizeRequestExtractiveness
+    from .summarize_request_format import SummarizeRequestFormat
+    from .summarize_request_length import SummarizeRequestLength
+    from .summarize_response import SummarizeResponse
+    from .system_message_v2 import SystemMessageV2
+    from .system_message_v2content import SystemMessageV2Content
+    from .system_message_v2content_item import SystemMessageV2ContentItem, TextSystemMessageV2ContentItem
+    from .thinking import Thinking
+    from .thinking_type import ThinkingType
+    from .tokenize_response import TokenizeResponse
+    from .tool import Tool
+    from .tool_call import ToolCall
+    from .tool_call_delta import ToolCallDelta
+    from .tool_call_v2 import ToolCallV2
+    from .tool_call_v2function import ToolCallV2Function
+    from .tool_content import DocumentToolContent, TextToolContent, ToolContent
+    from .tool_message_v2 import ToolMessageV2
+    from .tool_message_v2content import ToolMessageV2Content
+    from .tool_parameter_definitions_value import ToolParameterDefinitionsValue
+    from .tool_result import ToolResult
+    from .tool_v2 import ToolV2
+    from .tool_v2function import ToolV2Function
+    from .update_connector_response import UpdateConnectorResponse
+    from .usage import Usage
+    from .usage_billed_units import UsageBilledUnits
+    from .usage_tokens import UsageTokens
+    from .user_message_v2 import UserMessageV2
+    from .user_message_v2content import UserMessageV2Content
+_dynamic_imports: typing.Dict[str, str] = {
+    "ApiMeta": ".api_meta",
+    "ApiMetaApiVersion": ".api_meta_api_version",
+    "ApiMetaBilledUnits": ".api_meta_billed_units",
+    "ApiMetaTokens": ".api_meta_tokens",
+    "AssistantChatMessageV2": ".chat_message_v2",
+    "AssistantMessage": ".assistant_message",
+    "AssistantMessageResponse": ".assistant_message_response",
+    "AssistantMessageResponseContentItem": ".assistant_message_response_content_item",
+    "AssistantMessageV2Content": ".assistant_message_v2content",
+    "AssistantMessageV2ContentItem": ".assistant_message_v2content_item",
+    "AuthTokenType": ".auth_token_type",
+    "ChatCitation": ".chat_citation",
+    "ChatCitationGenerationEvent": ".chat_citation_generation_event",
+    "ChatCitationType": ".chat_citation_type",
+    "ChatConnector": ".chat_connector",
+    "ChatContentDeltaEvent": ".chat_content_delta_event",
+    "ChatContentDeltaEventDelta": ".chat_content_delta_event_delta",
+    "ChatContentDeltaEventDeltaMessage": ".chat_content_delta_event_delta_message",
+    "ChatContentDeltaEventDeltaMessageContent": ".chat_content_delta_event_delta_message_content",
+    "ChatContentEndEvent": ".chat_content_end_event",
+    "ChatContentStartEvent": ".chat_content_start_event",
+    "ChatContentStartEventDelta": ".chat_content_start_event_delta",
+    "ChatContentStartEventDeltaMessage": ".chat_content_start_event_delta_message",
+    "ChatContentStartEventDeltaMessageContent": ".chat_content_start_event_delta_message_content",
+    "ChatContentStartEventDeltaMessageContentType": ".chat_content_start_event_delta_message_content_type",
+    "ChatDataMetrics": ".chat_data_metrics",
+    "ChatDebugEvent": ".chat_debug_event",
+    "ChatDocument": ".chat_document",
+    "ChatDocumentSource": ".chat_document_source",
+    "ChatFinishReason": ".chat_finish_reason",
+    "ChatMessage": ".chat_message",
+    "ChatMessageEndEvent": ".chat_message_end_event",
+    "ChatMessageEndEventDelta": ".chat_message_end_event_delta",
+    "ChatMessageStartEvent": ".chat_message_start_event",
+    "ChatMessageStartEventDelta": ".chat_message_start_event_delta",
+    "ChatMessageStartEventDeltaMessage": ".chat_message_start_event_delta_message",
+    "ChatMessageV2": ".chat_message_v2",
+    "ChatMessages": ".chat_messages",
+    "ChatRequestCitationQuality": ".chat_request_citation_quality",
+    "ChatRequestPromptTruncation": ".chat_request_prompt_truncation",
+    "ChatRequestSafetyMode": ".chat_request_safety_mode",
+    "ChatSearchQueriesGenerationEvent": ".chat_search_queries_generation_event",
+    "ChatSearchQuery": ".chat_search_query",
+    "ChatSearchResult": ".chat_search_result",
+    "ChatSearchResultConnector": ".chat_search_result_connector",
+    "ChatSearchResultsEvent": ".chat_search_results_event",
+    "ChatStreamEndEvent": ".chat_stream_end_event",
+    "ChatStreamEndEventFinishReason": ".chat_stream_end_event_finish_reason",
+    "ChatStreamEvent": ".chat_stream_event",
+    "ChatStreamEventType": ".chat_stream_event_type",
+    "ChatStreamRequestCitationQuality": ".chat_stream_request_citation_quality",
+    "ChatStreamRequestPromptTruncation": ".chat_stream_request_prompt_truncation",
+    "ChatStreamRequestSafetyMode": ".chat_stream_request_safety_mode",
+    "ChatStreamStartEvent": ".chat_stream_start_event",
+    "ChatTextContent": ".chat_text_content",
+    "ChatTextGenerationEvent": ".chat_text_generation_event",
+    "ChatTextResponseFormat": ".chat_text_response_format",
+    "ChatTextResponseFormatV2": ".chat_text_response_format_v2",
+    "ChatThinkingContent": ".chat_thinking_content",
+    "ChatToolCallDeltaEvent": ".chat_tool_call_delta_event",
+    "ChatToolCallDeltaEventDelta": ".chat_tool_call_delta_event_delta",
+    "ChatToolCallDeltaEventDeltaMessage": ".chat_tool_call_delta_event_delta_message",
+    "ChatToolCallDeltaEventDeltaMessageToolCalls": ".chat_tool_call_delta_event_delta_message_tool_calls",
+    "ChatToolCallDeltaEventDeltaMessageToolCallsFunction": ".chat_tool_call_delta_event_delta_message_tool_calls_function",
+    "ChatToolCallEndEvent": ".chat_tool_call_end_event",
+    "ChatToolCallStartEvent": ".chat_tool_call_start_event",
+    "ChatToolCallStartEventDelta": ".chat_tool_call_start_event_delta",
+    "ChatToolCallStartEventDeltaMessage": ".chat_tool_call_start_event_delta_message",
+    "ChatToolCallsChunkEvent": ".chat_tool_calls_chunk_event",
+    "ChatToolCallsGenerationEvent": ".chat_tool_calls_generation_event",
+    "ChatToolMessage": ".chat_tool_message",
+    "ChatToolPlanDeltaEvent": ".chat_tool_plan_delta_event",
+    "ChatToolPlanDeltaEventDelta": ".chat_tool_plan_delta_event_delta",
+    "ChatToolPlanDeltaEventDeltaMessage": ".chat_tool_plan_delta_event_delta_message",
+    "ChatToolSource": ".chat_tool_source",
+    "ChatbotMessage": ".message",
+    "CheckApiKeyResponse": ".check_api_key_response",
+    "Citation": ".citation",
+    "CitationEndEvent": ".citation_end_event",
+    "CitationGenerationStreamedChatResponse": ".streamed_chat_response",
+    "CitationOptions": ".citation_options",
+    "CitationOptionsMode": ".citation_options_mode",
+    "CitationStartEvent": ".citation_start_event",
+    "CitationStartEventDelta": ".citation_start_event_delta",
+    "CitationStartEventDeltaMessage": ".citation_start_event_delta_message",
+    "CitationType": ".citation_type",
+    "ClassifyDataMetrics": ".classify_data_metrics",
+    "ClassifyExample": ".classify_example",
+    "ClassifyRequestTruncate": ".classify_request_truncate",
+    "ClassifyResponse": ".classify_response",
+    "ClassifyResponseClassificationsItem": ".classify_response_classifications_item",
+    "ClassifyResponseClassificationsItemClassificationType": ".classify_response_classifications_item_classification_type",
+    "ClassifyResponseClassificationsItemLabelsValue": ".classify_response_classifications_item_labels_value",
+    "CompatibleEndpoint": ".compatible_endpoint",
+    "Connector": ".connector",
+    "ConnectorAuthStatus": ".connector_auth_status",
+    "ConnectorOAuth": ".connector_o_auth",
+    "Content": ".content",
+    "CreateConnectorOAuth": ".create_connector_o_auth",
+    "CreateConnectorResponse": ".create_connector_response",
+    "CreateConnectorServiceAuth": ".create_connector_service_auth",
+    "CreateEmbedJobResponse": ".create_embed_job_response",
+    "Dataset": ".dataset",
+    "DatasetPart": ".dataset_part",
+    "DatasetType": ".dataset_type",
+    "DatasetValidationStatus": ".dataset_validation_status",
+    "DebugStreamedChatResponse": ".streamed_chat_response",
+    "DeleteConnectorResponse": ".delete_connector_response",
+    "DetokenizeResponse": ".detokenize_response",
+    "Document": ".document",
+    "DocumentContent": ".document_content",
+    "DocumentSource": ".source",
+    "DocumentToolContent": ".tool_content",
+    "EmbedByTypeResponse": ".embed_by_type_response",
+    "EmbedByTypeResponseEmbeddings": ".embed_by_type_response_embeddings",
+    "EmbedContent": ".embed_content",
+    "EmbedFloatsResponse": ".embed_floats_response",
+    "EmbedImage": ".embed_image",
+    "EmbedImageUrl": ".embed_image_url",
+    "EmbedInput": ".embed_input",
+    "EmbedInputType": ".embed_input_type",
+    "EmbedJob": ".embed_job",
+    "EmbedJobStatus": ".embed_job_status",
+    "EmbedJobTruncate": ".embed_job_truncate",
+    "EmbedRequestTruncate": ".embed_request_truncate",
+    "EmbedResponse": ".embed_response",
+    "EmbedText": ".embed_text",
+    "EmbeddingType": ".embedding_type",
+    "EmbeddingsByTypeEmbedResponse": ".embed_response",
+    "EmbeddingsFloatsEmbedResponse": ".embed_response",
+    "FinetuneDatasetMetrics": ".finetune_dataset_metrics",
+    "FinishReason": ".finish_reason",
+    "GenerateRequestReturnLikelihoods": ".generate_request_return_likelihoods",
+    "GenerateRequestTruncate": ".generate_request_truncate",
+    "GenerateStreamEnd": ".generate_stream_end",
+    "GenerateStreamEndResponse": ".generate_stream_end_response",
+    "GenerateStreamError": ".generate_stream_error",
+    "GenerateStreamEvent": ".generate_stream_event",
+    "GenerateStreamRequestReturnLikelihoods": ".generate_stream_request_return_likelihoods",
+    "GenerateStreamRequestTruncate": ".generate_stream_request_truncate",
+    "GenerateStreamText": ".generate_stream_text",
+    "GenerateStreamedResponse": ".generate_streamed_response",
+    "Generation": ".generation",
+    "GetConnectorResponse": ".get_connector_response",
+    "GetModelResponse": ".get_model_response",
+    "Image": ".image",
+    "ImageContent": ".image_content",
+    "ImageUrl": ".image_url",
+    "ImageUrlContent": ".content",
+    "ImageUrlDetail": ".image_url_detail",
+    "ImageUrlEmbedContent": ".embed_content",
+    "JsonObjectResponseFormat": ".response_format",
+    "JsonObjectResponseFormatV2": ".response_format_v2",
+    "JsonResponseFormat": ".json_response_format",
+    "JsonResponseFormatV2": ".json_response_format_v2",
+    "LabelMetric": ".label_metric",
+    "ListConnectorsResponse": ".list_connectors_response",
+    "ListEmbedJobResponse": ".list_embed_job_response",
+    "ListModelsResponse": ".list_models_response",
+    "LogprobItem": ".logprob_item",
+    "Message": ".message",
+    "Metrics": ".metrics",
+    "NonStreamedChatResponse": ".non_streamed_chat_response",
+    "OAuthAuthorizeResponse": ".o_auth_authorize_response",
+    "ParseInfo": ".parse_info",
+    "RerankDocument": ".rerank_document",
+    "RerankRequestDocumentsItem": ".rerank_request_documents_item",
+    "RerankResponse": ".rerank_response",
+    "RerankResponseResultsItem": ".rerank_response_results_item",
+    "RerankResponseResultsItemDocument": ".rerank_response_results_item_document",
+    "RerankerDataMetrics": ".reranker_data_metrics",
+    "ResponseFormat": ".response_format",
+    "ResponseFormatV2": ".response_format_v2",
+    "SearchQueriesGenerationStreamedChatResponse": ".streamed_chat_response",
+    "SearchResultsStreamedChatResponse": ".streamed_chat_response",
+    "SingleGeneration": ".single_generation",
+    "SingleGenerationInStream": ".single_generation_in_stream",
+    "SingleGenerationTokenLikelihoodsItem": ".single_generation_token_likelihoods_item",
+    "Source": ".source",
+    "StreamEndGenerateStreamedResponse": ".generate_streamed_response",
+    "StreamEndStreamedChatResponse": ".streamed_chat_response",
+    "StreamErrorGenerateStreamedResponse": ".generate_streamed_response",
+    "StreamStartStreamedChatResponse": ".streamed_chat_response",
+    "StreamedChatResponse": ".streamed_chat_response",
+    "SummarizeRequestExtractiveness": ".summarize_request_extractiveness",
+    "SummarizeRequestFormat": ".summarize_request_format",
+    "SummarizeRequestLength": ".summarize_request_length",
+    "SummarizeResponse": ".summarize_response",
+    "SystemChatMessageV2": ".chat_message_v2",
+    "SystemMessage": ".message",
+    "SystemMessageV2": ".system_message_v2",
+    "SystemMessageV2Content": ".system_message_v2content",
+    "SystemMessageV2ContentItem": ".system_message_v2content_item",
+    "TextAssistantMessageResponseContentItem": ".assistant_message_response_content_item",
+    "TextAssistantMessageV2ContentItem": ".assistant_message_v2content_item",
+    "TextContent": ".content",
+    "TextEmbedContent": ".embed_content",
+    "TextGenerationGenerateStreamedResponse": ".generate_streamed_response",
+    "TextGenerationStreamedChatResponse": ".streamed_chat_response",
+    "TextResponseFormat": ".response_format",
+    "TextResponseFormatV2": ".response_format_v2",
+    "TextSystemMessageV2ContentItem": ".system_message_v2content_item",
+    "TextToolContent": ".tool_content",
+    "Thinking": ".thinking",
+    "ThinkingAssistantMessageResponseContentItem": ".assistant_message_response_content_item",
+    "ThinkingAssistantMessageV2ContentItem": ".assistant_message_v2content_item",
+    "ThinkingType": ".thinking_type",
+    "TokenizeResponse": ".tokenize_response",
+    "Tool": ".tool",
+    "ToolCall": ".tool_call",
+    "ToolCallDelta": ".tool_call_delta",
+    "ToolCallV2": ".tool_call_v2",
+    "ToolCallV2Function": ".tool_call_v2function",
+    "ToolCallsChunkStreamedChatResponse": ".streamed_chat_response",
+    "ToolCallsGenerationStreamedChatResponse": ".streamed_chat_response",
+    "ToolChatMessageV2": ".chat_message_v2",
+    "ToolContent": ".tool_content",
+    "ToolMessage": ".message",
+    "ToolMessageV2": ".tool_message_v2",
+    "ToolMessageV2Content": ".tool_message_v2content",
+    "ToolParameterDefinitionsValue": ".tool_parameter_definitions_value",
+    "ToolResult": ".tool_result",
+    "ToolSource": ".source",
+    "ToolV2": ".tool_v2",
+    "ToolV2Function": ".tool_v2function",
+    "UpdateConnectorResponse": ".update_connector_response",
+    "Usage": ".usage",
+    "UsageBilledUnits": ".usage_billed_units",
+    "UsageTokens": ".usage_tokens",
+    "UserChatMessageV2": ".chat_message_v2",
+    "UserMessage": ".message",
+    "UserMessageV2": ".user_message_v2",
+    "UserMessageV2Content": ".user_message_v2content",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+    module_name = _dynamic_imports.get(attr_name)
+    if module_name is None:
+        raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+    try:
+        module = import_module(module_name, __package__)
+        if module_name == f".{attr_name}":
+            return module
+        else:
+            return getattr(module, attr_name)
+    except ImportError as e:
+        raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+    except AttributeError as e:
+        raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+    lazy_attrs = list(_dynamic_imports.keys())
+    return sorted(lazy_attrs)
+
 __all__ = [
     "ApiMeta",
diff --git a/src/cohere/types/tool_call_v2.py b/src/cohere/types/tool_call_v2.py
index 33f7f3c97..bda7d2793 100644
--- a/src/cohere/types/tool_call_v2.py
+++ b/src/cohere/types/tool_call_v2.py
@@ -13,8 +13,8 @@ class ToolCallV2(UncheckedBaseModel):
     An array of tool calls to be made.
     """
 
-    id: typing.Optional[str] = None
-    type: typing.Optional[typing.Literal["function"]] = None
+    id: str
+    type: typing.Literal["function"] = "function"
     function: typing.Optional[ToolCallV2Function] = None
 
     if IS_PYDANTIC_V2:
diff --git a/src/cohere/types/tool_v2.py b/src/cohere/types/tool_v2.py
index a4b67fd02..f54c66c11 100644
--- a/src/cohere/types/tool_v2.py
+++ b/src/cohere/types/tool_v2.py
@@ -9,7 +9,7 @@
 
 
 class ToolV2(UncheckedBaseModel):
-    type: typing.Optional[typing.Literal["function"]] = None
+    type: typing.Literal["function"] = "function"
     function: typing.Optional[ToolV2Function] = pydantic.Field(default=None)
     """
     The function to be executed.
diff --git a/src/cohere/v2/__init__.py b/src/cohere/v2/__init__.py
index 4632be777..ad8978bc9 100644
--- a/src/cohere/v2/__init__.py
+++ b/src/cohere/v2/__init__.py
@@ -2,31 +2,82 @@
 # isort: skip_file
 
-from .types import (
-    CitationEndV2ChatStreamResponse,
-    CitationStartV2ChatStreamResponse,
-    ContentDeltaV2ChatStreamResponse,
-    ContentEndV2ChatStreamResponse,
-    ContentStartV2ChatStreamResponse,
-    DebugV2ChatStreamResponse,
-    MessageEndV2ChatStreamResponse,
-    MessageStartV2ChatStreamResponse,
-    ToolCallDeltaV2ChatStreamResponse,
-    ToolCallEndV2ChatStreamResponse,
-    ToolCallStartV2ChatStreamResponse,
-    ToolPlanDeltaV2ChatStreamResponse,
-    V2ChatRequestDocumentsItem,
-    V2ChatRequestSafetyMode,
-    V2ChatRequestToolChoice,
-    V2ChatResponse,
-    V2ChatStreamRequestDocumentsItem,
-    V2ChatStreamRequestSafetyMode,
-    V2ChatStreamRequestToolChoice,
-    V2ChatStreamResponse,
-    V2EmbedRequestTruncate,
-    V2RerankResponse,
-    V2RerankResponseResultsItem,
-)
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+    from .types import (
+        CitationEndV2ChatStreamResponse,
+        CitationStartV2ChatStreamResponse,
+        ContentDeltaV2ChatStreamResponse,
+        ContentEndV2ChatStreamResponse,
+        ContentStartV2ChatStreamResponse,
+        DebugV2ChatStreamResponse,
+        MessageEndV2ChatStreamResponse,
+        MessageStartV2ChatStreamResponse,
+        ToolCallDeltaV2ChatStreamResponse,
+        ToolCallEndV2ChatStreamResponse,
+        ToolCallStartV2ChatStreamResponse,
+        ToolPlanDeltaV2ChatStreamResponse,
+        V2ChatRequestDocumentsItem,
+        V2ChatRequestSafetyMode,
+        V2ChatRequestToolChoice,
+        V2ChatResponse,
+        V2ChatStreamRequestDocumentsItem,
+        V2ChatStreamRequestSafetyMode,
+        V2ChatStreamRequestToolChoice,
+        V2ChatStreamResponse,
+        V2EmbedRequestTruncate,
+        V2RerankResponse,
+        V2RerankResponseResultsItem,
+    )
+_dynamic_imports: typing.Dict[str, str] = {
+    "CitationEndV2ChatStreamResponse": ".types",
+    "CitationStartV2ChatStreamResponse": ".types",
+    "ContentDeltaV2ChatStreamResponse": ".types",
+    "ContentEndV2ChatStreamResponse": ".types",
+    "ContentStartV2ChatStreamResponse": ".types",
+    "DebugV2ChatStreamResponse": ".types",
+    "MessageEndV2ChatStreamResponse": ".types",
+    "MessageStartV2ChatStreamResponse": ".types",
+    "ToolCallDeltaV2ChatStreamResponse": ".types",
+    "ToolCallEndV2ChatStreamResponse": ".types",
+    "ToolCallStartV2ChatStreamResponse": ".types",
+    "ToolPlanDeltaV2ChatStreamResponse": ".types",
+    "V2ChatRequestDocumentsItem": ".types",
+    "V2ChatRequestSafetyMode": ".types",
+    "V2ChatRequestToolChoice": ".types",
+    "V2ChatResponse": ".types",
+    "V2ChatStreamRequestDocumentsItem": ".types",
+    "V2ChatStreamRequestSafetyMode": ".types",
+    "V2ChatStreamRequestToolChoice": ".types",
+    "V2ChatStreamResponse": ".types",
+    "V2EmbedRequestTruncate": ".types",
+    "V2RerankResponse": ".types",
+    "V2RerankResponseResultsItem": ".types",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+    module_name = _dynamic_imports.get(attr_name)
+    if module_name is None:
+        raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+    try:
+        module = import_module(module_name, __package__)
+        if module_name == f".{attr_name}":
+            return module
+        else:
+            return getattr(module, attr_name)
+    except ImportError as e:
+        raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+    except AttributeError as e:
+        raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+    lazy_attrs = list(_dynamic_imports.keys())
+    return sorted(lazy_attrs)
+
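With `src/cohere/v2/__init__.py` converted to the same lazy scheme, importing the package no longer eagerly imports `.types`. A rough way to observe the deferral at runtime, assuming no other import path has already materialized the submodule:

import sys

import cohere.v2  # the eager `from .types import (...)` is gone

assert "cohere.v2.types" not in sys.modules  # not loaded yet
_ = cohere.v2.V2ChatResponse                 # resolved via the module __getattr__
assert "cohere.v2.types" in sys.modules      # ...which imported .types on demand

In practice another import may pull `cohere.v2.types` in first, so treat this as a sketch of the intended behavior rather than a guaranteed invariant.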
"CitationEndV2ChatStreamResponse", diff --git a/src/cohere/v2/raw_client.py b/src/cohere/v2/raw_client.py index 3879be0af..f1a2424e9 100644 --- a/src/cohere/v2/raw_client.py +++ b/src/cohere/v2/raw_client.py @@ -1,14 +1,14 @@ # This file was auto-generated by Fern from our API Definition. import contextlib -import json import typing from json.decoder import JSONDecodeError +from logging import error, warning -import httpx_sse from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.http_sse._api import EventSource from ..core.request_options import RequestOptions from ..core.serialization import convert_and_respect_annotation_metadata from ..core.unchecked_base_model import construct_type @@ -230,7 +230,7 @@ def _stream() -> HttpResponse[typing.Iterator[V2ChatStreamResponse]]: if 200 <= _response.status_code < 300: def _iter(): - _event_source = httpx_sse.EventSource(_response) + _event_source = EventSource(_response) for _sse in _event_source.iter_sse(): if _sse.data == None: return @@ -239,11 +239,19 @@ def _iter(): V2ChatStreamResponse, construct_type( type_=V2ChatStreamResponse, # type: ignore - object_=json.loads(_sse.data), + object_=_sse.json(), ), ) - except Exception: - pass + except JSONDecodeError as e: + warning(f"Skipping SSE event with invalid JSON: {e}, sse: {_sse!r}") + except (TypeError, ValueError, KeyError, AttributeError) as e: + warning( + f"Skipping SSE event due to model construction error: {type(e).__name__}: {e}, sse: {_sse!r}" + ) + except Exception as e: + error( + f"Unexpected error processing SSE event: {type(e).__name__}: {e}, sse: {_sse!r}" + ) return return HttpResponse(response=_response, data=_iter()) @@ -1350,7 +1358,7 @@ async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[V2ChatStreamRespon if 200 <= _response.status_code < 300: async def _iter(): - _event_source = httpx_sse.EventSource(_response) + _event_source = EventSource(_response) async for _sse in _event_source.aiter_sse(): if _sse.data == None: return @@ -1359,11 +1367,19 @@ async def _iter(): V2ChatStreamResponse, construct_type( type_=V2ChatStreamResponse, # type: ignore - object_=json.loads(_sse.data), + object_=_sse.json(), ), ) - except Exception: - pass + except JSONDecodeError as e: + warning(f"Skipping SSE event with invalid JSON: {e}, sse: {_sse!r}") + except (TypeError, ValueError, KeyError, AttributeError) as e: + warning( + f"Skipping SSE event due to model construction error: {type(e).__name__}: {e}, sse: {_sse!r}" + ) + except Exception as e: + error( + f"Unexpected error processing SSE event: {type(e).__name__}: {e}, sse: {_sse!r}" + ) return return AsyncHttpResponse(response=_response, data=_iter()) diff --git a/src/cohere/v2/types/__init__.py b/src/cohere/v2/types/__init__.py index 3f7e6dc2b..9041aed31 100644 --- a/src/cohere/v2/types/__init__.py +++ b/src/cohere/v2/types/__init__.py @@ -2,31 +2,82 @@ # isort: skip_file -from .v2chat_request_documents_item import V2ChatRequestDocumentsItem -from .v2chat_request_safety_mode import V2ChatRequestSafetyMode -from .v2chat_request_tool_choice import V2ChatRequestToolChoice -from .v2chat_response import V2ChatResponse -from .v2chat_stream_request_documents_item import V2ChatStreamRequestDocumentsItem -from .v2chat_stream_request_safety_mode import V2ChatStreamRequestSafetyMode -from .v2chat_stream_request_tool_choice import V2ChatStreamRequestToolChoice -from .v2chat_stream_response import ( - 
-    CitationEndV2ChatStreamResponse,
-    CitationStartV2ChatStreamResponse,
-    ContentDeltaV2ChatStreamResponse,
-    ContentEndV2ChatStreamResponse,
-    ContentStartV2ChatStreamResponse,
-    DebugV2ChatStreamResponse,
-    MessageEndV2ChatStreamResponse,
-    MessageStartV2ChatStreamResponse,
-    ToolCallDeltaV2ChatStreamResponse,
-    ToolCallEndV2ChatStreamResponse,
-    ToolCallStartV2ChatStreamResponse,
-    ToolPlanDeltaV2ChatStreamResponse,
-    V2ChatStreamResponse,
-)
-from .v2embed_request_truncate import V2EmbedRequestTruncate
-from .v2rerank_response import V2RerankResponse
-from .v2rerank_response_results_item import V2RerankResponseResultsItem
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+    from .v2chat_request_documents_item import V2ChatRequestDocumentsItem
+    from .v2chat_request_safety_mode import V2ChatRequestSafetyMode
+    from .v2chat_request_tool_choice import V2ChatRequestToolChoice
+    from .v2chat_response import V2ChatResponse
+    from .v2chat_stream_request_documents_item import V2ChatStreamRequestDocumentsItem
+    from .v2chat_stream_request_safety_mode import V2ChatStreamRequestSafetyMode
+    from .v2chat_stream_request_tool_choice import V2ChatStreamRequestToolChoice
+    from .v2chat_stream_response import (
+        CitationEndV2ChatStreamResponse,
+        CitationStartV2ChatStreamResponse,
+        ContentDeltaV2ChatStreamResponse,
+        ContentEndV2ChatStreamResponse,
+        ContentStartV2ChatStreamResponse,
+        DebugV2ChatStreamResponse,
+        MessageEndV2ChatStreamResponse,
+        MessageStartV2ChatStreamResponse,
+        ToolCallDeltaV2ChatStreamResponse,
+        ToolCallEndV2ChatStreamResponse,
+        ToolCallStartV2ChatStreamResponse,
+        ToolPlanDeltaV2ChatStreamResponse,
+        V2ChatStreamResponse,
+    )
+    from .v2embed_request_truncate import V2EmbedRequestTruncate
+    from .v2rerank_response import V2RerankResponse
+    from .v2rerank_response_results_item import V2RerankResponseResultsItem
+_dynamic_imports: typing.Dict[str, str] = {
+    "CitationEndV2ChatStreamResponse": ".v2chat_stream_response",
+    "CitationStartV2ChatStreamResponse": ".v2chat_stream_response",
+    "ContentDeltaV2ChatStreamResponse": ".v2chat_stream_response",
+    "ContentEndV2ChatStreamResponse": ".v2chat_stream_response",
+    "ContentStartV2ChatStreamResponse": ".v2chat_stream_response",
+    "DebugV2ChatStreamResponse": ".v2chat_stream_response",
+    "MessageEndV2ChatStreamResponse": ".v2chat_stream_response",
+    "MessageStartV2ChatStreamResponse": ".v2chat_stream_response",
+    "ToolCallDeltaV2ChatStreamResponse": ".v2chat_stream_response",
+    "ToolCallEndV2ChatStreamResponse": ".v2chat_stream_response",
+    "ToolCallStartV2ChatStreamResponse": ".v2chat_stream_response",
+    "ToolPlanDeltaV2ChatStreamResponse": ".v2chat_stream_response",
+    "V2ChatRequestDocumentsItem": ".v2chat_request_documents_item",
+    "V2ChatRequestSafetyMode": ".v2chat_request_safety_mode",
+    "V2ChatRequestToolChoice": ".v2chat_request_tool_choice",
+    "V2ChatResponse": ".v2chat_response",
+    "V2ChatStreamRequestDocumentsItem": ".v2chat_stream_request_documents_item",
+    "V2ChatStreamRequestSafetyMode": ".v2chat_stream_request_safety_mode",
+    "V2ChatStreamRequestToolChoice": ".v2chat_stream_request_tool_choice",
+    "V2ChatStreamResponse": ".v2chat_stream_response",
+    "V2EmbedRequestTruncate": ".v2embed_request_truncate",
+    "V2RerankResponse": ".v2rerank_response",
+    "V2RerankResponseResultsItem": ".v2rerank_response_results_item",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+    module_name = _dynamic_imports.get(attr_name)
+    if module_name is None:
+        raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+    try:
+        module = import_module(module_name, __package__)
+        if module_name == f".{attr_name}":
+            return module
+        else:
+            return getattr(module, attr_name)
+    except ImportError as e:
+        raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+    except AttributeError as e:
+        raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+    lazy_attrs = list(_dynamic_imports.keys())
+    return sorted(lazy_attrs)
+
 __all__ = [
     "CitationEndV2ChatStreamResponse",