Commit b1dbf95

🌿 Fern Regeneration -- November 27, 2024 (#608)
* SDK regeneration
* Fixes
* reset lock
* Update src/cohere/v2/client.py

Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Co-authored-by: Billy Trend <[email protected]>
Co-authored-by: billytrend-cohere <[email protected]>
1 parent 756515a commit b1dbf95

File tree

13 files changed: +157 lines, -87 lines

.gitignore

Lines changed: 0 additions & 1 deletion
@@ -3,4 +3,3 @@ dist/
 __pycache__/
 poetry.toml
 .ruff_cache/
-.venv/

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cohere"
-version = "5.11.4"
+version = "5.12.0"
 description = ""
 readme = "README.md"
 authors = []
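The substantive change in this file is the version bump from `5.11.4` to `5.12.0`. As a minimal aside (not part of the diff, using only the Python standard library), this is one way to confirm which SDK version is installed after upgrading:

```python
# Not from this commit: a quick check of the installed cohere SDK version.
# importlib.metadata is part of the standard library on Python 3.8+.
from importlib.metadata import version

print(version("cohere"))  # expected to print "5.12.0" once this release is installed
```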

reference.md

Lines changed: 50 additions & 21 deletions
@@ -2365,6 +2365,7 @@ response = client.v2.chat_stream(
             ),
         )
     ],
+    strict_tools=True,
     documents=["string"],
     citation_options=CitationOptions(
         mode="FAST",
@@ -2381,6 +2382,7 @@ response = client.v2.chat_stream(
     p=1.1,
     return_prompt=True,
     logprobs=True,
+    stream=True,
 )
 for chunk in response:
     yield chunk
@@ -2422,6 +2424,19 @@ A list of available tools (functions) that the model may suggest invoking before
 When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.


+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**strict_tools:** `typing.Optional[bool]`
+
+When set to `true`, tool calls in the Assistant message will be forced to follow the tool definition strictly. Learn more in the [Strict Tools guide](https://docs.cohere.com/docs/structured-outputs-json#structured-outputs-tools).
+
+**Note**: The first few requests with a new set of tools will take longer to process.
+
 </dd>
 </dl>

@@ -2546,7 +2561,7 @@ Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty

 **k:** `typing.Optional[float]`

-Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled.
+Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled.
 Defaults to `0`, min value of `0`, max value of `500`.


@@ -2576,7 +2591,7 @@ Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
 <dl>
 <dd>

-**logprobs:** `typing.Optional[bool]` — Whether to return the log probabilities of the generated tokens. Defaults to false.
+**logprobs:** `typing.Optional[bool]` — Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response.


 </dd>
@@ -2640,6 +2655,7 @@ client.v2.chat(
             content="messages",
         )
     ],
+    stream=False,
 )

 ```
@@ -2679,6 +2695,19 @@ A list of available tools (functions) that the model may suggest invoking before
 When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.


+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**strict_tools:** `typing.Optional[bool]`
+
+When set to `true`, tool calls in the Assistant message will be forced to follow the tool definition strictly. Learn more in the [Strict Tools guide](https://docs.cohere.com/docs/structured-outputs-json#structured-outputs-tools).
+
+**Note**: The first few requests with a new set of tools will take longer to process.
+
 </dd>
 </dl>

@@ -2803,7 +2832,7 @@ Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty

 **k:** `typing.Optional[float]`

-Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled.
+Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled.
 Defaults to `0`, min value of `0`, max value of `500`.


@@ -2833,7 +2862,7 @@ Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
 <dl>
 <dd>

-**logprobs:** `typing.Optional[bool]` — Whether to return the log probabilities of the generated tokens. Defaults to false.
+**logprobs:** `typing.Optional[bool]` — Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response.


 </dd>
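The hunks above add the `strict_tools` parameter and reword the `logprobs` description for `client.v2.chat` and `client.v2.chat_stream`. A minimal sketch of passing both to `client.v2.chat` follows; it is not taken from this commit, and the API key placeholder, model name, and `get_weather` tool schema are illustrative assumptions:

```python
# Hedged sketch: using the new strict_tools flag together with logprobs.
# The model name and tool definition are assumptions, not values from this diff.
import cohere

client = cohere.Client(api_key="YOUR_API_KEY")

get_weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Return the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}

response = client.v2.chat(
    model="command-r-plus-08-2024",  # assumed model name
    messages=[{"role": "user", "content": "What is the weather in Toronto?"}],
    tools=[get_weather_tool],
    strict_tools=True,  # new in 5.12.0: tool calls must follow the tool schema exactly
    logprobs=True,      # include log probabilities of generated tokens in the response
)

print(response.message.tool_calls)
```

Per the note in the diff, the first few requests with a new set of tools are expected to take longer while strict mode is prepared.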
@@ -3057,7 +3086,15 @@ client.v2.rerank(
 <dl>
 <dd>

-**model:** `str` — The identifier of the model to use, one of : `rerank-english-v3.0`, `rerank-multilingual-v3.0`, `rerank-english-v2.0`, `rerank-multilingual-v2.0`
+**model:** `str`
+
+The identifier of the model to use.
+
+Supported models:
+- `rerank-english-v3.0`
+- `rerank-multilingual-v3.0`
+- `rerank-english-v2.0`
+- `rerank-multilingual-v2.0`

 </dd>
 </dl>
@@ -3073,30 +3110,22 @@ client.v2.rerank(
 <dl>
 <dd>

-**documents:** `typing.Sequence[V2RerankRequestDocumentsItem]`
-
-A list of document objects or strings to rerank.
-If a document is provided the text fields is required and all other fields will be preserved in the response.
-
-The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000.
+**documents:** `typing.Sequence[str]`

-We recommend a maximum of 1,000 documents for optimal endpoint performance.
-
-</dd>
-</dl>
+A list of texts that will be compared to the `query`.
+For optimal performance we recommend against sending more than 1,000 documents in a single request.

-<dl>
-<dd>
+**Note**: long documents will automatically be truncated to the value of `max_tokens_per_doc`.

-**top_n:** `typing.Optional[int]` — The number of most relevant documents or indices to return, defaults to the length of the documents
+**Note**: structured data should be formatted as YAML strings for best performance.

 </dd>
 </dl>

 <dl>
 <dd>

-**rank_fields:** `typing.Optional[typing.Sequence[str]]` — If a JSON object is provided, you can specify which keys you would like to have considered for reranking. The model will rerank based on order of the fields passed in (i.e. rank_fields=['title','author','text'] will rerank using the values in title, author, text sequentially. If the length of title, author, and text exceeds the context length of the model, the chunking will not re-consider earlier fields). If not provided, the model will use the default text field for ranking.
+**top_n:** `typing.Optional[int]` — Limits the number of returned rerank results to the specified value. If not passed, all the rerank results will be returned.

 </dd>
 </dl>
@@ -3115,7 +3144,7 @@ We recommend a maximum of 1,000 documents for optimal endpoint performance.
 <dl>
 <dd>

-**max_chunks_per_doc:** `typing.Optional[int]` — The maximum number of chunks to produce internally from a document
+**max_tokens_per_doc:** `typing.Optional[int]` — Defaults to `4096`. Long documents will be automatically truncated to the specified number of tokens.

 </dd>
 </dl>
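With these hunks, `documents` is documented as a plain sequence of strings and `max_tokens_per_doc` replaces the removed `max_chunks_per_doc`. A minimal sketch of a call against the updated signature; the query and documents are illustrative placeholders, and the model name comes from the supported list above:

```python
# Hedged sketch: v2.rerank with the updated documents and max_tokens_per_doc parameters.
# The query and document texts are illustrative, not taken from this commit.
import cohere

client = cohere.Client(api_key="YOUR_API_KEY")

response = client.v2.rerank(
    model="rerank-english-v3.0",
    query="What is the capital of the United States?",
    documents=[
        "Carson City is the capital city of the American state of Nevada.",
        "Washington, D.C. is the capital of the United States.",
        "Capital punishment has existed in the United States since colonial times.",
    ],
    top_n=2,                  # return only the two most relevant results
    max_tokens_per_doc=4096,  # the documented default; longer documents are truncated
)

for result in response.results:
    print(result.index, result.relevance_score)
```

The note about formatting structured data as YAML strings matters here because `documents` no longer accepts objects; any structured fields have to be flattened into each string before the call.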
</dl>
@@ -5043,7 +5072,7 @@ client.finetuning.update_finetuned_model(
 <dl>
 <dd>

-**last_used:** `typing.Optional[dt.datetime]` — Timestamp for the latest request to this fine-tuned model.
+**last_used:** `typing.Optional[dt.datetime]` — Deprecated: Timestamp for the latest request to this fine-tuned model.

 </dd>
 </dl>

src/cohere/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -106,6 +106,7 @@
     DatasetType,
     DatasetValidationStatus,
     DebugStreamedChatResponse,
+    DebugStreamedChatResponseV2,
     DeleteConnectorResponse,
     DetokenizeResponse,
     Document,
@@ -263,7 +264,6 @@
     V2ChatStreamRequestDocumentsItem,
     V2ChatStreamRequestSafetyMode,
     V2EmbedRequestTruncate,
-    V2RerankRequestDocumentsItem,
     V2RerankResponse,
     V2RerankResponseResultsItem,
     V2RerankResponseResultsItemDocument,
@@ -391,6 +391,7 @@
     "DatasetsGetUsageResponse",
     "DatasetsListResponse",
     "DebugStreamedChatResponse",
+    "DebugStreamedChatResponseV2",
     "DeleteConnectorResponse",
     "DetokenizeResponse",
     "Document",
@@ -528,7 +529,6 @@
     "V2ChatStreamRequestDocumentsItem",
     "V2ChatStreamRequestSafetyMode",
     "V2EmbedRequestTruncate",
-    "V2RerankRequestDocumentsItem",
     "V2RerankResponse",
     "V2RerankResponseResultsItem",
     "V2RerankResponseResultsItemDocument",

src/cohere/core/client_wrapper.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "cohere",
-            "X-Fern-SDK-Version": "5.11.4",
+            "X-Fern-SDK-Version": "5.12.0",
         }
         if self._client_name is not None:
             headers["X-Client-Name"] = self._client_name

src/cohere/finetuning/client.py

Lines changed: 2 additions & 2 deletions
@@ -543,7 +543,7 @@ def update_finetuned_model(
             Timestamp for the completed fine-tuning.

         last_used : typing.Optional[dt.datetime]
-            Timestamp for the latest request to this fine-tuned model.
+            Deprecated: Timestamp for the latest request to this fine-tuned model.

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -1468,7 +1468,7 @@ async def update_finetuned_model(
             Timestamp for the completed fine-tuning.

         last_used : typing.Optional[dt.datetime]
-            Timestamp for the latest request to this fine-tuned model.
+            Deprecated: Timestamp for the latest request to this fine-tuned model.

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

src/cohere/finetuning/finetuning/types/finetuned_model.py

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ class FinetunedModel(UncheckedBaseModel):

     last_used: typing.Optional[dt.datetime] = pydantic.Field(default=None)
     """
-    read-only. Timestamp for the latest request to this fine-tuned model.
+    read-only. Deprecated: Timestamp for the latest request to this fine-tuned model.
     """

     if IS_PYDANTIC_V2:

src/cohere/types/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -188,6 +188,7 @@
     ContentDeltaStreamedChatResponseV2,
     ContentEndStreamedChatResponseV2,
     ContentStartStreamedChatResponseV2,
+    DebugStreamedChatResponseV2,
     MessageEndStreamedChatResponseV2,
     MessageStartStreamedChatResponseV2,
     StreamedChatResponseV2,
@@ -329,6 +330,7 @@
     "DatasetType",
     "DatasetValidationStatus",
     "DebugStreamedChatResponse",
+    "DebugStreamedChatResponseV2",
     "DeleteConnectorResponse",
     "DetokenizeResponse",
     "Document",

src/cohere/types/streamed_chat_response_v2.py

Lines changed: 18 additions & 0 deletions
@@ -213,6 +213,23 @@ class Config:
         extra = pydantic.Extra.allow


+class DebugStreamedChatResponseV2(UncheckedBaseModel):
+    """
+    StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request).
+    """
+
+    type: typing.Literal["debug"] = "debug"
+    prompt: typing.Optional[str] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow")  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            smart_union = True
+            extra = pydantic.Extra.allow
+
+
 StreamedChatResponseV2 = typing_extensions.Annotated[
     typing.Union[
         MessageStartStreamedChatResponseV2,
@@ -226,6 +243,7 @@ class Config:
         CitationStartStreamedChatResponseV2,
         CitationEndStreamedChatResponseV2,
         MessageEndStreamedChatResponseV2,
+        DebugStreamedChatResponseV2,
     ],
     UnionMetadata(discriminant="type"),
 ]
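`DebugStreamedChatResponseV2` adds a `type="debug"` event with an optional `prompt` field to the `StreamedChatResponseV2` union. A minimal sketch of a stream consumer that branches on the new event type; the client setup, model name, and the assumption that debug events accompany `return_prompt=True` are illustrative, not taken from this commit:

```python
# Hedged sketch: handling the new "debug" stream event alongside content deltas.
# The model name and the return_prompt assumption are illustrative.
import cohere

client = cohere.Client(api_key="YOUR_API_KEY")

stream = client.v2.chat_stream(
    model="command-r-plus-08-2024",
    messages=[{"role": "user", "content": "Write one sentence about ferns."}],
    return_prompt=True,
)

for event in stream:
    if event.type == "content-delta":
        # Incremental generated text.
        print(event.delta.message.content.text, end="")
    elif event.type == "debug":
        # New in 5.12.0: the "debug" discriminant selects DebugStreamedChatResponseV2,
        # which may carry the rendered prompt.
        print("\n[debug] prompt:", event.prompt)
```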

src/cohere/v2/__init__.py

Lines changed: 0 additions & 2 deletions
@@ -6,7 +6,6 @@
     V2ChatStreamRequestDocumentsItem,
     V2ChatStreamRequestSafetyMode,
     V2EmbedRequestTruncate,
-    V2RerankRequestDocumentsItem,
     V2RerankResponse,
     V2RerankResponseResultsItem,
     V2RerankResponseResultsItemDocument,
@@ -18,7 +17,6 @@
     "V2ChatStreamRequestDocumentsItem",
     "V2ChatStreamRequestSafetyMode",
     "V2EmbedRequestTruncate",
-    "V2RerankRequestDocumentsItem",
     "V2RerankResponse",
     "V2RerankResponseResultsItem",
     "V2RerankResponseResultsItemDocument",
