
Commit 96f29a9 (parent 681424e)

SDK regeneration (#421)

Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>

3 files changed (+24, -2 lines)

pyproject.toml (1 addition, 1 deletion)

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cohere"
-version = "5.0.0"
+version = "5.0.1"
 description = ""
 readme = "README.md"
 authors = []
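
For downstream projects that want this release, a minimal sketch of a Poetry dependency entry pinning the new version (the dependencies table shown is standard Poetry convention, not part of this diff):

[tool.poetry.dependencies]
cohere = "5.0.1"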

src/cohere/base_client.py (22 additions, 0 deletions)

@@ -129,6 +129,7 @@ def chat_stream(
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
         seed: typing.Optional[float] = OMIT,
+        stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -213,6 +214,8 @@ def chat_stream(
 
             - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
 
+            - stop_sequences: typing.Optional[typing.Sequence[str]]. A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
 
               Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -295,6 +298,7 @@ def chat_stream(
             k=1,
             p=1.1,
             seed=1.1,
+            stop_sequences=["string"],
             connectors_search_options=ChatStreamRequestConnectorsSearchOptions(
                 model={"key": "value"},
                 temperature={"key": "value"},
@@ -358,6 +362,8 @@ def chat_stream(
             _request["p"] = p
         if seed is not OMIT:
             _request["seed"] = seed
+        if stop_sequences is not OMIT:
+            _request["stop_sequences"] = stop_sequences
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -426,6 +432,7 @@ def chat(
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
         seed: typing.Optional[float] = OMIT,
+        stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -510,6 +517,8 @@ def chat(
 
             - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
 
+            - stop_sequences: typing.Optional[typing.Sequence[str]]. A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
 
               Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -598,6 +607,8 @@ def chat(
             _request["p"] = p
         if seed is not OMIT:
             _request["seed"] = seed
+        if stop_sequences is not OMIT:
+            _request["stop_sequences"] = stop_sequences
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -1629,6 +1640,7 @@ async def chat_stream(
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
         seed: typing.Optional[float] = OMIT,
+        stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -1713,6 +1725,8 @@ async def chat_stream(
 
             - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
 
+            - stop_sequences: typing.Optional[typing.Sequence[str]]. A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
 
               Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -1795,6 +1809,7 @@ async def chat_stream(
             k=1,
             p=1.1,
             seed=1.1,
+            stop_sequences=["string"],
             connectors_search_options=ChatStreamRequestConnectorsSearchOptions(
                 model={"key": "value"},
                 temperature={"key": "value"},
@@ -1858,6 +1873,8 @@ async def chat_stream(
             _request["p"] = p
         if seed is not OMIT:
             _request["seed"] = seed
+        if stop_sequences is not OMIT:
+            _request["stop_sequences"] = stop_sequences
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -1926,6 +1943,7 @@ async def chat(
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
         seed: typing.Optional[float] = OMIT,
+        stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -2010,6 +2028,8 @@ async def chat(
 
             - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
 
+            - stop_sequences: typing.Optional[typing.Sequence[str]]. A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
 
               Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -2098,6 +2118,8 @@ async def chat(
             _request["p"] = p
         if seed is not OMIT:
             _request["seed"] = seed
+        if stop_sequences is not OMIT:
+            _request["stop_sequences"] = stop_sequences
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
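
Taken together, the regeneration adds one optional stop_sequences keyword to chat and chat_stream on both the sync and async clients. A minimal usage sketch, assuming the SDK's documented cohere.Client constructor and message parameter (the API key, prompt text, and stop strings below are illustrative):

import cohere

# Client construction assumed from the SDK README; the key is a placeholder.
co = cohere.Client(api_key="YOUR_API_KEY")

# Per the new docstring: up to 5 stop strings. Generation halts as soon as the
# model emits one of them, and the matched sequence is excluded from the output.
response = co.chat(
    message="List three uses of text embeddings.",
    stop_sequences=["\n\n", "END"],
)
print(response.text)  # .text on the non-streamed chat response (assumed)

The same keyword works on chat_stream and on the async variants, since the diff applies the identical change to all four methods.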

src/cohere/core/client_wrapper.py (1 addition, 1 deletion)

@@ -23,7 +23,7 @@ def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "cohere",
-            "X-Fern-SDK-Version": "5.0.0",
+            "X-Fern-SDK-Version": "5.0.1",
         }
         if self._client_name is not None:
             headers["X-Client-Name"] = self._client_name
