
Commit 2a4cdea

SDK regeneration
1 parent d476799 commit 2a4cdea

32 files changed: +100 -193 lines

poetry.lock

Lines changed: 3 additions & 3 deletions
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cohere"
-version = "5.0.0a11"
+version = "5.0.0a12"
 description = ""
 readme = "README.md"
 authors = []

src/cohere/base_client.py

Lines changed: 40 additions & 0 deletions
@@ -128,6 +128,7 @@ def chat_stream(
         max_tokens: typing.Optional[int] = OMIT,
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
+        seed: typing.Optional[float] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -210,6 +211,8 @@ def chat_stream(
             - p: typing.Optional[float]. Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
               Defaults to `0.75`. min value of `0.01`, max value of `0.99`.

+            - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.

               Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -353,6 +356,8 @@ def chat_stream(
             _request["k"] = k
         if p is not OMIT:
             _request["p"] = p
+        if seed is not OMIT:
+            _request["seed"] = seed
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -420,6 +425,7 @@ def chat(
         max_tokens: typing.Optional[int] = OMIT,
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
+        seed: typing.Optional[float] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -502,6 +508,8 @@ def chat(
             - p: typing.Optional[float]. Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
               Defaults to `0.75`. min value of `0.01`, max value of `0.99`.

+            - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.

               Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -588,6 +596,8 @@ def chat(
             _request["k"] = k
         if p is not OMIT:
             _request["p"] = p
+        if seed is not OMIT:
+            _request["seed"] = seed
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
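Taken together, these hunks thread a new optional `seed` through `chat` and `chat_stream`. A minimal sketch of how it might be used, assuming a placeholder API key and an illustrative message (neither is part of this commit):

import cohere

co = cohere.Client(api_key="...")  # placeholder key

# Repeated requests with the same seed and parameters should, on a
# best-effort basis, return the same result; determinism is not guaranteed.
first = co.chat(message="Tell me a fact about otters.", seed=42)
second = co.chat(message="Tell me a fact about otters.", seed=42)
# first.text and second.text are expected, but not guaranteed, to match.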
@@ -643,6 +653,7 @@ def generate_stream(
         max_tokens: typing.Optional[int] = OMIT,
         truncate: typing.Optional[GenerateStreamRequestTruncate] = OMIT,
         temperature: typing.Optional[float] = OMIT,
+        seed: typing.Optional[float] = OMIT,
         preset: typing.Optional[str] = OMIT,
         end_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -683,6 +694,8 @@ def generate_stream(
             - temperature: typing.Optional[float]. A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
               Defaults to `0.75`, min value of `0.0`, max value of `5.0`.

+            - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
+
             - preset: typing.Optional[str]. Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
               When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.

@@ -751,6 +764,8 @@ def generate_stream(
             _request["truncate"] = truncate
         if temperature is not OMIT:
             _request["temperature"] = temperature
+        if seed is not OMIT:
+            _request["seed"] = seed
         if preset is not OMIT:
             _request["preset"] = preset
         if end_sequences is not OMIT:
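The same parameter reaches the streaming generate path. A hedged sketch of consuming `generate_stream` with a seed, assuming the fern-generated event shape in which text chunks arrive as "text-generation" events carrying a `text` field:

import cohere

co = cohere.Client(api_key="...")  # placeholder key

# seed biases the stream toward reproducibility; determinism is best-effort.
for event in co.generate_stream(prompt="Write one sentence about otters.", seed=7):
    if event.event_type == "text-generation":
        print(event.text, end="")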
@@ -823,6 +838,7 @@ def generate(
         max_tokens: typing.Optional[int] = OMIT,
         truncate: typing.Optional[GenerateRequestTruncate] = OMIT,
         temperature: typing.Optional[float] = OMIT,
+        seed: typing.Optional[float] = OMIT,
         preset: typing.Optional[str] = OMIT,
         end_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -863,6 +879,8 @@ def generate(
             - temperature: typing.Optional[float]. A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
               Defaults to `0.75`, min value of `0.0`, max value of `5.0`.

+            - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
+
             - preset: typing.Optional[str]. Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
               When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.

@@ -917,6 +935,8 @@ def generate(
             _request["truncate"] = truncate
         if temperature is not OMIT:
             _request["temperature"] = temperature
+        if seed is not OMIT:
+            _request["seed"] = seed
         if preset is not OMIT:
             _request["preset"] = preset
         if end_sequences is not OMIT:
@@ -1608,6 +1628,7 @@ async def chat_stream(
         max_tokens: typing.Optional[int] = OMIT,
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
+        seed: typing.Optional[float] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -1690,6 +1711,8 @@ async def chat_stream(
             - p: typing.Optional[float]. Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
               Defaults to `0.75`. min value of `0.01`, max value of `0.99`.

+            - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.

               Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -1833,6 +1856,8 @@ async def chat_stream(
             _request["k"] = k
         if p is not OMIT:
             _request["p"] = p
+        if seed is not OMIT:
+            _request["seed"] = seed
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -1900,6 +1925,7 @@ async def chat(
         max_tokens: typing.Optional[int] = OMIT,
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
+        seed: typing.Optional[float] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -1982,6 +2008,8 @@ async def chat(
             - p: typing.Optional[float]. Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
               Defaults to `0.75`. min value of `0.01`, max value of `0.99`.

+            - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.

               Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -2068,6 +2096,8 @@ async def chat(
             _request["k"] = k
         if p is not OMIT:
             _request["p"] = p
+        if seed is not OMIT:
+            _request["seed"] = seed
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -2123,6 +2153,7 @@ async def generate_stream(
         max_tokens: typing.Optional[int] = OMIT,
         truncate: typing.Optional[GenerateStreamRequestTruncate] = OMIT,
         temperature: typing.Optional[float] = OMIT,
+        seed: typing.Optional[float] = OMIT,
         preset: typing.Optional[str] = OMIT,
         end_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -2163,6 +2194,8 @@ async def generate_stream(
             - temperature: typing.Optional[float]. A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
               Defaults to `0.75`, min value of `0.0`, max value of `5.0`.

+            - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
+
             - preset: typing.Optional[str]. Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
               When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.

@@ -2231,6 +2264,8 @@ async def generate_stream(
             _request["truncate"] = truncate
         if temperature is not OMIT:
             _request["temperature"] = temperature
+        if seed is not OMIT:
+            _request["seed"] = seed
         if preset is not OMIT:
             _request["preset"] = preset
         if end_sequences is not OMIT:
@@ -2303,6 +2338,7 @@ async def generate(
         max_tokens: typing.Optional[int] = OMIT,
         truncate: typing.Optional[GenerateRequestTruncate] = OMIT,
         temperature: typing.Optional[float] = OMIT,
+        seed: typing.Optional[float] = OMIT,
         preset: typing.Optional[str] = OMIT,
         end_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -2343,6 +2379,8 @@ async def generate(
             - temperature: typing.Optional[float]. A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
               Defaults to `0.75`, min value of `0.0`, max value of `5.0`.

+            - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
+
             - preset: typing.Optional[str]. Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
               When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.

@@ -2397,6 +2435,8 @@ async def generate(
             _request["truncate"] = truncate
         if temperature is not OMIT:
             _request["temperature"] = temperature
+        if seed is not OMIT:
+            _request["seed"] = seed
         if preset is not OMIT:
             _request["preset"] = preset
         if end_sequences is not OMIT:
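The async client mirrors the sync signatures, so `seed` flows through `AsyncClient` identically. A sketch under the same assumptions (placeholder key, illustrative message):

import asyncio
import cohere

async def main() -> None:
    co = cohere.AsyncClient(api_key="...")  # placeholder key
    reply = await co.chat(message="Summarize what a sampling seed does.", seed=123)
    print(reply.text)

asyncio.run(main())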

src/cohere/core/client_wrapper.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "cohere",
-            "X-Fern-SDK-Version": "5.0.0a11",
+            "X-Fern-SDK-Version": "5.0.0a12",
         }
         if self._client_name is not None:
             headers["X-Client-Name"] = self._client_name

src/cohere/embed_jobs/types/create_embed_job_request_truncate.py

Lines changed: 1 addition & 1 deletion
@@ -2,4 +2,4 @@

 import typing

-CreateEmbedJobRequestTruncate = typing.Literal["START", "END"]
+CreateEmbedJobRequestTruncate = typing.Union[typing.AnyStr, typing.Literal["START", "END"]]

src/cohere/types/auth_token_type.py

Lines changed: 1 addition & 1 deletion
@@ -2,4 +2,4 @@

 import typing

-AuthTokenType = typing.Literal["bearer", "basic", "noscheme"]
+AuthTokenType = typing.Union[typing.AnyStr, typing.Literal["bearer", "basic", "noscheme"]]

src/cohere/types/chat_message_role.py

Lines changed: 1 addition & 1 deletion
@@ -2,4 +2,4 @@

 import typing

-ChatMessageRole = typing.Literal["CHATBOT", "SYSTEM", "USER"]
+ChatMessageRole = typing.Union[typing.AnyStr, typing.Literal["CHATBOT", "SYSTEM", "USER"]]

src/cohere/types/chat_request_citation_quality.py

Lines changed: 1 addition & 1 deletion
@@ -2,4 +2,4 @@

 import typing

-ChatRequestCitationQuality = typing.Literal["fast", "accurate"]
+ChatRequestCitationQuality = typing.Union[typing.AnyStr, typing.Literal["fast", "accurate"]]

src/cohere/types/chat_request_prompt_truncation.py

Lines changed: 1 addition & 1 deletion
@@ -2,4 +2,4 @@

 import typing

-ChatRequestPromptTruncation = typing.Literal["OFF", "AUTO", "AUTO_PRESERVE_ORDER"]
+ChatRequestPromptTruncation = typing.Union[typing.AnyStr, typing.Literal["OFF", "AUTO", "AUTO_PRESERVE_ORDER"]]

src/cohere/types/chat_stream_end_event_finish_reason.py

Lines changed: 3 additions & 1 deletion
@@ -2,4 +2,6 @@

 import typing

-ChatStreamEndEventFinishReason = typing.Literal["COMPLETE", "ERROR_LIMIT", "MAX_TOKENS", "ERROR", "ERROR_TOXIC"]
+ChatStreamEndEventFinishReason = typing.Union[
+    typing.AnyStr, typing.Literal["COMPLETE", "ERROR_LIMIT", "MAX_TOKENS", "ERROR", "ERROR_TOXIC"]
+]
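These type changes relax the string enums from closed `Literal` aliases to `Union[AnyStr, Literal[...]]`, so values outside the enumerated set no longer fail type narrowing when the API introduces new variants. A sketch of the practical effect; the `describe` helper and the "TOOL" value are hypothetical, purely for illustration:

import typing

ChatMessageRole = typing.Union[typing.AnyStr, typing.Literal["CHATBOT", "SYSTEM", "USER"]]

def describe(role: ChatMessageRole) -> str:
    # Known roles can still be matched against the literal values...
    if role in ("CHATBOT", "SYSTEM", "USER"):
        return f"known role: {role}"
    # ...while unrecognized values now pass through instead of being rejected.
    return f"forward-compatible role: {role}"

print(describe("USER"))  # known role: USER
print(describe("TOOL"))  # forward-compatible role: TOOL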
