Skip to content

Commit 57c1855

Browse files
Commit 57c1855 — "SDK regeneration" (1 parent: 165ba6c)

File tree

10 files changed

+39
-90
lines changed

10 files changed

+39
-90
lines changed

poetry.lock

Lines changed: 9 additions & 9 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "cohere"
3-
version = "5.0.0a7"
3+
version = "5.0.0a8"
44
description = ""
55
readme = "README.md"
66
authors = []

src/cohere/__init__.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -109,10 +109,8 @@
109109
SummarizeResponse,
110110
TokenizeResponse,
111111
Tool,
112-
ToolDefinition,
113-
ToolDefinitionInputsItem,
114-
ToolDefinitionOutputsItem,
115112
ToolInput,
113+
ToolParameterDefinitionsValue,
116114
UpdateConnectorResponse,
117115
)
118116
from .errors import BadRequestError, ForbiddenError, InternalServerError, NotFoundError, TooManyRequestsError
@@ -252,10 +250,8 @@
252250
"TokenizeResponse",
253251
"TooManyRequestsError",
254252
"Tool",
255-
"ToolDefinition",
256-
"ToolDefinitionInputsItem",
257-
"ToolDefinitionOutputsItem",
258253
"ToolInput",
254+
"ToolParameterDefinitionsValue",
259255
"UpdateConnectorResponse",
260256
"connectors",
261257
"datasets",

src/cohere/base_client.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,7 @@ def chat_stream(
127127
p: typing.Optional[float] = OMIT,
128128
frequency_penalty: typing.Optional[float] = OMIT,
129129
presence_penalty: typing.Optional[float] = OMIT,
130+
raw_prompting: typing.Optional[bool] = OMIT,
130131
request_options: typing.Optional[RequestOptions] = None,
131132
) -> typing.Iterator[StreamedChatResponse]:
132133
"""
@@ -204,6 +205,8 @@ def chat_stream(
204205
205206
Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
206207
208+
- raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
209+
207210
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
208211
"""
209212
_request: typing.Dict[str, typing.Any] = {"message": message, "stream": True}
@@ -235,6 +238,8 @@ def chat_stream(
235238
_request["frequency_penalty"] = frequency_penalty
236239
if presence_penalty is not OMIT:
237240
_request["presence_penalty"] = presence_penalty
241+
if raw_prompting is not OMIT:
242+
_request["raw_prompting"] = raw_prompting
238243
with self._client_wrapper.httpx_client.stream(
239244
"POST",
240245
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "chat"),
@@ -292,6 +297,7 @@ def chat(
292297
p: typing.Optional[float] = OMIT,
293298
frequency_penalty: typing.Optional[float] = OMIT,
294299
presence_penalty: typing.Optional[float] = OMIT,
300+
raw_prompting: typing.Optional[bool] = OMIT,
295301
request_options: typing.Optional[RequestOptions] = None,
296302
) -> NonStreamedChatResponse:
297303
"""
@@ -369,6 +375,8 @@ def chat(
369375
370376
Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
371377
378+
- raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
379+
372380
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
373381
---
374382
from cohere import ChatMessage, ChatMessageRole, ChatRequestPromptTruncation
@@ -427,6 +435,8 @@ def chat(
427435
_request["frequency_penalty"] = frequency_penalty
428436
if presence_penalty is not OMIT:
429437
_request["presence_penalty"] = presence_penalty
438+
if raw_prompting is not OMIT:
439+
_request["raw_prompting"] = raw_prompting
430440
_response = self._client_wrapper.httpx_client.request(
431441
"POST",
432442
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "chat"),
@@ -1368,6 +1378,7 @@ async def chat_stream(
13681378
p: typing.Optional[float] = OMIT,
13691379
frequency_penalty: typing.Optional[float] = OMIT,
13701380
presence_penalty: typing.Optional[float] = OMIT,
1381+
raw_prompting: typing.Optional[bool] = OMIT,
13711382
request_options: typing.Optional[RequestOptions] = None,
13721383
) -> typing.AsyncIterator[StreamedChatResponse]:
13731384
"""
@@ -1445,6 +1456,8 @@ async def chat_stream(
14451456
14461457
Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
14471458
1459+
- raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
1460+
14481461
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
14491462
"""
14501463
_request: typing.Dict[str, typing.Any] = {"message": message, "stream": True}
@@ -1476,6 +1489,8 @@ async def chat_stream(
14761489
_request["frequency_penalty"] = frequency_penalty
14771490
if presence_penalty is not OMIT:
14781491
_request["presence_penalty"] = presence_penalty
1492+
if raw_prompting is not OMIT:
1493+
_request["raw_prompting"] = raw_prompting
14791494
async with self._client_wrapper.httpx_client.stream(
14801495
"POST",
14811496
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "chat"),
@@ -1533,6 +1548,7 @@ async def chat(
15331548
p: typing.Optional[float] = OMIT,
15341549
frequency_penalty: typing.Optional[float] = OMIT,
15351550
presence_penalty: typing.Optional[float] = OMIT,
1551+
raw_prompting: typing.Optional[bool] = OMIT,
15361552
request_options: typing.Optional[RequestOptions] = None,
15371553
) -> NonStreamedChatResponse:
15381554
"""
@@ -1610,6 +1626,8 @@ async def chat(
16101626
16111627
Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
16121628
1629+
- raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
1630+
16131631
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
16141632
---
16151633
from cohere import ChatMessage, ChatMessageRole, ChatRequestPromptTruncation
@@ -1668,6 +1686,8 @@ async def chat(
16681686
_request["frequency_penalty"] = frequency_penalty
16691687
if presence_penalty is not OMIT:
16701688
_request["presence_penalty"] = presence_penalty
1689+
if raw_prompting is not OMIT:
1690+
_request["raw_prompting"] = raw_prompting
16711691
_response = await self._client_wrapper.httpx_client.request(
16721692
"POST",
16731693
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "chat"),

src/cohere/core/client_wrapper.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ def get_headers(self) -> typing.Dict[str, str]:
2121
headers: typing.Dict[str, str] = {
2222
"X-Fern-Language": "Python",
2323
"X-Fern-SDK-Name": "cohere",
24-
"X-Fern-SDK-Version": "5.0.0a7",
24+
"X-Fern-SDK-Version": "5.0.0a8",
2525
}
2626
if self._client_name is not None:
2727
headers["X-Client-Name"] = self._client_name

src/cohere/types/__init__.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -112,10 +112,8 @@
112112
from .summarize_response import SummarizeResponse
113113
from .tokenize_response import TokenizeResponse
114114
from .tool import Tool
115-
from .tool_definition import ToolDefinition
116-
from .tool_definition_inputs_item import ToolDefinitionInputsItem
117-
from .tool_definition_outputs_item import ToolDefinitionOutputsItem
118115
from .tool_input import ToolInput
116+
from .tool_parameter_definitions_value import ToolParameterDefinitionsValue
119117
from .update_connector_response import UpdateConnectorResponse
120118

121119
__all__ = [
@@ -227,9 +225,7 @@
227225
"SummarizeResponse",
228226
"TokenizeResponse",
229227
"Tool",
230-
"ToolDefinition",
231-
"ToolDefinitionInputsItem",
232-
"ToolDefinitionOutputsItem",
233228
"ToolInput",
229+
"ToolParameterDefinitionsValue",
234230
"UpdateConnectorResponse",
235231
]

src/cohere/types/tool.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import typing
55

66
from ..core.datetime_utils import serialize_datetime
7-
from .tool_definition import ToolDefinition
7+
from .tool_parameter_definitions_value import ToolParameterDefinitionsValue
88

99
try:
1010
import pydantic.v1 as pydantic # type: ignore
@@ -14,7 +14,8 @@
1414

1515
class Tool(pydantic.BaseModel):
1616
name: str
17-
definition: typing.Optional[ToolDefinition] = None
17+
description: str
18+
parameter_definitions: typing.Optional[typing.Dict[str, ToolParameterDefinitionsValue]] = None
1819

1920
def json(self, **kwargs: typing.Any) -> str:
2021
kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

src/cohere/types/tool_definition.py

Lines changed: 0 additions & 32 deletions
This file was deleted.

src/cohere/types/tool_definition_outputs_item.py

Lines changed: 0 additions & 31 deletions
This file was deleted.

src/cohere/types/tool_definition_inputs_item.py renamed to src/cohere/types/tool_parameter_definitions_value.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,7 @@
1111
import pydantic # type: ignore
1212

1313

14-
class ToolDefinitionInputsItem(pydantic.BaseModel):
15-
name: str
14+
class ToolParameterDefinitionsValue(pydantic.BaseModel):
1615
description: str
1716
type: str
1817
required: typing.Optional[bool] = None

0 commit comments

Comments (0)