Skip to content

Commit a370729

Browse files
authored
feat: allow to pass "none" as reasoning effort (#64)
* allow to pass none as reasoning effort * bump sdk version
1 parent 00a3eca commit a370729

File tree

3 files changed

+7
-5
lines changed

3 files changed

+7
-5
lines changed

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
lightning_sdk >= 2025.09.11
1+
lightning_sdk >= 2025.09.16

src/litai/llm.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,7 @@ def chat( # noqa: D417
269269
stream: bool = False,
270270
tools: Optional[Sequence[Union[LitTool, "StructuredTool"]]] = None,
271271
auto_call_tools: bool = False,
272-
reasoning_effort: Optional[Literal["low", "medium", "high"]] = None,
272+
reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
273273
**kwargs: Any,
274274
) -> str:
275275
"""Sends a message to the LLM and retrieves a response.
@@ -290,13 +290,14 @@ def chat( # noqa: D417
290290
categorized by conversation ID.
291291
full_response (bool): Whether the entire response should be returned from the chat.
292292
auto_call_tools (bool): Tools will be executed automatically whenever applicable. Defaults to False.
293-
reasoning_effort (Optional[Literal["low", "medium", "high"]]): The level of reasoning effort for the model.
293+
reasoning_effort (Optional[Literal["none", "low", "medium", "high"]]):
294+
The level of reasoning effort for the model.
294295
**kwargs (Any): Additional keyword arguments
295296
296297
Returns:
297298
str: The response from the LLM.
298299
"""
299-
if reasoning_effort is not None and reasoning_effort not in ["low", "medium", "high"]:
300+
if reasoning_effort is not None and reasoning_effort not in ["none", "low", "medium", "high"]:
300301
raise ValueError("reasoning_effort must be 'none', 'low', 'medium', 'high', or None")
301302
self._wait_for_model()
302303
lit_tools = LitTool.convert_tools(tools)

tests/test_llm.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,7 @@ def test_llm_chat(mock_llm_class):
119119
system_prompt="You are a helpful assistant.",
120120
metadata={"user_api": "123456"},
121121
my_kwarg="test-kwarg",
122+
reasoning_effort="none",
122123
)
123124

124125
assert isinstance(response, str)
@@ -134,7 +135,7 @@ def test_llm_chat(mock_llm_class):
134135
full_response=False,
135136
my_kwarg="test-kwarg",
136137
tools=None,
137-
reasoning_effort=None,
138+
reasoning_effort="none",
138139
)
139140
test_kwargs = mock_llm_instance.chat.call_args.kwargs
140141
assert test_kwargs.get("my_kwarg") == "test-kwarg"

0 commit comments

Comments
 (0)