diff --git a/requirements.txt b/requirements.txt
index 975c9a3..7f9490a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1 @@
-lightning_sdk >= 2025.09.11
+lightning_sdk >= 2025.09.16
diff --git a/src/litai/llm.py b/src/litai/llm.py
index 9dfc9e0..54186dd 100644
--- a/src/litai/llm.py
+++ b/src/litai/llm.py
@@ -269,7 +269,7 @@ def chat(  # noqa: D417
         stream: bool = False,
         tools: Optional[Sequence[Union[LitTool, "StructuredTool"]]] = None,
         auto_call_tools: bool = False,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] = None,
+        reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
         **kwargs: Any,
     ) -> str:
         """Sends a message to the LLM and retrieves a response.
@@ -290,13 +290,14 @@ def chat(  # noqa: D417
                 categorized by conversation ID.
             full_response (bool): Whether the entire response should be returned from the chat.
             auto_call_tools (bool): Tools will be executed automatically whenever applicable. Defaults to False.
-            reasoning_effort (Optional[Literal["low", "medium", "high"]]): The level of reasoning effort for the model.
+            reasoning_effort (Optional[Literal["none", "low", "medium", "high"]]):
+                The level of reasoning effort for the model.
             **kwargs (Any): Additional keyword arguments
 
         Returns:
             str: The response from the LLM.
         """
-        if reasoning_effort is not None and reasoning_effort not in ["low", "medium", "high"]:
-            raise ValueError("reasoning_effort must be 'low', 'medium', 'high', or None")
+        if reasoning_effort is not None and reasoning_effort not in ["none", "low", "medium", "high"]:
+            raise ValueError("reasoning_effort must be 'none', 'low', 'medium', 'high', or None")
         self._wait_for_model()
         lit_tools = LitTool.convert_tools(tools)
diff --git a/tests/test_llm.py b/tests/test_llm.py
index 0f45e66..d9eaea3 100644
--- a/tests/test_llm.py
+++ b/tests/test_llm.py
@@ -119,6 +119,7 @@ def test_llm_chat(mock_llm_class):
         system_prompt="You are a helpful assistant.",
         metadata={"user_api": "123456"},
         my_kwarg="test-kwarg",
+        reasoning_effort="none",
     )
 
     assert isinstance(response, str)
@@ -134,7 +135,7 @@ def test_llm_chat(mock_llm_class):
         full_response=False,
         my_kwarg="test-kwarg",
         tools=None,
-        reasoning_effort=None,
+        reasoning_effort="none",
     )
     test_kwargs = mock_llm_instance.chat.call_args.kwargs
     assert test_kwargs.get("my_kwarg") == "test-kwarg"
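
For reviewers, a minimal usage sketch of the new option. The import and constructor follow litai's public API, but the model identifier and prompt below are illustrative assumptions, not part of this diff:

    from litai import LLM

    # Hypothetical model identifier; any supported model behaves the same way.
    llm = LLM(model="openai/gpt-4")

    # "none" is now accepted alongside "low", "medium", "high", and None.
    response = llm.chat(
        "Summarize this changelog in one sentence.",
        reasoning_effort="none",
    )
    print(response)

    # Values outside the Literal still raise ValueError:
    # llm.chat("hi", reasoning_effort="max")

Passing reasoning_effort=None (the default) continues to omit the field entirely, so existing callers are unaffected.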