2 changes: 1 addition & 1 deletion requirements.txt
```diff
@@ -1 +1 @@
-lightning_sdk >= 2025.09.11
+lightning_sdk >= 2025.09.16
```
7 changes: 4 additions & 3 deletions src/litai/llm.py
```diff
@@ -269,7 +269,7 @@ def chat(  # noqa: D417
         stream: bool = False,
         tools: Optional[Sequence[Union[LitTool, "StructuredTool"]]] = None,
         auto_call_tools: bool = False,
-        reasoning_effort: Optional[Literal["low", "medium", "high"]] = None,
+        reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
         **kwargs: Any,
     ) -> str:
         """Sends a message to the LLM and retrieves a response.
@@ -290,13 +290,14 @@ def chat(  # noqa: D417
                 categorized by conversation ID.
             full_response (bool): Whether the entire response should be returned from the chat.
             auto_call_tools (bool): Tools will be executed automatically whenever applicable. Defaults to False.
-            reasoning_effort (Optional[Literal["low", "medium", "high"]]): The level of reasoning effort for the model.
+            reasoning_effort (Optional[Literal["none", "low", "medium", "high"]]):
+                The level of reasoning effort for the model.
             **kwargs (Any): Additional keyword arguments

         Returns:
             str: The response from the LLM.
         """
-        if reasoning_effort is not None and reasoning_effort not in ["low", "medium", "high"]:
-            raise ValueError("reasoning_effort must be 'low', 'medium', 'high', or None")
+        if reasoning_effort is not None and reasoning_effort not in ["none", "low", "medium", "high"]:
+            raise ValueError("reasoning_effort must be 'none', 'low', 'medium', 'high', or None")
         self._wait_for_model()
         lit_tools = LitTool.convert_tools(tools)
```
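For reference, a minimal usage sketch of the widened parameter. The import path and model id below are assumptions for illustration; only the `reasoning_effort` values come from this diff:

```python
from litai import LLM  # assumed public import for the class patched above

llm = LLM(model="openai/gpt-5")  # hypothetical model id

# "none" is now accepted alongside "low", "medium", and "high";
# omitting the argument (None) preserves the previous behavior.
print(llm.chat("Summarize this change.", reasoning_effort="none"))
```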
3 changes: 2 additions & 1 deletion tests/test_llm.py
```diff
@@ -119,6 +119,7 @@ def test_llm_chat(mock_llm_class):
         system_prompt="You are a helpful assistant.",
         metadata={"user_api": "123456"},
         my_kwarg="test-kwarg",
+        reasoning_effort="none",
     )

     assert isinstance(response, str)
@@ -134,7 +135,7 @@ def test_llm_chat(mock_llm_class):
         full_response=False,
         my_kwarg="test-kwarg",
         tools=None,
-        reasoning_effort=None,
+        reasoning_effort="none",
     )
     test_kwargs = mock_llm_instance.chat.call_args.kwargs
     assert test_kwargs.get("my_kwarg") == "test-kwarg"
```
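Not part of this PR, but a complementary test for the validation path could look like the sketch below. It reuses the existing `mock_llm_class` fixture name from `tests/test_llm.py`; any value outside the allowed set should trigger the `ValueError` raised in `chat`:

```python
import pytest

from litai import LLM  # assumed public import, as above


def test_llm_chat_invalid_reasoning_effort(mock_llm_class):
    llm = LLM(model="openai/gpt-5")  # hypothetical model id
    with pytest.raises(ValueError):
        # "extreme" is outside {"none", "low", "medium", "high"},
        # so validation should fail before any model call is made
        llm.chat("hello", reasoning_effort="extreme")
```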