2 changes: 1 addition & 1 deletion pyproject.toml
@@ -79,7 +79,7 @@ orjson = "*"
schedule = "*"
uvloop = {version = "*", markers = "sys_platform == 'linux' or sys_platform == 'darwin'"}
winloop = {version = "*", markers = "sys_platform == 'win32'"}

google-generativeai = "*"
[tool.poetry.scripts]
swarms = "swarms.cli.main:main"

1 change: 1 addition & 0 deletions requirements.txt
@@ -27,3 +27,4 @@ orjson
schedule
uvloop; sys_platform == 'linux' or sys_platform == 'darwin' # linux or macos only
winloop; sys_platform == 'win32' # windows only
google-generativeai
52 changes: 44 additions & 8 deletions swarms/structs/multi_agent_router.py
@@ -160,15 +160,39 @@
)
router_system_prompt += self._create_boss_system_prompt()

# Conditionally enable structured outputs only on supported models
# Avoids errors on models like `gpt-3.5-turbo` which don't support json_schema
def _supports_structured_outputs(model_name: str) -> bool:
name = (model_name or "").lower()
return any(
prefix in name
for prefix in [
"gpt-4.1",
"openai/gpt-4.1",
"gpt-4o",
"openai/gpt-4o",
"o3-",
"openai/o3-",
"o4-",
"openai/o4-",
]
)
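Note (reviewer illustration, not part of the diff): the helper is a plain substring match, so a standalone re-implementation shows which model names opt into structured outputs under the prefix list above:

# Standalone copy of the helper for illustration only; mirrors the nested
# _supports_structured_outputs defined in the diff above.
def supports_structured_outputs(model_name: str) -> bool:
    name = (model_name or "").lower()
    return any(
        prefix in name
        for prefix in [
            "gpt-4.1", "openai/gpt-4.1",
            "gpt-4o", "openai/gpt-4o",
            "o3-", "openai/o3-",
            "o4-", "openai/o4-",
        ]
    )

assert supports_structured_outputs("gpt-4o-mini")        # json_schema enabled
assert supports_structured_outputs("openai/gpt-4.1")     # json_schema enabled
assert not supports_structured_outputs("gpt-3.5-turbo")  # plain-text fallback path
assert not supports_structured_outputs("")               # empty name handled by `or ""`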

# Build LiteLLM kwargs with conditional response_format
lite_llm_kwargs = {
"model_name": self.model,
"system_prompt": router_system_prompt,
"temperature": self.temperature,
"tool_choice": "auto",
"parallel_tool_calls": True,
}

if _supports_structured_outputs(self.model):
lite_llm_kwargs["response_format"] = MultipleHandOffsResponse

Check failure (Code scanning / Pyre): Incompatible parameter type [6]: In call dict.__setitem__, for 2nd positional argument, expected Union[float, str] but got Type[MultipleHandOffsResponse].
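One way to address this alert (a sketch, not part of the PR): without an annotation, Pyre infers the dict's value type from the literal entries, roughly Union[float, str], so assigning a response-model class afterwards is rejected. Annotating the dict with an explicit value type of Any makes the later assignment well-typed:

from typing import Any, Dict

# Without an annotation, the value type is inferred from the literals, so a
# later assignment of a class object is flagged, as in the alert above.
inferred_kwargs = {"temperature": 0.1, "tool_choice": "auto"}
# inferred_kwargs["response_format"] = SomeModel   # <- what Pyre rejects

# With an explicit annotation the later assignment type-checks.
annotated_kwargs: Dict[str, Any] = {"temperature": 0.1, "tool_choice": "auto"}
annotated_kwargs["response_format"] = dict  # stand-in for MultipleHandOffsResponse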

self.function_caller = LiteLLM(
model_name=self.model,
system_prompt=router_system_prompt,
temperature=self.temperature,
tool_choice="auto",
parallel_tool_calls=True,
response_format=MultipleHandOffsResponse,
*args,
**kwargs,
**{**lite_llm_kwargs, **kwargs},
)
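Note on the merged call (reviewer illustration): in {**lite_llm_kwargs, **kwargs} the caller-supplied kwargs win on key collisions, so explicitly passed options still override the conditional defaults built above:

# Dict-merge precedence: the later unpacking overrides earlier keys.
defaults = {"temperature": 0.5, "tool_choice": "auto"}
overrides = {"temperature": 0.9}

merged = {**defaults, **overrides}
assert merged == {"temperature": 0.9, "tool_choice": "auto"}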

def __repr__(self):
@@ -312,7 +336,19 @@
# Get boss decision using function calling
boss_response_str = self.function_caller.run(task)

boss_response_str = orjson.loads(boss_response_str)
# Handle JSON parsing with fallback for models without structured outputs
try:
boss_response_str = orjson.loads(boss_response_str)

Check failure (Code scanning / Pyre): Unbound name [10]: Name orjson is used but not defined in the current scope.
except (orjson.JSONDecodeError, TypeError):

Check failure (Code scanning / Pyre): Invalid except clause [66]: Exception handler type annotation unknown must extend BaseException.
# Fallback: route to first agent when JSON parsing fails
first_agent = list(self.agents.keys())[0]
boss_response_str = {
"handoffs": [{
"reasoning": "Fallback routing due to JSON parsing error",
"agent_name": first_agent,
"task": task
}]
}
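Both Pyre alerts above appear to share one root cause: orjson is reported as unbound in this scope, which in turn makes orjson.JSONDecodeError an unknown type in the except clause. If the module really lacks the import, adding it at module level would resolve both. A self-contained sketch of the intended parse-with-fallback flow, assuming the import is present (illustration with a hypothetical helper name, not part of the diff):

import orjson  # module-level import resolves the unbound-name and except-clause alerts
from typing import Any


def parse_handoffs(raw: Any, fallback_agent: str, task: str) -> dict:
    """Hypothetical helper mirroring the parse-with-fallback logic above."""
    try:
        return orjson.loads(raw)
    except (orjson.JSONDecodeError, TypeError):
        # Same fallback shape as the diff: route everything to one agent.
        return {
            "handoffs": [
                {
                    "reasoning": "Fallback routing due to JSON parsing error",
                    "agent_name": fallback_agent,
                    "task": task,
                }
            ]
        }


assert parse_handoffs(b'{"handoffs": []}', "agent-a", "t") == {"handoffs": []}
assert parse_handoffs(None, "agent-a", "summarize")["handoffs"][0]["agent_name"] == "agent-a"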

if self.print_on:
formatter.print_panel(