1 change: 0 additions & 1 deletion pyproject.toml
@@ -78,7 +78,6 @@ aiohttp = "*"
schedule = "*"
uvloop = {version = "*", markers = "sys_platform == 'linux' or sys_platform == 'darwin'"}
winloop = {version = "*", markers = "sys_platform == 'win32'"}
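The environment markers above install uvloop only on Linux/macOS and winloop only on Windows. Runtime selection of the matching event loop then typically looks like the sketch below; this is an illustrative pattern, not code from this repo, and it assumes winloop mirrors uvloop's install() API:

    import sys

    try:
        if sys.platform == "win32":
            import winloop
            winloop.install()
        else:
            import uvloop
            uvloop.install()
    except ImportError:
        pass  # the stdlib asyncio event loop remains in effect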

[tool.poetry.scripts]
swarms = "swarms.cli.main:main"

69 changes: 62 additions & 7 deletions swarms/structs/multi_agent_router.py
@@ -160,15 +160,54 @@
)
router_system_prompt += self._create_boss_system_prompt()

# Conditionally enable structured outputs only on supported models
# Avoids errors on models like `gpt-3.5-turbo` which don't support json_schema
def _supports_structured_outputs(model_name: str) -> bool:
name = (model_name or "").lower()
# Models that DON'T support structured outputs (exclude these)
unsupported_models = [
"gpt-3.5-turbo",
"gpt-4-turbo",
"gpt-4",
"text-davinci",
"text-curie",
"text-babbage",
"text-ada",
"claude-2",
"claude-instant",
"claude-v1",
"gemini-pro-vision",
"text-bison",
"chat-bison",
"llama-2",
"llama-3",
"mistral-7b",
"mistral-small",
]

# If it's in the unsupported list, return False
if any(unsupported in name for unsupported in unsupported_models):
return False

# Otherwise, assume it supports structured outputs
# This includes newer Claude, Gemini, and OpenAI models
return True
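Note (sketch, not part of the diff): the check is a plain substring match, so deny-listing "gpt-4" also catches any newer name that contains it, such as "gpt-4o"; unrecognized or empty names default to supported.

    _supports_structured_outputs("gpt-3.5-turbo")     # False, exact deny-list hit
    _supports_structured_outputs("gpt-4o")            # False, "gpt-4" is a substring
    _supports_structured_outputs("claude-3-5-sonnet") # True, no deny-list substring
    _supports_structured_outputs("")                  # True, empty name passes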

# Build LiteLLM kwargs with conditional response_format
lite_llm_kwargs = {
"model_name": self.model,
"system_prompt": router_system_prompt,
"temperature": self.temperature,
"tool_choice": "auto",
"parallel_tool_calls": True,
}

if _supports_structured_outputs(self.model):
lite_llm_kwargs["response_format"] = MultipleHandOffsResponse

Check failure (Code scanning / Pyre): Incompatible parameter type [6]: In call dict.__setitem__, for 2nd positional argument, expected Union[float, str] but got Type[MultipleHandOffsResponse].
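Pyre infers the dict's value type from the literal at its creation site (strings plus a float/bool), so the later assignment of a Pydantic model class fails the check. One way to satisfy it, sketched here as an assumption rather than what this PR does, is to widen the annotation up front:

    from typing import Any, Dict

    lite_llm_kwargs: Dict[str, Any] = {
        "model_name": self.model,
        "system_prompt": router_system_prompt,
        "temperature": self.temperature,
        "tool_choice": "auto",
        "parallel_tool_calls": True,
    }
    lite_llm_kwargs["response_format"] = MultipleHandOffsResponse  # now type-checks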

self.function_caller = LiteLLM(
model_name=self.model,
system_prompt=router_system_prompt,
temperature=self.temperature,
tool_choice="auto",
parallel_tool_calls=True,
response_format=MultipleHandOffsResponse,
*args,
**kwargs,
**{**lite_llm_kwargs, **kwargs},
)
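Since later unpackings override earlier keys in a dict display, the **{**lite_llm_kwargs, **kwargs} merge lets caller-supplied kwargs take precedence over the router's defaults. A minimal, self-contained illustration of that precedence rule:

    defaults = {"temperature": 0.1, "tool_choice": "auto"}
    overrides = {"temperature": 0.7}
    merged = {**defaults, **overrides}
    print(merged)  # {'temperature': 0.7, 'tool_choice': 'auto'}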

def __repr__(self):
@@ -300,7 +339,7 @@
"""
Routes a task to the appropriate agent and returns their response.

Args:
task (str): The task to be routed.

Returns:
@@ -312,8 +351,24 @@
# Get boss decision using function calling
boss_response_str = self.function_caller.run(task)


# Handle JSON parsing with fallback for models without structured outputs
try:
boss_response_str = orjson.loads(boss_response_str)

Check failure (Code scanning / Pyre): Unbound name [10]: Name orjson is used but not defined in the current scope.
except (orjson.JSONDecodeError, TypeError):

Check failure (Code scanning / Pyre): Invalid except clause [66]: Exception handler type annotation unknown must extend BaseException.
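Both Pyre errors above share one root cause: orjson is never imported in this module. Once a module-level import orjson is added, the name is bound and the except clause becomes valid, since orjson.JSONDecodeError is documented to subclass both json.JSONDecodeError and ValueError. A quick sanity check, assuming orjson is installed:

    import json
    import orjson

    assert issubclass(orjson.JSONDecodeError, json.JSONDecodeError)
    assert issubclass(orjson.JSONDecodeError, ValueError)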
# Fallback: route to first agent when JSON parsing fails
first_agent = list(self.agents.keys())[0]
boss_response_str = {
"handoffs": [{
"reasoning": "Fallback routing due to JSON parsing error",
"agent_name": first_agent,
"task": task
}]
}

boss_response_str = json.loads(boss_response_str)
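If the json.loads call above is a leftover rather than the pre-diff line this change deletes, it would re-parse a value that is already a dict and raise TypeError. A self-contained sketch of the presumably intended parse-once flow, using a hypothetical fallback_agent parameter in place of list(self.agents.keys())[0]:

    import orjson

    def parse_boss_response(raw: str, fallback_agent: str, task: str) -> dict:
        # Parse the router's JSON reply once; fall back to a default handoff.
        try:
            return orjson.loads(raw)
        except (orjson.JSONDecodeError, TypeError):
            # TypeError also covers raw being None or a non-string object.
            return {
                "handoffs": [{
                    "reasoning": "Fallback routing due to JSON parsing error",
                    "agent_name": fallback_agent,
                    "task": task,
                }]
            }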


if self.print_on:
formatter.print_panel(
json.dumps(boss_response_str, indent=4),