@@ -59,15 +59,31 @@ def build_team_agent(payload: Dict, agents: List[Agent] = None, api_key: Text =
5959 payload_agents = agents
6060 if payload_agents is None :
6161 payload_agents = []
62- for i , agent in enumerate (agents_dict ):
62+ # Use parallel agent fetching with ThreadPoolExecutor for better performance
63+ from concurrent .futures import ThreadPoolExecutor , as_completed
64+
65+ def fetch_agent (agent_data ):
66+ """Fetch a single agent by ID with error handling"""
6367 try :
64- payload_agents . append ( AgentFactory .get (agent ["assetId" ]) )
65- except Exception :
68+ return AgentFactory .get (agent_data ["assetId" ])
69+ except Exception as e :
6670 logging .warning (
67- f"Agent { agent ['assetId' ]} not found. Make sure it exists or you have access to it. "
68- "If you think this is an error, please contact the administrators."
71+ f"Agent { agent_data ['assetId' ]} not found. Make sure it exists or you have access to it. "
72+ f"If you think this is an error, please contact the administrators. Error: {e} "
6973 )
70- continue
74+ return None
75+
76+ # Fetch all agents in parallel (only if there are agents to fetch)
77+ if len (agents_dict ) > 0 :
78+ with ThreadPoolExecutor (max_workers = min (len (agents_dict ), 10 )) as executor :
79+ # Submit all agent fetch tasks
80+ future_to_agent = {executor .submit (fetch_agent , agent ): agent for agent in agents_dict }
81+
82+ # Collect results as they complete
83+ for future in as_completed (future_to_agent ):
84+ agent_result = future .result ()
85+ if agent_result is not None :
86+ payload_agents .append (agent_result )
7187
7288 # Ensure custom classes are instantiated: for compatibility with backend return format
7389 inspectors = []
@@ -90,6 +106,15 @@ def build_team_agent(payload: Dict, agents: List[Agent] = None, api_key: Text =
90106 # Get LLMs from tools if present
91107 supervisor_llm = None
92108 mentalist_llm = None
109+
110+ # Cache for models to avoid duplicate fetching of the same model ID
111+ model_cache = {}
112+
113+ def get_cached_model (model_id : str ) -> Any :
114+ """Get model from cache or fetch if not cached"""
115+ if model_id not in model_cache :
116+ model_cache [model_id ] = ModelFactory .get (model_id , api_key = api_key , use_cache = True )
117+ return model_cache [model_id ]
93118
94119 # First check if we have direct LLM objects in the payload
95120 if "supervisor_llm" in payload :
@@ -100,14 +125,8 @@ def build_team_agent(payload: Dict, agents: List[Agent] = None, api_key: Text =
100125 elif "tools" in payload :
101126 for tool in payload ["tools" ]:
102127 if tool ["type" ] == "llm" :
103- try :
104- llm = ModelFactory .get (payload ["llmId" ], api_key = api_key )
105- except Exception :
106- logging .warning (
107- f"LLM { payload ['llmId' ]} not found. Make sure it exists or you have access to it. "
108- "If you think this is an error, please contact the administrators."
109- )
110- continue
128+ # Use cached model fetching to avoid duplicate API calls
129+ llm = get_cached_model (payload ["llmId" ])
111130 # Set parameters from the tool
112131 if "parameters" in tool :
113132 # Apply all parameters directly to the LLM properties
@@ -258,7 +277,7 @@ def build_team_agent_from_yaml(yaml_code: str, llm_id: str, api_key: str, team_i
258277 team_name = system_data .get ("name" , "" )
259278 team_description = system_data .get ("description" , "" )
260279 team_instructions = system_data .get ("instructions" , "" )
261- llm = ModelFactory .get (llm_id )
280+ llm = ModelFactory .get (llm_id , use_cache = True )
262281 # Create agent mapping by name for easier task assignment
263282 agents_mapping = {}
264283 agent_objs = []
0 commit comments