@@ -11,15 +11,6 @@
def simulate_agent_discovery():
    """Simulate how an agent would use the discovery tool."""

-    # Create a sample agent that will use the discovery tool
-    Agent(
-        agent_name="ProjectCoordinator",
-        agent_description="Coordinates projects and assigns tasks to other agents",
-        system_prompt="You are a project coordinator who helps organize work and delegate tasks to the most appropriate team members. You can discover information about other agents to make better decisions.",
-        model_name="gpt-4o-mini",
-        temperature=0.4,
-    )
-
    # Create the AOP cluster
    aop = AOP(
        server_name="Project Team",
8 changes: 0 additions & 8 deletions examples/aop_examples/example_new_agent_tools.py
@@ -7,19 +7,12 @@

import json
import asyncio
-from swarms.structs.aop import AOPCluster
from swarms.tools.mcp_client_tools import execute_tool_call_simple


async def demonstrate_new_agent_tools():
    """Demonstrate the new agent information tools."""

-    # Create AOP cluster connection
-    AOPCluster(
-        urls=["http://localhost:5932/mcp"],
-        transport="streamable-http",
-    )
-
    print("🔧 New AOP Agent Information Tools Demo")
    print("=" * 50)
    print()
@@ -77,7 +70,6 @@ async def demonstrate_new_agent_tools():
if isinstance(result, list) and len(result) > 0:
data = result[0]
if data.get("success"):
-data.get("agent_info", {})
discovery_info = data.get("discovery_info", {})
print(
f" Agent: {discovery_info.get('agent_name', 'Unknown')}"
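
Both removals in this file follow the same cleanup as the discovery example above: a constructed `AOPCluster(...)` that is never bound to a name, and a bare `data.get("agent_info", {})` whose result is discarded, are dead code. A minimal sketch of the pattern (the `Client` class is illustrative, not the swarms API):

```python
class Client:
    """Stand-in for AOPCluster; construction has no side effects."""

    def __init__(self, url: str) -> None:
        self.url = url


def before() -> None:
    Client("http://localhost:5932/mcp")  # never bound to a name: dead code
    data = {"agent_info": {"name": "a"}}
    data.get("agent_info", {})  # pure expression, result unused: dead code


def after(data: dict) -> None:
    info = data.get("agent_info", {})  # keep only when the value is used
    print(info)
```
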
1 change: 0 additions & 1 deletion examples/demos/legal/legal_swarm.py
@@ -303,7 +303,6 @@ def create_pdf_from_string(
"""
try:
from reportlab.lib.pagesizes import letter
-from reportlab.pdfgen import canvas
from reportlab.lib.styles import (
getSampleStyleSheet,
ParagraphStyle,
2 changes: 1 addition & 1 deletion examples/guides/graphworkflow_guide/quick_start_guide.py
@@ -337,7 +337,7 @@ def step_4_advanced_patterns():
print("\n📊 Workflow structure:")
try:
advanced_workflow.visualize_simple()
-except:
+except Exception:
print(" (Text visualization not available)")

# Execute advanced workflow
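
The `except:` to `except Exception:` change, repeated throughout this PR, is more than style: a bare `except` also traps `BaseException` subclasses such as `KeyboardInterrupt` and `SystemExit`, so Ctrl-C can no longer stop the program inside that block. A minimal sketch of the difference:

```python
# Why `except Exception:` is safer than a bare `except:`:
# KeyboardInterrupt and SystemExit derive from BaseException, not
# Exception, so they propagate as intended under the narrower clause.

def fragile() -> None:
    try:
        raise KeyboardInterrupt  # e.g. user presses Ctrl-C
    except:  # noqa: E722 - swallows the interrupt silently
        pass


def robust() -> None:
    try:
        raise KeyboardInterrupt
    except Exception:  # KeyboardInterrupt is not caught here
        pass


if __name__ == "__main__":
    fragile()  # returns silently: the interrupt was swallowed
    try:
        robust()
    except KeyboardInterrupt:
        print("interrupt propagated as intended")
```
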
4 changes: 2 additions & 2 deletions examples/guides/graphworkflow_guide/setup_and_test.py
@@ -109,7 +109,7 @@ def test_basic_import() -> bool:
print("\n🧪 Testing basic GraphWorkflow import...")

try:
-from swarms.structs.graph_workflow import GraphWorkflow
+from swarms.structs.graph_workflow import GraphWorkflow  # noqa: F401

print("✅ GraphWorkflow imported successfully")
return True
@@ -123,7 +123,7 @@ def test_agent_import() -> bool:
print("\n🧪 Testing Agent import...")

try:
-from swarms import Agent
+from swarms import Agent  # noqa: F401

print("✅ Agent imported successfully")
return True
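
The `# noqa: F401` annotations tell flake8/ruff that these imports are intentionally unused: in an import smoke test, the import itself is the assertion. An alternative that avoids the unused binding entirely, sketched on the assumption that only importability is being tested:

```python
import importlib


def check_importable(module_name: str) -> bool:
    """Return True if the module imports cleanly, without binding a name."""
    try:
        importlib.import_module(module_name)
        print(f"✅ {module_name} imported successfully")
        return True
    except ImportError as e:
        print(f"❌ Failed to import {module_name}: {e}")
        return False


check_importable("swarms.structs.graph_workflow")
```
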
@@ -213,7 +213,7 @@ def test_file_save_load():
try:
os.remove("test_workflow.json")
print("\n🧹 Cleaned up test file")
-except:
+except Exception:
pass


@@ -1202,7 +1202,7 @@ def animate(frame):
self.fig.canvas.manager.window.wm_attributes(
"-topmost", 0
)
-except:
+except Exception:
pass # Not all backends support this

plt.show(block=False)
@@ -1535,7 +1535,7 @@ def run(
if with_visualization and self.fig is not None:
try:
self.update_visualization()
-except:
+except Exception:
pass # Ignore visualization errors

# Print status every 10 seconds
@@ -250,7 +250,7 @@ def main():
if simulation.fig is not None:
try:
simulation.update_visualization()
-except:
+except Exception:
pass # Ignore visualization errors

# Check if we have enough conversations to make it interesting
@@ -25,7 +25,6 @@

# Third-party model imports
try:
-import timm
from segment_anything import (
SamAutomaticMaskGenerator,
sam_model_registry,
2 changes: 1 addition & 1 deletion examples/utils/agent_loader/multi_agents_loader_demo.py
@@ -1,4 +1,5 @@
from swarms.utils import load_agents_from_markdown
+from swarms.structs.sequential_workflow import SequentialWorkflow

agents = load_agents_from_markdown(
[
@@ -9,7 +10,6 @@
)

# Example 3: Use agents in a workflow
-from swarms.structs.sequential_workflow import SequentialWorkflow

workflow = SequentialWorkflow(agents=agents, max_loops=1)

2 changes: 1 addition & 1 deletion scripts/docker/test_docker.py
@@ -22,7 +22,7 @@ def test_swarms_import() -> Dict[str, Any]:
)

# Test basic functionality
-from swarms import Agent
+from swarms import Agent  # noqa: F401

print(" Agent class imported successfully")

2 changes: 1 addition & 1 deletion swarms/structs/multi_model_gpu_manager.py
@@ -34,7 +34,7 @@
# Try to import transformers, but don't fail if not available
try:
import transformers
-from transformers import AutoModel, AutoTokenizer
+from transformers import AutoModel

TRANSFORMERS_AVAILABLE = True
except ImportError:
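
Dropping `AutoTokenizer` keeps this optional-import block limited to names the module actually uses. In isolation, the guard-plus-flag pattern used here (and again in `tests/aop/aop_benchmark.py` below) looks like the following generic sketch, not the module's exact code:

```python
# Generic optional-dependency guard: import once at module load,
# record availability in a flag, and fail with a clear message only
# when the optional feature is actually requested.
try:
    from transformers import AutoModel  # optional heavy dependency

    TRANSFORMERS_AVAILABLE = True
except ImportError:
    AutoModel = None  # type: ignore[assignment]
    TRANSFORMERS_AVAILABLE = False


def load_model(name: str):
    if not TRANSFORMERS_AVAILABLE:
        raise ImportError(
            "transformers is required for load_model(); "
            "install it with: pip install transformers"
        )
    return AutoModel.from_pretrained(name)
```
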
1 change: 0 additions & 1 deletion swarms/tools/py_func_to_openai_func_str.py
@@ -16,7 +16,6 @@
Type,
TypeVar,
Union,
-get_args,
)

from pydantic import BaseModel, Field
1 change: 0 additions & 1 deletion swarms/utils/audio_processing.py
@@ -117,7 +117,6 @@ def process_audio_with_model(
from litellm import (
completion,
supports_audio_input,
-supports_audio_output,
)

if not supports_audio_input(model):
5 changes: 2 additions & 3 deletions tests/agent/agents/test_agent_logging.py
@@ -1,11 +1,10 @@
from unittest.mock import MagicMock
import unittest
from swarms.structs.agent import Agent
-from swarms.tools.tool_parse_exec import parse_and_execute_json

# Mock parse_and_execute_json for testing
-parse_and_execute_json = MagicMock()
-parse_and_execute_json.return_value = {
+mock_parse_and_execute_json = MagicMock()
+mock_parse_and_execute_json.return_value = {
"tool_name": "calculator",
"args": {"numbers": [2, 2]},
"output": "4",
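
The rename fixes a real problem: the module-level `parse_and_execute_json = MagicMock()` silently shadowed the function imported two lines earlier, rebinding a real import to a mock for the whole file. Prefixing the mock keeps both names distinct. A `unittest.mock.patch`-based sketch (assuming the code under test resolves the function through its module) is another common shape for this:

```python
import unittest
from unittest.mock import MagicMock, patch


class TestToolExecution(unittest.TestCase):
    # Patch the name at its source module (or wherever the code under
    # test looks it up), for this test only, instead of shadowing the
    # import at module level.
    @patch("swarms.tools.tool_parse_exec.parse_and_execute_json")
    def test_calculator_tool(self, mock_parse: MagicMock) -> None:
        mock_parse.return_value = {
            "tool_name": "calculator",
            "args": {"numbers": [2, 2]},
            "output": "4",
        }
        result = mock_parse('{"tool_name": "calculator"}')
        self.assertEqual(result["output"], "4")
```
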
4 changes: 2 additions & 2 deletions tests/agent/benchmark_agent/test_auto_test_eval.py
@@ -92,7 +92,7 @@ def _get_swarms_version(self) -> str:
import swarms

return swarms.__version__
-except:
+except Exception:
return "Unknown"

def _get_system_info(self) -> SwarmSystemInfo:
@@ -185,7 +185,7 @@ def _get_dependencies_info(self) -> str:
for dist in pkg_resources.working_set:
deps.append(f"- {dist.key} {dist.version}")
return "\n".join(deps)
-except:
+except Exception:
return "Unable to fetch dependency information"

# First, add this method to your SwarmsIssueReporter class
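
One adjacent note: `_get_dependencies_info` still iterates `pkg_resources.working_set`, and `pkg_resources` is deprecated in recent setuptools. A standard-library equivalent (Python 3.8+), sketched here rather than taken from the PR:

```python
from importlib import metadata


def dependencies_info() -> str:
    """List installed distributions, one '- name version' per line."""
    try:
        deps = [
            f"- {dist.metadata['Name']} {dist.version}"
            for dist in metadata.distributions()
        ]
        return "\n".join(deps)
    except Exception:
        return "Unable to fetch dependency information"
```
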
101 changes: 87 additions & 14 deletions tests/aop/aop_benchmark.py
@@ -1,36 +1,62 @@
#!/usr/bin/env python3
"""
AOP Framework Benchmarking Suite
This comprehensive benchmarking suite tests the scaling laws of the AOP (Agent Orchestration Platform)
framework by measuring latency, throughput, memory usage, and other performance metrics across different
agent counts and configurations.
Features:
- Scaling law analysis (1 to 100+ agents)
- Latency and throughput measurements
- Memory usage profiling
- Concurrent execution testing
- Error rate analysis
- Performance visualization with charts
- Statistical analysis and reporting
- Real agent testing with actual LLM calls
Usage:
1. Set your OpenAI API key: export OPENAI_API_KEY="your-key-here"
2. Install required dependencies: pip install swarms
3. Run the benchmark: python aop_benchmark.py
4. Check results in the generated charts and reports
Configuration:
- Edit BENCHMARK_CONFIG at the top of the file to customize settings
- Adjust model_name, max_agents, and other parameters as needed
- This benchmark ONLY uses real agents with actual LLM calls
Author: AI Assistant
Date: 2024
"""

import gc
import json
import os
+import psutil

    [Check failure: Code scanning / Pyre: Undefined import [21]: Could not find a module corresponding to import psutil.]
import random
import statistics
import time
-import uuid
import warnings
+import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
-from dataclasses import asdict, dataclass
+from dataclasses import dataclass, asdict
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple

import matplotlib.pyplot as plt
import numpy as np
import openpyxl
import pandas as pd
-import psutil
import seaborn as sns
from dotenv import load_dotenv
from loguru import logger
from openpyxl.styles import Font
from openpyxl.utils.dataframe import dataframe_to_rows

-from swarms.structs.agent import Agent
from swarms.structs.aop import AOP
-from swarms.utils.litellm_wrapper import LiteLLM

-# Suppress warnings for cleaner output
-warnings.filterwarnings("ignore")
-
-# Load environment variables
-load_dotenv()
# Configuration
BENCHMARK_CONFIG = {
"models": [
@@ -60,6 +86,21 @@
"detailed_logging": True, # Enable detailed logging
}

+# Suppress warnings for cleaner output
+warnings.filterwarnings("ignore")
+
+# Load environment variables
+load_dotenv()
+
+# Import swarms Agent directly to avoid uvloop dependency
+try:
+    from swarms.structs.agent import Agent
+    from swarms.utils.litellm_wrapper import LiteLLM
+
+    SWARMS_AVAILABLE = True
+except ImportError:
+    SWARMS_AVAILABLE = False


@dataclass
class BenchmarkResult:
@@ -375,6 +416,12 @@
"SWARMS_API_KEY or OPENAI_API_KEY environment variable is required for real agent testing"
)

+# Check if swarms is available
+if not SWARMS_AVAILABLE:
+    raise ImportError(
+        "Swarms not available - install swarms: pip install swarms"
+    )

# Create LiteLLM instance for the specific model
llm = LiteLLM(
model_name=model_name,
@@ -877,7 +924,7 @@
try:
if len(str(cell.value)) > max_length:
max_length = len(str(cell.value))
-except:
+except Exception:
pass
adjusted_width = min(max_length + 2, 50)
ws.column_dimensions[column_letter].width = adjusted_width
@@ -1616,7 +1663,6 @@
initial_memory = (
psutil.Process().memory_info().rss / 1024 / 1024
)
-psutil.cpu_percent()

# Execute some tasks
available_agents = aop.list_agents()
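
One caveat on deleting the bare `psutil.cpu_percent()` call: with no `interval` argument, `cpu_percent()` reports usage since the previous call, so a discarded first call is sometimes a deliberate primer rather than dead code. Whether that was the intent here is not visible in this diff; the non-blocking sampling pattern looks like this:

```python
import time

import psutil

# Prime the counter: the first interval=None call returns a meaningless
# 0.0 because there is no previous sample to compare against.
psutil.cpu_percent(interval=None)

time.sleep(1.0)  # ... run the workload being measured ...

# Now this reports average CPU usage since the priming call.
cpu_used = psutil.cpu_percent(interval=None)
print(f"CPU during workload: {cpu_used:.1f}%")
```
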
@@ -2439,13 +2485,13 @@
ax2.grid(True, alpha=0.3)

# Add value labels on bars
-for i, (bar, time) in enumerate(
+for i, (bar, exec_time) in enumerate(
zip(bars2, df["avg_tool_execution_time"])
):
ax2.text(
bar.get_x() + bar.get_width() / 2,
bar.get_height() + 0.01,
f"{time:.2f}s",
f"{exec_time:.2f}s",
ha="center",
va="bottom",
fontsize=8,
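
The `time` to `exec_time` rename fixes genuine shadowing: the loop variable was rebinding the imported `time` module within that scope, so any later `time.time()` call there would fail. A minimal reproduction with illustrative values:

```python
import time

durations = [0.5, 1.2]

# Shadowing bug: the loop variable replaces the module name.
for time in durations:  # noqa: F402 - deliberately rebinds the module name
    pass

try:
    time.time()  # AttributeError: 'float' object has no attribute 'time'
except AttributeError as e:
    print(f"shadowed: {e}")
```
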
@@ -2905,6 +2951,33 @@
print(f" Context Length: {BENCHMARK_CONFIG['context_length']}")
print()

+# Check for required environment variables
+api_key = os.getenv("SWARMS_API_KEY") or os.getenv(
+    "OPENAI_API_KEY"
+)
+if not api_key:
+    print(
+        "❌ Error: SWARMS_API_KEY or OPENAI_API_KEY not found in environment variables"
+    )
+    print(
+        " This benchmark requires real LLM calls for accurate performance testing"
+    )
+    print(
+        " Set your API key: export SWARMS_API_KEY='your-key-here' or export OPENAI_API_KEY='your-key-here'"
+    )
+    return 1
+
+# Check for required imports
+if not SWARMS_AVAILABLE:
+    print("❌ Error: swarms not available")
+    print(
+        " Install required dependencies: pip install swarms openpyxl"
+    )
+    print(
+        " This benchmark requires swarms framework and Excel support"
+    )
+    return 1

# Initialize benchmark suite
benchmark = AOPBenchmarkSuite(
output_dir="aop_benchmark_results",