diff --git a/CometAgentNEXT/AGENTS.md b/CometAgentNEXT/AGENTS.md new file mode 100644 index 0000000..1a4f09a --- /dev/null +++ b/CometAgentNEXT/AGENTS.md @@ -0,0 +1,265 @@ +# AGENTS.md + +This file provides guidance to Qoder (qoder.com) when working with code in this repository. + +## Common Commands + +### Setup +```bash +pip install -r requirements.txt +cp .env.example .env +``` + +### Running the Application +```bash +python cli.py +``` + +### Running Examples +```bash +python examples/basic_usage.py +python examples/test_plugins.py +``` + +### Testing +```bash +pytest tests/ -v +``` + +## Architecture Overview + +### Core Components + +**LLM Environment Loading Pattern** (src/env_loader.py) +- Uses direct environment variable loading integrated into EnvLoader class +- Eliminates factory pattern complexity completely +- Supports only essential providers: openai, anthropic +- All provider settings are configured via environment variables loaded through EnvLoader +- LLM client creation unified within EnvLoader.create_llm_client() method + +**Agent System** (src/agent/agent.py) +- Agents are configured with AgentConfig dataclass (name, role, goal, llm_provider, memory settings, tools, plugin settings) +- New plugin-related config options: + - enable_plugins (bool): Enable plugin system (default: True) + - plugin_directory (str): Path to plugins directory (default: ".CometPlugins") + - use_builtin_tools (bool): Use legacy builtin_tools (default: False, for backward compatibility) +- Two agent types available: + - Agent: Standard agent implementation + - ReactAgent: ReAct规范的Agent实现,结合推理(Reasoning)和行动(Action)框架 +- Each agent has: llm_client, tool_registry, memory, conversation_history +- Agent.run() implements iterative loop with tool calling: + - Sends messages to LLM with available tools + - Executes tool calls returned by LLM + - Appends tool results to conversation + - Continues until LLM provides final response (max_iterations limit) +- Memory integration: 
automatically saves user/assistant messages to memory if enabled +- System prompt is auto-generated from role/goal unless custom system_prompt provided +- Plugin integration: When enable_plugins=True, automatically loads plugins from plugin_directory + +**Prompt System** (src/agent/prompt.py) +- 独立的Prompt处理模块,负责生成和管理Agent的系统提示 +- 支持两种提示生成方式: + - 标准提示:适用于普通Agent + - ReAct提示:适用于ReactAgent,包含ReAct思考框架指导 +- 可通过PromptConfig自定义提示参数 +- 支持从角色和目标自动生成系统提示 + +**Tool & Plugin System** (src/plugins/base.py) +- 合并了原有的tools和plugins目录,统一为插件系统 +- ToolRegistry manages available tools +- Tools defined with name, description, parameters (ToolParameter: name, type, description, required) +- Tools converted to OpenAI function calling format via to_openai_format() +- Supports both sync and async tool functions +- Built-in tools are now part of the plugin system in src/plugins/builtin_tools.py +- Modern replacement for builtin_tools with extensible plugin architecture +- BasePlugin: Abstract base class for all plugins with initialize(), get_tools(), shutdown() methods +- PluginRegistry: Manages plugin lifecycle (load, register, unload) +- PluginMetadata: Describes plugin information (name, version, author, dependencies, etc.) 
+- Plugin types: BUILTIN (core), SYSTEM (framework), EXTERNAL (third-party/user) +- Plugins stored in .CometPlugins/ directory with manifest.json and plugin.py +- Supports decorator syntax (@register_tool_function) and manual tool registration +- Hot reload capability: plugins can be loaded/unloaded at runtime +- See .CometPlugins/README.md for complete plugin development guide + +**BuiltinTools Plugin** (src/plugins/builtin_tools.py) +- Core built-in plugin providing essential tools +- Tools: web_search, calculate, read_file, write_file, http_request +- Automatically loaded when enable_plugins=True in AgentConfig + +**Memory System** (src/memory/base.py) +- Three memory implementations: + - ShortTermMemory: Fixed-size FIFO buffer (default 50 entries) + - LongTermMemory: Persistent storage with inverted index for fast search + - HybridMemory: Combines both short-term and long-term (recommended) +- MemoryEntry contains: content, role, timestamp, metadata, optional embedding +- Search uses word-based inverted index scoring in LongTermMemory +- HybridMemory.search() combines results from both memory types + +**MetaAgent Orchestration** (src/group/meta_agent.py) +- MetaAgent manages multiple agents and coordinates workflows +- Supports loading agents from `config.json` via `load_agents_from_config()` method +- Four workflow types: + - sequential_workflow: Agents execute tasks in order, optionally sharing context + - parallel_workflow: Multiple agents work simultaneously using asyncio.gather + - hierarchical_workflow: Manager agent decomposes task into subtasks for workers, then synthesizes results + - dynamic_workflow: Coordinator agent dynamically decides task allocation using DELEGATE/REQUEST/COMPLETE actions +- TaskResult captures agent_name, task, result, timestamp, metadata +- Task history stored for all completed tasks + +**Configuration** (src/config.py) +- Uses pydantic-settings for environment-based configuration +- Settings class defines all API keys, base 
URLs, models, and general parameters +- Singleton pattern via @lru_cache() in get_settings() +- Supports .env file loading via env_loader.py +- Each provider has dedicated api_key, base_url, model settings + +**Environment Loading System** (src/env_loader.py) +- New module for loading .env files with improved logic +- Loads .env file from current working directory first, then from project root directory +- Uses dotenv library for loading environment variables +- Ensures consistent environment loading across the application +- Supports both user-specific and system-level .env files + +### Key Design Patterns + +1. **Async-First**: All agent operations, tool execution, and memory operations use async/await +2. **OpenAI Function Calling Format**: All LLM clients normalize to OpenAI's tool calling format internally +3. **Lazy Initialization**: Memory and tool registries created on-demand if not provided to Agent constructor +4. **Manager Pattern**: EnvLoader.create_llm_client() centralizes client instantiation logic with direct env loading +5. **Registry Pattern**: ToolRegistry for tool discovery and execution; PluginRegistry for plugin management +6. **Hybrid Storage**: Memory system combines in-memory short-term with persistent long-term storage +7. **Plugin Architecture**: Modular, extensible plugin system with hot reload support + +### Adding New LLM Providers + +**Current Support**: Only openai and anthropic providers are supported for simplicity. + +To add a new provider compatible with OpenAI format: +1. Add provider settings to .env file with corresponding API key, base URL, and model +2. Add elif branch in EnvLoader.create_llm_client() returning OpenAIClient with custom base_url + +For non-OpenAI-compatible providers: +1. Create new client class inheriting from BaseLLMClient in src/llm/ +2. Implement chat() and chat_stream() methods +3. 
Add import and factory logic to EnvLoader + +### Agent Iteration Loop + +The agent's run() method implements a tool-calling loop: +1. User message added to conversation_history +2. Loop up to max_iterations times: + - Call LLM with conversation history and available tools + - If LLM returns tool_calls: + - Add assistant message with tool_calls to history + - Execute each tool via ToolRegistry.execute_tool() + - Add tool results to conversation as role="tool" messages + - If LLM returns content without tool_calls: + - This is the final response, break loop +3. Return final response content + +### Creating Custom Plugins + +To create a custom plugin: +1. Create directory in .CometPlugins/ with your plugin name +2. Add manifest.json with plugin metadata (name, version, author, description, dependencies, etc.) +3. Create plugin.py with a Plugin class inheriting from BasePlugin +4. Implement initialize() to register tools using @self.register_tool_function decorator +5. Implement get_tools() to return list of tools +6. Optional: Add shutdown() for cleanup and README.md for documentation + +Example plugin structure: +```python +from src.plugins import BasePlugin, PluginMetadata, Tool, ToolParameter + +class Plugin(BasePlugin): + def initialize(self): + @self.register_tool_function( + name="my_tool", + description="Tool description", + parameters=[ToolParameter(...)] + ) + async def my_tool(param: str) -> str: + return f"Processed: {param}" + + def get_tools(self): + return list(self.tools.values()) +``` + +See .CometPlugins/README.md for comprehensive plugin development guide. + +## Recent Changes + +### Environment Loading Pattern Migration + +**Changes Made:** +1. **Deleted**: `src/llm/llm_manager.py` - Removed the LLMManager class for simplification +2. **Updated**: `src/env_loader.py` - Integrated LLM client creation methods directly into EnvLoader class +3. **Updated**: `src/agent/agent.py` - Replaced LLMManager.create_client() with EnvLoader.create_llm_client() +4. 
**Updated**: `src/llm/__init__.py` - Removed LLMManager import and export +5. **Updated**: `src/__init__.py` - Added EnvLoader import and removed LLMManager +6. **Updated**: `AGENTS.md` - Updated documentation to reflect Environment Loading pattern + +**Migration Benefits:** +1. **Simplified Architecture**: LLM client creation now unified in one class (EnvLoader) +2. **Reduced Dependencies**: Eliminated separate llm_manager.py file +3. **Direct Environment Access**: All environment variable handling in one place +4. **Limited Providers**: Only openai and anthropic for better maintainability + +**Usage:** +- Use `EnvLoader.create_llm_client()` to create LLM clients +- All environment variables loaded via `EnvLoader.load_env()` +- Configure providers through `.env` file + +### Group/MetaAgent Migration + +**Changes Made:** +1. Renamed `src/orchestration/` to `src/group/` +2. Renamed `orchestrator.py` to `meta_agent.py` +3. Renamed `MultiAgentOrchestrator` class to `MetaAgent` +4. Added `load_agents_from_config(config_path="config.json")` method to dynamically load agents from JSON config +5. Created `src/group/config.json` with example agents (coder, reviewer, tester) +6. Updated `src/__init__.py` imports to reflect new structure + +**New Usage:** +- `meta = MetaAgent()` +- `meta.load_agents_from_config()` # Loads agents from config.json +- Use workflows: `await meta.sequential_workflow(tasks, share_context=True)` etc. + +### CrossToolsCall Cross-Agent Tool Calling + +**Changes Made:** +1. Created `src/CometSpaceX/CrossToolsCall/` module with `CrossToolCaller` class and `__init__.py`. +2. Added `cross_tool_call(agents, target_agent, tool_name, arguments)` and `list_cross_tools(agents, target_agent)` methods to `Agent` class. + +**Features:** +- `CrossToolCaller` aggregates tool registries from multiple agents for shared tool access. +- Enables any agent to call tools registered in other agents' `tool_registry`. 
+- Supports async tool execution across agent boundaries. + +**Integration with MetaAgent:** +- In workflows like `sequential_workflow` or `dynamic_workflow`, agents can invoke tools from collaborators. +- Pass agent dict `meta.agents` to enable cross-calling. + +**Example Usage:** +```python +from src.agent.agent import Agent +from src.CometSpaceX.CrossToolsCall import CrossToolCaller + +# Assume agents loaded via MetaAgent +agents = meta.agents # {'coder': coder_agent, 'tester': tester_agent} + +# List tester's tools from coder +coder_tools = coder_agent.list_cross_tools(agents, 'tester') +print(coder_tools) + +# Coder calls tester's tool +result = await coder_agent.cross_tool_call( + agents, 'tester', 'run_tests', {'path': 'src/agent/'} +) +print(result) +``` + +**Benefits:** +- Enables tool sharing without duplicating registries. +- Supports MetaAgent multi-agent collaboration with plugin interoperability. +- Minimal code changes, follows existing async patterns. \ No newline at end of file diff --git a/CometAgentNEXT/README.md b/CometAgentNEXT/README.md new file mode 100644 index 0000000..be5c3f8 --- /dev/null +++ b/CometAgentNEXT/README.md @@ -0,0 +1,323 @@ +# CometAgentNEXT + +A high-performance, multi-LLM agent framework for building intelligent AI systems with advanced orchestration capabilities. 
+ +## Features + +### 🚀 Multi-LLM Support +- **OpenAI** (GPT-4, GPT-3.5) +- **Anthropic** (Claude) +- **DeepSeek** +- **Zhipu AI** (GLM) +- **Qwen** +- Extensible architecture for adding new providers + +### 🛠️ Core Capabilities +- **Agent System**: Flexible agent configuration with roles, goals, and custom behaviors +- **Tool System**: Built-in tools and easy custom tool registration +- **Memory System**: Short-term, long-term, and hybrid memory for context management +- **Multi-Agent Orchestration**: Sequential, parallel, hierarchical, and dynamic workflows +- **Async Performance**: Full async/await support for high performance +- **Error Handling**: Robust retry mechanisms with exponential backoff + +### 🔧 Built-in Tools +- Web search +- File operations (read/write) +- HTTP requests +- Mathematical calculations +- Extensible tool registry + +## Installation + +```bash +# Clone the repository +git clone +cd CometAgentNEXT + +# Install dependencies +pip install -r requirements.txt + +# Copy environment template +cp .env.example .env + +# Edit .env with your API keys +``` + +## Quick Start + +### Basic Agent Usage + +```python +import asyncio +from src import Agent, AgentConfig + +async def main(): + config = AgentConfig( + name="ResearchAgent", + role="Research Assistant", + goal="Help users find and analyze information", + llm_provider="openai", + temperature=0.7 + ) + + agent = Agent(config=config) + result = await agent.run("What are the benefits of async programming?") + print(result) + +asyncio.run(main()) +``` + +### Multi-Agent Orchestration + +```python +from src import MultiAgentOrchestrator, Agent, AgentConfig + +orchestrator = MultiAgentOrchestrator() + +# Create specialized agents +researcher = Agent(AgentConfig( + name="Researcher", + role="Research Specialist", + goal="Gather information" +)) + +writer = Agent(AgentConfig( + name="Writer", + role="Content Writer", + goal="Create content" +)) + +orchestrator.register_agent(researcher) 
+orchestrator.register_agent(writer) + +# Sequential workflow +tasks = [ + {"agent": "Researcher", "task": "Research AI trends"}, + {"agent": "Writer", "task": "Write an article about the research"} +] + +results = await orchestrator.sequential_workflow(tasks, share_context=True) +``` + +### Custom Tools + +```python +from src import ToolRegistry, ToolParameter + +registry = ToolRegistry() + +@registry.register_function( + name="get_weather", + description="Get weather for a location", + parameters=[ + ToolParameter(name="location", type="string", description="City name") + ] +) +async def get_weather(location: str) -> str: + return f"Weather in {location}: Sunny, 25°C" + +agent = Agent(config=config, tool_registry=registry) +``` + +### Memory Management + +```python +from src import HybridMemory, MemoryEntry + +memory = HybridMemory( + short_term_size=50, + long_term_path="./memory.json" +) + +await memory.add(MemoryEntry( + content="User prefers Python", + role="system" +)) + +results = await memory.search("Python", n=5) +``` + +## Configuration + +Create a `.env` file with your API keys: + +```env +# OpenAI +OPENAI_API_KEY=your_key_here +OPENAI_MODEL=gpt-4 + +# Anthropic +ANTHROPIC_API_KEY=your_key_here +ANTHROPIC_MODEL=claude-3-5-sonnet-20241022 + +# DeepSeek +DEEPSEEK_API_KEY=your_key_here +DEEPSEEK_MODEL=deepseek-chat + +# Zhipu AI +ZHIPU_API_KEY=your_key_here +ZHIPU_MODEL=glm-4-plus + +# Qwen +QWEN_API_KEY=your_key_here +QWEN_MODEL=qwen-plus + +# Default provider +DEFAULT_LLM_PROVIDER=openai + +# General settings +LOG_LEVEL=INFO +MAX_RETRIES=3 +TIMEOUT=60 +MAX_TOKENS=4096 +TEMPERATURE=0.7 +``` + +## Architecture + +``` +CometAgentNEXT/ +├── src/ +│ ├── agent/ # Agent core implementation +│ ├── llm/ # LLM client abstraction +│ ├── tools/ # Tool system and built-in tools +│ ├── memory/ # Memory management +│ ├── orchestration/ # Multi-agent coordination +│ └── config.py # Configuration management +├── examples/ # Usage examples +├── tests/ # Test suite +├── 
main.py # Interactive CLI +└── requirements.txt # Dependencies +``` + +## Workflow Types + +### 1. Sequential Workflow +Agents execute tasks one after another, optionally sharing context. + +```python +results = await orchestrator.sequential_workflow(tasks, share_context=True) +``` + +### 2. Parallel Workflow +Multiple agents work on different tasks simultaneously. + +```python +results = await orchestrator.parallel_workflow(tasks) +``` + +### 3. Hierarchical Workflow +A manager agent coordinates worker agents. + +```python +results = await orchestrator.hierarchical_workflow( + manager_agent="Manager", + worker_agents=["Worker1", "Worker2"], + task="Complex task" +) +``` + +### 4. Dynamic Workflow +Coordinator agent dynamically decides task allocation. + +```python +results = await orchestrator.dynamic_workflow( + coordinator_agent="Coordinator", + available_agents=["Agent1", "Agent2"], + task="Adaptive task" +) +``` + +## Examples + +Run the interactive chat: +```bash +python main.py +``` + +Run example scripts: +```bash +python examples/basic_usage.py +``` + +## Testing + +```bash +# Install test dependencies +pip install -r tests/requirements.txt + +# Run tests +pytest tests/ -v +``` + +## Performance Optimizations + +- **Async I/O**: All operations are async for maximum concurrency +- **Connection Pooling**: Efficient HTTP connection management +- **Retry Logic**: Smart exponential backoff for failed requests +- **Memory Indexing**: Fast search with inverted index +- **Lazy Loading**: Resources loaded only when needed +- **Streaming Support**: Real-time response streaming + +## Advanced Features + +### Custom LLM Providers +Extend `BaseLLMClient` to add new providers: + +```python +from src.llm import BaseLLMClient + +class CustomClient(BaseLLMClient): + async def chat(self, messages, **kwargs): + # Implement custom logic + pass +``` + +### Agent Specialization +Create domain-specific agents: + +```python +class CodeReviewAgent(Agent): + def __init__(self): + 
config = AgentConfig( + name="CodeReviewer", + role="Senior Code Reviewer", + goal="Review code for best practices", + system_prompt="You are an expert code reviewer..." + ) + super().__init__(config) +``` + +### Event Hooks +Monitor agent activities: + +```python +async def on_tool_call(tool_name, args, result): + print(f"Tool {tool_name} called with {args}") + +# Implement custom event system +``` + +## Contributing + +Contributions are welcome! Please feel free to submit issues and pull requests. + +## License + +MIT License + +## Inspiration + +This framework draws inspiration from: +- **OpenAI Swarm**: Agent handoff patterns +- **MetaGPT**: Role-based multi-agent coordination +- **LangGraph**: Graph-based agent workflows +- **AutoGPT/CrewAI**: Autonomous agent design patterns + +## Support + +For questions and support, please open an issue on GitHub. + +--- + +Built with ❤️ for the AI agent community diff --git a/CometAgentNEXT/cli.py b/CometAgentNEXT/cli.py new file mode 100644 index 0000000..105149e --- /dev/null +++ b/CometAgentNEXT/cli.py @@ -0,0 +1,100 @@ +import asyncio +import traceback +import json +from rich.console import Console +from rich.prompt import Prompt +from rich.panel import Panel +from rich.markdown import Markdown +from rich.table import Table +from rich import box +from rich.rule import Rule +from rich.text import Text +from rich.columns import Columns +from rich.syntax import Syntax +from src import Agent, AgentConfig, ToolRegistry +console = Console() + +async def interactive_chat(): + title = Text("🚀 Comet Agent CLI", style="bold magenta", justify="center") + subtitle = Text("由CometFuture开发的通用Agent", style="bold cyan", justify="center") + welcome_panel = Panel(Columns([title, subtitle]),title="[bold white]欢迎使用[/bold white]",border_style="bright_blue",expand=False) + console.print(welcome_panel) + instructions = ["[bold green]📝 输入 'exit' 以退出[/]","[bold yellow]🔄 输入 'reset' 以重置对话历史[/]","[bold blue]💬 直接输入问题开始对话[/]"] + instr_panel = 
Panel("\n".join(instructions), title="快速指南", border_style="green") + console.print(instr_panel) + + config = AgentConfig(name="ChatMode",role="你是一个强大的对话助手,由CometFuture开发,用于回答用户问题和执行任务", + goal="回答用户问题并执行任务",llm_provider="openai",temperature=0.7,enable_memory=True,memory_path="./.cometai/chat_memory.json") + agent = Agent(config=config) + console.print(Rule("Chat with CometAgent", style="bright_blue")) + + while True: + try: + user_input = Prompt.ask("\n[bold blue]👤 你 >[/bold blue]", console=console) + if user_input.lower() == 'exit': + console.print("\n[yellow]Comet Agent CLI 已退出[/yellow]") + break + if user_input.lower() == 'reset': + agent.reset() + console.print("\n[yellow]Conversation reset[/yellow]") + continue + if not user_input.strip(): + continue + console.print(f"\n[bold green]🤖 CometAgent[/bold green]:", end="") + console.print("[dim] 思考中...[/dim]") + response = await agent.run(user_input) + + tool_calls = agent.get_last_tool_calls() + tool_results = agent.get_last_tool_results() + + if tool_calls: + console.print("\n[bold cyan]🔧 工具调用:[/bold cyan]") + for i, tool_call in enumerate(tool_calls): + tool_table = Table(show_header=False, box=box.ROUNDED, border_style="cyan") + tool_table.add_column("Key", style="cyan") + tool_table.add_column("Value") + + tool_name = tool_call["function"]["name"] + tool_args = tool_call["function"]["arguments"] + + tool_table.add_row("工具名称", f"[bold]{tool_name}[/bold]") + + if isinstance(tool_args, str): + args_syntax = Syntax(tool_args, "json", theme="monokai", line_numbers=False) + else: + args_syntax = Syntax(json.dumps(tool_args, indent=2, ensure_ascii=False), "json", theme="monokai", line_numbers=False) + + console.print(tool_table) + console.print("[cyan]参数:[/cyan]") + console.print(args_syntax) + + if i < len(tool_results): + result = tool_results[i] + result_style = "green" if result.get("success", True) else "red" + result_icon = "✅" if result.get("success", True) else "❌" + 
console.print(f"[{result_style}]{result_icon} 执行结果:[/{result_style}]") + console.print(Panel(result["content"], border_style=result_style)) + + if i < len(tool_calls) - 1: + console.print() + + console.print(Markdown(response)) + console.print(Rule(style="green")) + except KeyboardInterrupt: + console.print(Panel("[bold yellow]⏹️ 中断。输入 'exit' 退出。[/bold yellow]", border_style="yellow")) + except Exception as e: + traceback.print_exc() + error_panel = Panel(f"[bold red]❌ 错误: {str(e)}[/bold red]",title="错误信息",border_style="red") + console.print(error_panel) + +async def main(): + try: + await interactive_chat() + except Exception as e: + traceback.print_exc() + fatal_panel = Panel(f"[bold red]💥 致命错误: {str(e)}[/bold red]\n\n[yellow]请检查:[/yellow]\n1. 正确配置 .env 文件\n2. 安装依赖: pip install -r requirements.txt", + title="启动失败",border_style="red") + console.print(fatal_panel) + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/CometAgentNEXT/requirements.txt b/CometAgentNEXT/requirements.txt new file mode 100644 index 0000000..533ac30 --- /dev/null +++ b/CometAgentNEXT/requirements.txt @@ -0,0 +1,12 @@ +openai>=1.0.0 +anthropic>=0.18.0 +google-generativeai>=0.3.0 +httpx>=0.25.0 +python-dotenv>=1.0.0 +pydantic>=2.0.0 +pydantic-settings>=2.0.0 +tenacity>=8.2.0 +rich>=13.0.0 +asyncio>=3.4.3 +aiohttp>=3.9.0 +tiktoken>=0.5.0 diff --git a/CometAgentNEXT/src/CometSpaceX/CrossToolsCall/__init__.py b/CometAgentNEXT/src/CometSpaceX/CrossToolsCall/__init__.py new file mode 100644 index 0000000..0cc4749 --- /dev/null +++ b/CometAgentNEXT/src/CometSpaceX/CrossToolsCall/__init__.py @@ -0,0 +1,5 @@ +from .cross_tools_call import CrossToolCaller +from .tool_config_loader import ToolConfigLoader +from .tool_executor import ToolExecutor + +__all__ = ["CrossToolCaller","ToolConfigLoader","ToolExecutor"] \ No newline at end of file diff --git a/CometAgentNEXT/src/CometSpaceX/CrossToolsCall/cross_tools_call.py 
"""Cross-agent tool calling: lets one agent invoke tools registered on another."""
from typing import Dict, Any, List, Optional, TYPE_CHECKING

from src.plugins.base import ToolRegistry, Tool

if TYPE_CHECKING:
    from src.agent.agent import Agent


class CrossToolCaller:
    """Aggregates the tool registries of several agents for shared access.

    Built from a mapping of agent name -> Agent; exposes lookup, listing,
    and async execution of any tool owned by any of those agents.
    """

    def __init__(self, agents: Dict[str, 'Agent']):
        self.agents = agents
        # Snapshot each agent's registry once, keyed by agent name.
        self.registries: Dict[str, ToolRegistry] = {}
        for owner, agent in agents.items():
            self.registries[owner] = agent.tool_registry

    async def call_tool(self, agent_name: str, tool_name: str, arguments: Dict[str, Any]) -> Any:
        """Execute *tool_name* from *agent_name*'s registry asynchronously.

        Raises ValueError when the agent is unknown, or when the tool is
        missing from / fails inside the target registry.
        """
        registry = self.registries.get(agent_name)
        if not registry:
            raise ValueError(f"代理 '{agent_name}' 的工具注册表未找到")
        try:
            return await registry.execute_tool(tool_name, arguments)
        except ValueError as e:
            raise ValueError(f"工具 '{tool_name}' 在代理 '{agent_name}' 中未找到或执行失败: {str(e)}")

    def list_tools(self, agent_name: Optional[str] = None) -> Dict[str, List[Dict[str, Any]]]:
        """Return available tools (OpenAI function format) for one agent, or for all agents."""
        if agent_name:
            registry = self.registries.get(agent_name)
            if registry:
                return {agent_name: [tool.to_openai_format() for tool in registry.get_all()]}
            return {}
        return {
            owner: [tool.to_openai_format() for tool in owned.get_all()]
            for owner, owned in self.registries.items()
        }

    def get_registry(self, agent_name: str) -> Optional[ToolRegistry]:
        """Return the ToolRegistry owned by *agent_name*, or None when unknown."""
        return self.registries.get(agent_name)
"""Loads a tools.json configuration and builds plugin/tool registries from it."""
import json
from pathlib import Path
from typing import Dict, Any, List, Optional

from src.plugins.base import PluginRegistry, ToolRegistry


class ToolConfigLoader:
    """Reads a tools.json config, loads the enabled plugins it lists,
    and exposes the resulting ToolRegistry."""

    def __init__(self, config_path: Optional[str] = None):
        # Default to <project root>/tools.json (four levels above this file).
        if config_path is None:
            project_root = Path(__file__).resolve().parent.parent.parent.parent
            config_path = project_root / "tools.json"
        self.config_path = Path(config_path)
        self.config: Dict[str, Any] = {}
        self.plugin_registry = PluginRegistry()
        self.tool_registry = ToolRegistry()

    def load_config(self) -> Dict[str, Any]:
        """Parse the JSON config file; raises FileNotFoundError when it is absent."""
        if not self.config_path.exists():
            raise FileNotFoundError(f"工具配置文件 {self.config_path} 不存在")

        with open(self.config_path, 'r', encoding='utf-8') as f:
            self.config = json.load(f)
        return self.config

    def load_plugins(self) -> ToolRegistry:
        """Load every enabled plugin listed in the config into the plugin registry.

        Plugins that cannot be found or fail to load are skipped with a
        warning (best-effort loading, by design).
        """
        if not self.config:
            self.load_config()

        for plugin_config in self.config.get("plugins", []):
            if not plugin_config.get("enabled", True):
                continue

            plugin_path = Path(plugin_config["path"])
            if not plugin_path.exists():
                # Fall back to resolving the path inside the plugin directory.
                plugin_path = Path(plugin_config.get("plugin_directory", ".CometPlugins")) / plugin_config["path"]
            if not plugin_path.exists():
                print(f"警告: 插件目录 {plugin_path} 不存在,跳过加载")
                continue

            try:
                self.plugin_registry.load_plugin_from_directory(plugin_path)
            except Exception as e:
                print(f"加载插件 {plugin_config['name']} 失败: {e}")

        self.tool_registry = self.plugin_registry.get_tool_registry()
        return self.tool_registry

    def get_tool_registry(self) -> ToolRegistry:
        """Return the tool registry, lazily loading plugins on first use."""
        # NOTE(review): peeks at ToolRegistry's private ``_tools`` to detect
        # "nothing loaded yet" — consider a public is-empty API on ToolRegistry.
        if not self.tool_registry._tools:
            self.load_plugins()
        return self.tool_registry

    def get_tools_list(self) -> List[Dict[str, Any]]:
        """All loaded tools in OpenAI function-calling format."""
        return self.tool_registry.to_openai_format()

    def get_plugin_info(self) -> List[Dict[str, Any]]:
        """Metadata describing each loaded plugin."""
        return self.plugin_registry.get_plugin_info()
"""Executes LLM tool calls against a tool registry and formats role="tool" results."""
from typing import Dict, Any, List
import json


class ToolExecutor:
    """Parses OpenAI-format tool calls and executes them against a registry.

    Execution failures never propagate: each call yields a result dict with
    ``success`` set accordingly, so one bad tool cannot abort an agent turn.
    """

    def __init__(self, tool_registry):
        # Any object exposing ``async execute_tool(name, arguments)`` works here.
        self.tool_registry = tool_registry

    @staticmethod
    def _parse_one(tool_call: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize one tool call: decode JSON-string arguments into a dict.

        Raises KeyError / json.JSONDecodeError on malformed input.
        """
        arguments = tool_call["function"]["arguments"]
        if isinstance(arguments, str):
            arguments = json.loads(arguments)
        return {
            "id": tool_call.get("id"),
            "type": tool_call.get("type", "function"),
            "function": {
                "name": tool_call["function"]["name"],
                "arguments": arguments,
            },
        }

    def parse_tool_calls(self, tool_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Parse a batch of tool calls (raises on the first malformed entry)."""
        return [self._parse_one(tool_call) for tool_call in tool_calls]

    async def execute_tool_call(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
        """Run one parsed tool call and return a role="tool" message dict.

        Never raises: execution errors are captured as success=False results.
        """
        tool_name = tool_call["function"]["name"]
        tool_args = tool_call["function"]["arguments"]
        tool_call_id = tool_call.get("id")
        try:
            result = await self.tool_registry.execute_tool(tool_name, tool_args)
            return {"tool_call_id": tool_call_id, "role": "tool", "name": tool_name,
                    "content": str(result), "success": True}
        except Exception as e:
            return {"tool_call_id": tool_call_id, "role": "tool", "name": tool_name,
                    "content": f"Error executing tool: {str(e)}", "success": False,
                    "error": str(e)}

    async def execute_tool_calls(self, tool_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Run a batch of tool calls sequentially, one result per call.

        Fix: a malformed entry (bad JSON in ``arguments``, missing keys) used
        to raise out of the up-front batch parse and abort every call; it now
        yields a success=False result, matching how execution errors are
        already reported by execute_tool_call().
        """
        results: List[Dict[str, Any]] = []
        for raw_call in tool_calls:
            try:
                parsed = self._parse_one(raw_call)
            except Exception as e:
                func = raw_call.get("function") or {}
                results.append({
                    "tool_call_id": raw_call.get("id"),
                    "role": "tool",
                    "name": func.get("name"),
                    "content": f"Error executing tool: {str(e)}",
                    "success": False,
                    "error": str(e),
                })
                continue
            results.append(await self.execute_tool_call(parsed))
        return results
# BUG FIX: AsyncIterator was used in stream_run's annotation but never
# imported, which raised NameError when the class body was evaluated.
from typing import List, Dict, Any, Optional, Callable, AsyncIterator
from dataclasses import dataclass, field
import asyncio
import json
from datetime import datetime
from pathlib import Path
from ..llm import BaseLLMClient, Message, LLMResponse
from ..env_loader import EnvLoader
from ..plugins import ToolRegistry, PluginRegistry
from ..memory import BaseMemory, MemoryEntry, HybridMemory
from ..config import get_settings
from .prompt import PromptManager, PromptConfig
from ..CometSpaceX.CrossToolsCall import CrossToolCaller, ToolConfigLoader, ToolExecutor


@dataclass
class AgentConfig:
    """Declarative configuration for a single Agent instance."""

    name: str
    role: str
    goal: str
    llm_provider: Optional[str] = None   # falls back to DEFAULT_LLM_PROVIDER env var
    model: Optional[str] = None          # falls back to the provider's default model
    temperature: float = 0.7
    max_tokens: int = 4096
    max_iterations: int = 10             # cap on LLM/tool round-trips per run()
    enable_memory: bool = True
    memory_path: Optional[str] = None    # persistence path for long-term memory
    tools: Optional[List[str]] = None
    system_prompt: Optional[str] = None  # overrides the auto-generated prompt
    enable_plugins: bool = True
    plugin_directory: Optional[str] = ".CometPlugins"
    tools_config_path: Optional[str] = None  # explicit tool config; wins over plugins


class Agent:
    """LLM-backed agent with tool calling, plugin loading and optional memory."""

    def __init__(self, config: AgentConfig, llm_client: Optional[BaseLLMClient] = None,
                 tools: Optional[ToolRegistry] = None, memory: Optional[BaseMemory] = None):
        self.config = config
        self.llm_client = llm_client or EnvLoader.create_llm_client(provider=config.llm_provider, model=config.model)
        self.tools = tools or ToolRegistry()
        if config.tools_config_path:
            # An explicit tool config file takes precedence over plugins.
            tool_loader = ToolConfigLoader(config.tools_config_path)
            self.tools = tool_loader.get_tool_registry()
        elif not tools:
            if config.enable_plugins:
                plugin_registry = PluginRegistry()
                if config.plugin_directory:
                    plugin_dir = Path(config.plugin_directory)
                    if plugin_dir.exists():
                        plugin_registry.load_plugins_from_directory(plugin_dir)
                self.tools = plugin_registry.get_tool_registry()
        # CONSISTENCY: AGENTS.md and ReactAgent refer to the registry as
        # ``tool_registry``; keep an alias so both names resolve.
        self.tool_registry = self.tools
        self.tool_executor = ToolExecutor(self.tools)

        self.memory = memory
        if config.enable_memory and not memory:
            self.memory = HybridMemory(short_term_size=50, long_term_path=config.memory_path)
            if config.memory_path:
                # BUG FIX: asyncio.create_task() requires a running event loop
                # and raised RuntimeError when the Agent was constructed
                # synchronously; fall back to a blocking load in that case.
                try:
                    asyncio.get_running_loop().create_task(self.memory.load(config.memory_path))
                except RuntimeError:
                    asyncio.run(self.memory.load(config.memory_path))
        self.conversation_history: List[Message] = []
        self._init_system_prompt()
        self.last_tool_calls: List[Dict[str, Any]] = []
        self.last_tool_results: List[Dict[str, Any]] = []

    def _init_system_prompt(self):
        """Seed the conversation with the (possibly auto-generated) system prompt."""
        prompt_config = PromptConfig(name=self.config.name, role=self.config.role, goal=self.config.goal,
                                     system_prompt=self.config.system_prompt)
        system_content = PromptManager.generate_system_prompt(prompt_config)
        self.conversation_history.append(Message(role="system", content=system_content))

    async def run(self, task: str, stream: bool = False) -> str:
        """Run the agent loop on ``task``: call the LLM, execute any tool calls,
        feed results back, and stop at a final answer or the iteration cap."""
        self.conversation_history.append(Message(role="user", content=task))
        if self.memory:
            await self.memory.add(MemoryEntry(content=task, role="user"))
        iteration = 0
        final_response = ""

        while iteration < self.config.max_iterations:
            iteration += 1
            tools = self.tools.to_openai_format() if self.config.tools != [] else None
            response = await self.llm_client.chat(messages=self.conversation_history, tools=tools,
                                                  temperature=self.config.temperature,
                                                  max_tokens=self.config.max_tokens)
            if response.tool_calls:
                self.last_tool_calls = response.tool_calls
                self.conversation_history.append(
                    Message(role="assistant", content=response.content or "", tool_calls=response.tool_calls))
                tool_results = await self.tool_executor.execute_tool_calls(response.tool_calls)
                self.last_tool_results = tool_results
                for tool_result in tool_results:
                    self.conversation_history.append(
                        Message(role="tool", content=tool_result["content"],
                                tool_call_id=tool_result["tool_call_id"]))
            else:
                final_response = response.content
                self.conversation_history.append(Message(role="assistant", content=final_response))
                if self.memory:
                    await self.memory.add(MemoryEntry(content=final_response, role="assistant"))
                break
        return final_response

    async def stream_run(self, task: str) -> AsyncIterator[str]:
        """Stream the LLM's answer chunk by chunk (no tool-call loop)."""
        self.conversation_history.append(Message(role="user", content=task))
        if self.memory:
            await self.memory.add(MemoryEntry(content=task, role="user"))

        async for chunk in self.llm_client.chat_stream(messages=self.conversation_history,
                                                       temperature=self.config.temperature,
                                                       max_tokens=self.config.max_tokens):
            yield chunk

    async def chat(self, message: str) -> str:
        """Convenience alias for :meth:`run`."""
        return await self.run(message)

    def reset(self):
        """Clear the conversation and re-install the system prompt."""
        self.conversation_history.clear()
        self._init_system_prompt()

    async def get_context(self, query: str, n: int = 5) -> List[MemoryEntry]:
        """Search memory for up to ``n`` entries relevant to ``query``."""
        if not self.memory:
            return []
        return await self.memory.search(query, n)

    async def cross_tool_call(self, agents: Dict[str, 'Agent'], target_agent: str,
                              tool_name: str, arguments: Dict[str, Any]) -> Any:
        """Invoke a tool owned by another agent via CrossToolCaller."""
        caller = CrossToolCaller(agents)
        return await caller.call_tool(target_agent, tool_name, arguments)

    def list_cross_tools(self, agents: Dict[str, 'Agent'],
                         target_agent: Optional[str] = None) -> Dict[str, List[Dict[str, Any]]]:
        """List tools exposed by other agents (all agents when target is None)."""
        caller = CrossToolCaller(agents)
        return caller.list_tools(target_agent)

    def get_last_tool_calls(self) -> List[Dict[str, Any]]:
        """Tool calls issued by the LLM in the most recent run()."""
        return self.last_tool_calls

    def get_last_tool_results(self) -> List[Dict[str, Any]]:
        """Results of the tool calls from the most recent run()."""
        return self.last_tool_results
"""使用CrossToolCaller进行跨代理工具调用。""" + caller = CrossToolCaller(agents) + return await caller.call_tool(target_agent, tool_name, arguments) + + def list_cross_tools(self, agents: Dict[str, 'Agent'], target_agent: Optional[str] = None) -> Dict[str, List[Dict[str, Any]]]: + """获取跨代理工具列表。""" + caller = CrossToolCaller(agents) + return caller.list_tools(target_agent) + + def get_last_tool_calls(self) -> List[Dict[str, Any]]: + return self.last_tool_calls + + def get_last_tool_results(self) -> List[Dict[str, Any]]: + return self.last_tool_results \ No newline at end of file diff --git a/CometAgentNEXT/src/agent/prompt.py b/CometAgentNEXT/src/agent/prompt.py new file mode 100644 index 0000000..9c2b60a --- /dev/null +++ b/CometAgentNEXT/src/agent/prompt.py @@ -0,0 +1,40 @@ +from typing import Optional +from dataclasses import dataclass + +@dataclass +class PromptConfig: + name: str + role: str + goal: str + system_prompt: Optional[str] = None + +class PromptManager: + @staticmethod + def generate_system_prompt(config: PromptConfig) -> str: + if config.system_prompt: + return config.system_prompt + else: + return f"""你是 {config.name}, 一个 由CometFuture 开发的 AI 智能体,以下是你的角色和目标: +角色: {config.role} +目标: {config.goal} + +你有访问各种工具的权限,这些工具可以帮助你完成任务。请正确使用工具,并一步一步思考,完成任务。 +当你完成任务或需要澄清时,请直接回复,而不是使用工具。""" + + @staticmethod + def generate_react_system_prompt(config: PromptConfig) -> str: + if config.system_prompt: + return config.system_prompt + else: + return f"""你是 {config.name}, 一个 CometFuture 开发的 AI 智能体,以下是你的角色和目标: +角色: {config.role}\n目标: {config.goal} + +请使用一种特殊的思考过程来解决问题: +1. 首先思考当前问题,分析需要什么信息 +2. 如果需要额外信息,使用工具获取 +3. 根据工具结果,继续思考并决定下一步 +4. 重复上述步骤直到问题解决 +5. 
import json
from typing import List, Dict, Any, Optional

from .agent import Agent, AgentConfig
from .prompt import PromptManager, PromptConfig
from ..llm import Message
from ..memory import MemoryEntry


class ReactAgent(Agent):
    """Agent driving a ReAct (Reason + Act) loop with a dedicated prompt."""

    def _init_system_prompt(self):
        """Install the ReAct system prompt instead of the standard one."""
        prompt_config = PromptConfig(name=self.config.name, role=self.config.role,
                                     goal=self.config.goal, system_prompt=self.config.system_prompt)
        system_content = PromptManager.generate_react_system_prompt(prompt_config)
        self.conversation_history.append(Message(role="system", content=system_content))

    async def run(self, task: str, stream: bool = False) -> str:
        """Execute ``task``, interleaving LLM reasoning with tool execution."""
        self.conversation_history.append(Message(role="user", content=task))
        if self.memory:
            await self.memory.add(MemoryEntry(content=task, role="user"))
        iteration = 0
        final_response = ""

        while iteration < self.config.max_iterations:
            iteration += 1
            # BUG FIX: the base Agent stores its registry as ``self.tools``;
            # ``self.tool_registry`` did not exist here and raised
            # AttributeError on the first call.
            tools = self.tools.to_openai_format() if self.config.tools != [] else None
            response = await self.llm_client.chat(messages=self.conversation_history, tools=tools,
                                                  temperature=self.config.temperature,
                                                  max_tokens=self.config.max_tokens)

            if response.tool_calls:
                react_thought = response.content or "我需要使用工具来获取更多信息"
                self.conversation_history.append(
                    Message(role="assistant", content=react_thought, tool_calls=response.tool_calls))

                for tool_call in response.tool_calls:
                    tool_name = tool_call["function"]["name"]
                    tool_args = tool_call["function"]["arguments"]
                    # BUG FIX: providers return arguments as a JSON string;
                    # decode before dispatching to the registry.
                    if isinstance(tool_args, str):
                        tool_args = json.loads(tool_args)
                    try:
                        tool_result = await self.tools.execute_tool(tool_name, tool_args)
                        result_content = f"工具执行结果:\n{tool_result}"
                    except Exception as e:
                        result_content = f"工具执行错误:\n{str(e)}"
                    self.conversation_history.append(
                        Message(role="tool", content=result_content, tool_call_id=tool_call["id"]))
            else:
                final_response = response.content
                self.conversation_history.append(Message(role="assistant", content=final_response))
                if self.memory:
                    await self.memory.add(MemoryEntry(content=final_response, role="assistant"))
                break
        return final_response
from pydantic_settings import BaseSettings
from pydantic import Field
from typing import Literal, Optional
from functools import lru_cache
from .env_loader import EnvLoader

# Load .env file before creating Settings instance
EnvLoader.load_env()

class Settings(BaseSettings):
    """Process-wide configuration read from environment variables.

    NOTE(review): ``Field(..., env=...)`` is pydantic v1 style; under
    pydantic-settings v2 the env var is matched by (case-insensitive) field
    name instead — confirm which pydantic major version the project pins.
    """

    # OpenAI (or any OpenAI-compatible endpoint via base_url)
    openai_api_key: Optional[str] = Field(None, env="OPENAI_API_KEY")
    openai_base_url: str = Field("https://api.openai.com/v1", env="OPENAI_BASE_URL")
    openai_model: str = Field("gpt-4", env="OPENAI_MODEL")

    # Anthropic
    anthropic_api_key: Optional[str] = Field(None, env="ANTHROPIC_API_KEY")
    anthropic_model: str = Field("claude-3-5-sonnet-20241022", env="ANTHROPIC_MODEL")

    # Google (declared here; EnvLoader.create_llm_client only builds
    # openai/anthropic clients)
    google_api_key: Optional[str] = Field(None, env="GOOGLE_API_KEY")
    google_model: str = Field("gemini-2.0-flash-exp", env="GOOGLE_MODEL")

    # DeepSeek (OpenAI-compatible API)
    deepseek_api_key: Optional[str] = Field(None, env="DEEPSEEK_API_KEY")
    deepseek_base_url: str = Field("https://api.deepseek.com/v1", env="DEEPSEEK_BASE_URL")
    deepseek_model: str = Field("deepseek-chat", env="DEEPSEEK_MODEL")

    # Zhipu GLM (OpenAI-compatible API)
    zhipu_api_key: Optional[str] = Field(None, env="ZHIPU_API_KEY")
    zhipu_base_url: str = Field("https://open.bigmodel.cn/api/paas/v4", env="ZHIPU_BASE_URL")
    zhipu_model: str = Field("glm-4-plus", env="ZHIPU_MODEL")

    # Qwen / DashScope (OpenAI-compatible API)
    qwen_api_key: Optional[str] = Field(None, env="QWEN_API_KEY")
    qwen_base_url: str = Field("https://dashscope.aliyuncs.com/compatible-mode/v1", env="QWEN_BASE_URL")
    qwen_model: str = Field("qwen-plus", env="QWEN_MODEL")

    # Provider selected when AgentConfig.llm_provider is not set
    default_llm_provider: Literal["openai", "anthropic", "google", "deepseek", "zhipu", "qwen"] = Field("openai", env="DEFAULT_LLM_PROVIDER")

    # Generic runtime knobs shared by all clients
    log_level: str = Field("INFO", env="LOG_LEVEL")
    max_retries: int = Field(3, env="MAX_RETRIES")
    timeout: int = Field(60, env="TIMEOUT")
    max_tokens: int = Field(4096, env="MAX_TOKENS")
    temperature: float = Field(0.7, env="TEMPERATURE")

    class Config:
        case_sensitive = False

@lru_cache()
def get_settings() -> Settings:
    """Return the process-wide Settings singleton (cached after first call)."""
    return Settings()
import os
from pathlib import Path
from typing import Optional


class EnvLoader:
    """Loads .env files and builds LLM clients from environment variables.

    The concrete client classes are imported lazily inside the factory
    helpers so this module can be used without the LLM stack installed.
    """

    @classmethod
    def load_env(cls, env_file: Optional[str] = None) -> None:
        """Load KEY=VALUE pairs from a .env file into ``os.environ``.

        Existing environment variables are never overwritten. When no path
        is given, the current directory and up to three parents are searched.
        """
        if env_file:
            env_path = Path(env_file)
        else:
            current_dir = Path.cwd()
            env_path = None
            for _ in range(4):
                candidate = current_dir / ".env"
                if candidate.exists():
                    env_path = candidate
                    break
                current_dir = current_dir.parent
                if current_dir == current_dir.parent:  # reached filesystem root
                    break
        if not env_path or not env_path.exists():
            return
        try:
            with open(env_path, 'r', encoding='utf-8') as f:
                for line_num, line in enumerate(f, 1):
                    line = line.strip()
                    if not line or line.startswith('#'):
                        continue
                    if '=' in line:
                        key, value = line.split('=', 1)
                        key = key.strip()
                        value = value.strip()
                        # BUG FIX: require a matching pair of quotes before
                        # stripping (a bare single quote character previously
                        # collapsed to an empty string).
                        if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
                            value = value[1:-1]
                        if key not in os.environ:
                            os.environ[key] = value
                    else:
                        print(f"Warning: Invalid line format in {env_path}:{line_num} - {line}")
        except FileNotFoundError:
            print(f"Warning: .env file not found at {env_path}")
        except Exception as e:
            print(f"Warning: Error loading .env file: {e}")

    @classmethod
    def get_env(cls, key: str, default: Optional[str] = None) -> Optional[str]:
        """Get environment variable value."""
        return os.environ.get(key, default)

    @classmethod
    def set_env(cls, key: str, value: str) -> None:
        """Set environment variable."""
        os.environ[key] = value

    @classmethod
    def is_production(cls) -> bool:
        """Check if running in production environment."""
        return cls.get_env("ENVIRONMENT", "development").lower() == "production"

    @classmethod
    def is_debug(cls) -> bool:
        """Check if debug mode is enabled (DEBUG=true/1/yes/on)."""
        debug = cls.get_env("DEBUG", "false").lower()
        return debug in ("true", "1", "yes", "on")

    @staticmethod
    def create_llm_client(provider: Optional[str] = None, api_key: Optional[str] = None,
                          model: Optional[str] = None, base_url: Optional[str] = None,
                          **kwargs) -> "BaseLLMClient":
        """Create an LLM client for ``provider`` ('openai' or 'anthropic').

        Raises ValueError for unsupported providers or missing API keys.
        """
        # BUG FIX: normalize so e.g. DEFAULT_LLM_PROVIDER=OpenAI still matches.
        provider = (provider or EnvLoader.get_env("DEFAULT_LLM_PROVIDER", "openai")).lower()
        if provider == "openai":
            return EnvLoader._create_openai_client(api_key, model, base_url, **kwargs)
        if provider == "anthropic":
            return EnvLoader._create_anthropic_client(api_key, model, **kwargs)
        raise ValueError(f"Unsupported LLM provider: {provider}. Supported providers: 'openai', 'anthropic'")

    @staticmethod
    def _create_openai_client(api_key: Optional[str] = None, model: Optional[str] = None,
                              base_url: Optional[str] = None, **kwargs) -> "OpenAIClient":
        """Create OpenAI client with environment or parameter values."""
        from .llm.openai_client import OpenAIClient  # lazy import
        api_key = api_key or EnvLoader.get_env("OPENAI_API_KEY")
        model = model or EnvLoader.get_env("OPENAI_MODEL", "gpt-4")
        base_url = base_url or EnvLoader.get_env("OPENAI_BASE_URL", "https://api.openai.com/v1")
        if not api_key:
            raise ValueError("OpenAI API key not found in environment variables or parameters")
        return OpenAIClient(api_key=api_key, model=model, base_url=base_url, **kwargs)

    @staticmethod
    def _create_anthropic_client(api_key: Optional[str] = None, model: Optional[str] = None,
                                 **kwargs) -> "AnthropicClient":
        """Create Anthropic client with environment or parameter values."""
        from .llm.anthropic_client import AnthropicClient  # lazy import
        api_key = api_key or EnvLoader.get_env("ANTHROPIC_API_KEY")
        model = model or EnvLoader.get_env("ANTHROPIC_MODEL", "claude-3-5-sonnet-20241022")
        if not api_key:
            raise ValueError("Anthropic API key not found in environment variables or parameters")
        return AnthropicClient(api_key=api_key, model=model, **kwargs)
"专注代码优化和错误处理的专业审查者" + }, + { + "name": "tester", + "role": "测试专家", + "goal": "生成并运行测试用例确保代码正确性", + "backstory": "自动化测试框架专家" + } + ] +} \ No newline at end of file diff --git a/CometAgentNEXT/src/group/meta_agent.py b/CometAgentNEXT/src/group/meta_agent.py new file mode 100644 index 0000000..b67e444 --- /dev/null +++ b/CometAgentNEXT/src/group/meta_agent.py @@ -0,0 +1,161 @@ +from typing import List, Dict, Any, Optional, Callable +from dataclasses import dataclass, field +from enum import Enum +import asyncio +from datetime import datetime +from ..agent import Agent, AgentConfig +import json +import os + +class WorkflowType(Enum): + SEQUENTIAL = "sequential" + PARALLEL = "parallel" + HIERARCHICAL = "hierarchical" + DYNAMIC = "dynamic" + +@dataclass +class TaskResult: + agent_name: str + task: str + result: str + timestamp: datetime = field(default_factory=datetime.now) + metadata: Dict[str, Any] = field(default_factory=dict) + +class MetaAgent: + def __init__(self): + self.agents: Dict[str, Agent] = {} + self.task_history: List[TaskResult] = [] + + def register_agent(self, agent: Agent): + self.agents[agent.config.name] = agent + + def get_agent(self, name: str) -> Optional[Agent]: + return self.agents.get(name) + + async def sequential_workflow(self,tasks: List[Dict[str, str]],share_context: bool = True) -> List[TaskResult]: + results = [] + context = "" + for task_info in tasks: + agent_name = task_info["agent"] + task = task_info["task"] + agent = self.get_agent(agent_name) + if not agent: + raise ValueError(f"Agent {agent_name} not found") + if share_context and context: + task = f"Context from previous tasks:\n{context}\n\nYour task:\n{task}" + + result = await agent.run(task) + task_result = TaskResult(agent_name=agent_name,task=task,result=result) + results.append(task_result) + self.task_history.append(task_result) + if share_context: + context += f"\n[{agent_name}]: {result}" + return results + + async def parallel_workflow(self,tasks: List[Dict[str, str]]) 
-> List[TaskResult]: + async def run_task(task_info: Dict[str, str]) -> TaskResult: + agent_name = task_info["agent"] + task = task_info["task"] + agent = self.get_agent(agent_name) + if not agent: + raise ValueError(f"Agent {agent_name} not found") + result = await agent.run(task) + return TaskResult(agent_name=agent_name,task=task,result=result) + + results = await asyncio.gather(*[run_task(t) for t in tasks]) + self.task_history.extend(results) + return list(results) + + async def hierarchical_workflow(self,manager_agent: str,worker_agents: List[str],task: str) -> List[TaskResult]: + manager = self.get_agent(manager_agent) + if not manager: + raise ValueError(f"Manager agent {manager_agent} not found") + decomposition_prompt = f"""Break down the following task into subtasks for your team: +Task: {task} + +Available workers: {', '.join(worker_agents)} + +Respond with a JSON array of subtasks in this format: +[ + {{"agent": "agent_name", "task": "subtask description"}}, + ...]""" + plan_result = await manager.run(decomposition_prompt) + try: + subtasks = json.loads(plan_result) + except: + subtasks = [{"agent": worker_agents[0], "task": task}] + results = await self.parallel_workflow(subtasks) + synthesis_prompt = f"""Synthesize the results from your team: +Original task: {task} + +Team results: +{chr(10).join([f"{r.agent_name}: {r.result}" for r in results])} + +Provide a comprehensive final answer.""" + final_result = await manager.run(synthesis_prompt) + final_task_result = TaskResult(agent_name=manager_agent,task=task,result=final_result,metadata={"subtask_results": [r.__dict__ for r in results]}) + self.task_history.append(final_task_result) + return results + [final_task_result] + + async def dynamic_workflow(self,coordinator_agent: str,available_agents: List[str],task: str,max_iterations: int = 10) -> List[TaskResult]: + coordinator = self.get_agent(coordinator_agent) + if not coordinator: + raise ValueError(f"Coordinator agent {coordinator_agent} not 
found") + results = [] + iteration = 0 + context = f"Task: {task}\n\nAvailable agents: {', '.join(available_agents)}" + while iteration < max_iterations: + iteration += 1 + decision_prompt = f"""{context} + +Based on the above, decide the next action: +1. Delegate a subtask to an agent (format: DELEGATE|agent_name|task_description) +2. Request information from an agent (format: REQUEST|agent_name|question) +3. Complete the task (format: COMPLETE|final_answer) + +Respond with only the action in the specified format.""" + + decision = await coordinator.run(decision_prompt) + parts = decision.strip().split("|", 2) + if len(parts) < 2: + break + action = parts[0].upper() + if action == "COMPLETE": + final_result = parts[1] if len(parts) > 1 else decision + results.append(TaskResult(agent_name=coordinator_agent,task=task,result=final_result,metadata={"iterations": iteration})) + break + elif action in ["DELEGATE", "REQUEST"]: + agent_name = parts[1] + subtask = parts[2] if len(parts) > 2 else "" + agent = self.get_agent(agent_name) + if agent: + result = await agent.run(subtask) + task_result = TaskResult(agent_name=agent_name,task=subtask,result=result) + results.append(task_result) + context += f"\n\n[{agent_name} result]: {result}" + else: + break + self.task_history.extend(results) + return results + + def get_history(self, agent_name: Optional[str] = None) -> List[TaskResult]: + if agent_name: + return [r for r in self.task_history if r.agent_name == agent_name] + return self.task_history + + def load_agents_from_config(self, config_path: str = "config.json") -> None: + config_dir = os.path.dirname(__file__) + full_path = os.path.join(config_dir, config_path) + try: + with open(full_path, 'r', encoding='utf-8') as f: + config = json.load(f) + for agent_data in config.get('agents', []): + agent_config = AgentConfig(**agent_data) + agent = Agent(agent_config) + self.register_agent(agent) + print(f"Loaded {len(config.get('agents', []))} agents from {config_path}") + 
import json
from typing import List, Dict, Any, Optional, AsyncIterator
from anthropic import AsyncAnthropic
from tenacity import retry, stop_after_attempt, wait_exponential
from ..llm.base import BaseLLMClient, Message, LLMResponse


class AnthropicClient(BaseLLMClient):
    """Anthropic Messages API client implementing BaseLLMClient."""

    def __init__(self, api_key: str, model: str, base_url: Optional[str] = None, **kwargs):
        super().__init__(api_key, model, base_url, **kwargs)
        self.client = AsyncAnthropic(api_key=api_key)

    def _build_request(self, messages: List[Message], tools: Optional[List[Dict[str, Any]]],
                       temperature: Optional[float], max_tokens: Optional[int],
                       **kwargs) -> Dict[str, Any]:
        """Build messages.create() kwargs.

        BUG FIX: the Anthropic API rejects a 'system' role inside
        ``messages``; system messages must be passed via the top-level
        ``system`` parameter, so they are split out here.
        """
        system_parts = [m.content for m in messages if m.role == "system"]
        formatted_messages = [{"role": m.role, "content": m.content}
                              for m in messages if m.role != "system"]
        request_params = {"model": self.model, "messages": formatted_messages,
                          "temperature": temperature or self.config.get("temperature", 0.7),
                          "max_tokens": max_tokens or self.config.get("max_tokens", 4096),
                          **kwargs}
        if system_parts:
            request_params["system"] = "\n\n".join(system_parts)
        if tools:
            request_params["tools"] = tools
        return request_params

    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=10))
    async def chat(self, messages: List[Message], tools: Optional[List[Dict[str, Any]]] = None,
                   temperature: Optional[float] = None, max_tokens: Optional[int] = None,
                   stream: bool = False, **kwargs) -> LLMResponse:
        """Return the full response; raises ValueError if stream=True."""
        if stream:
            raise ValueError("Use chat_stream for streaming responses")

        request_params = self._build_request(messages, tools, temperature, max_tokens, **kwargs)
        response = await self.client.messages.create(**request_params)
        content = ""
        tool_calls = None
        for block in response.content:
            if block.type == "text":
                content += block.text
            elif block.type == "tool_use":
                if tool_calls is None:
                    tool_calls = []
                # BUG FIX: arguments must be a JSON string. ``str(block.input)``
                # produced a Python dict repr (single quotes), which the
                # downstream ToolExecutor's json.loads() rejects.
                tool_calls.append({"id": block.id,
                                   "type": "function",
                                   "function": {
                                       "name": block.name,
                                       "arguments": json.dumps(block.input)}})
        return LLMResponse(content=content, role=response.role, tool_calls=tool_calls,
                           usage={
                               "prompt_tokens": response.usage.input_tokens,
                               "completion_tokens": response.usage.output_tokens,
                               "total_tokens": response.usage.input_tokens + response.usage.output_tokens},
                           model=response.model, finish_reason=response.stop_reason)

    async def chat_stream(self, messages: List[Message], tools: Optional[List[Dict[str, Any]]] = None,
                          temperature: Optional[float] = None, max_tokens: Optional[int] = None,
                          **kwargs) -> AsyncIterator[str]:
        """Yield response text incrementally from the streaming API."""
        request_params = self._build_request(messages, tools, temperature, max_tokens, **kwargs)
        async with self.client.messages.stream(**request_params) as stream:
            async for text in stream.text_stream:
                yield text
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List, Dict, Any, Optional, AsyncIterator


@dataclass
class Message:
    """One chat message; the tool fields are populated on tool-call turns."""

    role: str
    content: str
    name: Optional[str] = None
    tool_calls: Optional[List[Dict[str, Any]]] = None  # calls issued by the assistant
    tool_call_id: Optional[str] = None  # links a tool result back to its call


@dataclass
class LLMResponse:
    """Provider-agnostic response shape returned by every client."""

    content: str
    role: str = "assistant"
    tool_calls: Optional[List[Dict[str, Any]]] = None
    usage: Optional[Dict[str, int]] = None  # prompt/completion/total token counts
    model: Optional[str] = None
    finish_reason: Optional[str] = None


class BaseLLMClient(ABC):
    """Abstract chat client; concrete providers implement chat/chat_stream."""

    def __init__(self, api_key: str, model: str, base_url: Optional[str] = None, **kwargs):
        self.api_key = api_key
        self.model = model
        self.base_url = base_url
        self.config = kwargs  # extra provider options (temperature, max_tokens, ...)

    @abstractmethod
    async def chat(self, messages: List[Message], tools: Optional[List[Dict[str, Any]]] = None,
                   temperature: Optional[float] = None, max_tokens: Optional[int] = None,
                   stream: bool = False, **kwargs) -> LLMResponse:
        """Return the complete response for a conversation."""

    @abstractmethod
    async def chat_stream(self, messages: List[Message], tools: Optional[List[Dict[str, Any]]] = None,
                          temperature: Optional[float] = None, max_tokens: Optional[int] = None,
                          **kwargs) -> AsyncIterator[str]:
        """Yield the response text incrementally."""
from typing import List, Dict, Any, Optional, AsyncIterator
from openai import AsyncOpenAI, BadRequestError
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_not_exception_type, before_sleep_log
import logging
from ..llm.base import BaseLLMClient, Message, LLMResponse

logger = logging.getLogger(__name__)


class OpenAIClient(BaseLLMClient):
    """OpenAI Chat Completions client with retries and tool-call support."""

    def __init__(self, api_key: str, model: str, base_url: Optional[str] = None, **kwargs):
        super().__init__(api_key, model, base_url, **kwargs)
        self.client = AsyncOpenAI(api_key=api_key, base_url=base_url)

    @staticmethod
    def _format_messages(messages: List[Message]) -> List[Dict[str, Any]]:
        """Convert Message objects into API dicts, keeping tool-call fields.

        BUG FIX: the previous version dropped ``tool_calls``/``tool_call_id``/
        ``name`` and discarded assistant tool-call messages whose text content
        was empty, which broke multi-turn tool calling.
        """
        formatted: List[Dict[str, Any]] = []
        for msg in messages:
            if not msg.content.strip() and not msg.tool_calls:
                continue  # skip genuinely empty messages
            entry: Dict[str, Any] = {"role": msg.role, "content": msg.content}
            if msg.name:
                entry["name"] = msg.name
            if msg.tool_calls:
                entry["tool_calls"] = msg.tool_calls
            if msg.tool_call_id:
                entry["tool_call_id"] = msg.tool_call_id
            formatted.append(entry)
        return formatted

    # BUG FIX: retrying on (BadRequestError, Exception) retried everything,
    # including non-transient 4xx errors and this client's own validation
    # ValueErrors; those are now excluded from retry.
    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=10),
           retry=retry_if_not_exception_type((BadRequestError, ValueError)),
           before_sleep=before_sleep_log(logger, logging.WARNING))
    async def chat(self, messages: List[Message], tools: Optional[List[Dict[str, Any]]] = None,
                   temperature: Optional[float] = None, max_tokens: Optional[int] = None,
                   stream: bool = False, **kwargs) -> LLMResponse:
        """Return the full completion; raises ValueError on bad input or
        when stream=True (use chat_stream instead)."""
        if stream:
            raise ValueError("Use chat_stream for streaming responses")
        if not messages:
            raise ValueError("Messages list cannot be empty")
        formatted_messages = self._format_messages(messages)
        if not formatted_messages:
            raise ValueError("No valid messages to send")
        try:
            request_params = {"model": self.model, "messages": formatted_messages,
                              "temperature": temperature or self.config.get("temperature", 0.7),
                              "max_tokens": max_tokens or self.config.get("max_tokens", 4096),
                              **kwargs}
            if tools:
                if not isinstance(tools, list):
                    raise ValueError("Tools must be a list")
                request_params["tools"] = tools
            logger.debug(f"Sending request to {self.model} with {len(formatted_messages)} messages")
            response = await self.client.chat.completions.create(**request_params)

            if not response.choices:
                raise ValueError("No response choices received from API")
            message = response.choices[0].message
            tool_calls = None
            if hasattr(message, "tool_calls") and message.tool_calls:
                tool_calls = [{"id": tc.id, "type": tc.type,
                               "function": {
                                   "name": tc.function.name,
                                   "arguments": tc.function.arguments
                               }} for tc in message.tool_calls]

            return LLMResponse(content=message.content or "", role=message.role, tool_calls=tool_calls,
                               usage={
                                   "prompt_tokens": response.usage.prompt_tokens,
                                   "completion_tokens": response.usage.completion_tokens,
                                   "total_tokens": response.usage.total_tokens},
                               model=response.model, finish_reason=response.choices[0].finish_reason)
        except BadRequestError as e:
            logger.error(f"BadRequestError: {e}")
            raise ValueError(f"Bad request to OpenAI API: {str(e)}") from e
        except Exception as e:
            logger.error(f"Unexpected error in OpenAI client: {e}")
            raise

    async def chat_stream(self, messages: List[Message], tools: Optional[List[Dict[str, Any]]] = None,
                          temperature: Optional[float] = None, max_tokens: Optional[int] = None,
                          **kwargs) -> AsyncIterator[str]:
        """Yield completion text chunks as they arrive."""
        if not messages:
            raise ValueError("Messages list cannot be empty")
        formatted_messages = self._format_messages(messages)
        if not formatted_messages:
            raise ValueError("No valid messages to send")
        try:
            request_params = {"model": self.model, "messages": formatted_messages,
                              "temperature": temperature or self.config.get("temperature", 0.7),
                              "max_tokens": max_tokens or self.config.get("max_tokens", 4096),
                              "stream": True, **kwargs}
            if tools:
                if not isinstance(tools, list):
                    raise ValueError("Tools must be a list")
                request_params["tools"] = tools
            logger.debug(f"Sending streaming request to {self.model} with {len(formatted_messages)} messages")
            stream = await self.client.chat.completions.create(**request_params)

            async for chunk in stream:
                # BUG FIX: guard against chunks with an empty choices list
                # (e.g. trailing usage chunks) before indexing.
                if chunk.choices and chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content
        except BadRequestError as e:
            logger.error(f"BadRequestError in stream: {e}")
            raise ValueError(f"Bad request to OpenAI API: {str(e)}") from e
        except Exception as e:
            logger.error(f"Unexpected error in OpenAI stream: {e}")
            raise
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any
from dataclasses import dataclass, field
from datetime import datetime
import json


@dataclass
class MemoryEntry:
    """A single remembered message plus JSON serialization helpers."""

    content: str
    role: str
    timestamp: datetime = field(default_factory=datetime.now)
    metadata: Dict[str, Any] = field(default_factory=dict)
    embedding: Optional[List[float]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict (timestamp as ISO-8601)."""
        return {
            "content": self.content,
            "role": self.role,
            "timestamp": self.timestamp.isoformat(),
            "metadata": self.metadata,
            "embedding": self.embedding,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MemoryEntry":
        """Rebuild an entry previously produced by ``to_dict``."""
        return cls(
            content=data["content"],
            role=data["role"],
            timestamp=datetime.fromisoformat(data["timestamp"]),
            metadata=data.get("metadata", {}),
            embedding=data.get("embedding"),
        )


class BaseMemory(ABC):
    """Interface shared by all memory backends."""

    @abstractmethod
    async def add(self, entry: MemoryEntry):
        """Store one entry."""

    @abstractmethod
    async def get_recent(self, n: int = 10) -> List[MemoryEntry]:
        """Return up to the ``n`` most recent entries, oldest first."""

    @abstractmethod
    async def search(self, query: str, n: int = 5) -> List[MemoryEntry]:
        """Return up to ``n`` entries matching ``query``."""

    @abstractmethod
    async def clear(self):
        """Drop all stored entries."""

    @abstractmethod
    async def save(self, path: str):
        """Persist entries to ``path`` as JSON."""

    @abstractmethod
    async def load(self, path: str):
        """Restore entries from ``path``; a missing file yields empty memory."""


class ShortTermMemory(BaseMemory):
    """In-process FIFO buffer bounded at ``max_size`` entries."""

    def __init__(self, max_size: int = 50):
        self.max_size = max_size
        self._memories: List[MemoryEntry] = []

    async def add(self, entry: MemoryEntry):
        self._memories.append(entry)
        if len(self._memories) > self.max_size:
            del self._memories[0]  # evict the oldest entry

    async def get_recent(self, n: int = 10) -> List[MemoryEntry]:
        return self._memories[-n:]

    async def search(self, query: str, n: int = 5) -> List[MemoryEntry]:
        # Case-insensitive substring match; the most recent n matches win.
        needle = query.lower()
        hits = [entry for entry in self._memories if needle in entry.content.lower()]
        return hits[-n:]

    async def clear(self):
        self._memories.clear()

    async def save(self, path: str):
        with open(path, 'w', encoding='utf-8') as fh:
            json.dump([entry.to_dict() for entry in self._memories], fh, indent=2)

    async def load(self, path: str):
        try:
            with open(path, 'r', encoding='utf-8') as fh:
                self._memories = [MemoryEntry.from_dict(item) for item in json.load(fh)]
        except FileNotFoundError:
            self._memories = []


class LongTermMemory(BaseMemory):
    """Persistent memory with a naive inverted word index for search."""

    def __init__(self, persistence_path: Optional[str] = None):
        self.persistence_path = persistence_path
        self._memories: List[MemoryEntry] = []
        self._index: Dict[str, List[int]] = {}  # lowercase word -> entry positions

    async def add(self, entry: MemoryEntry):
        self._memories.append(entry)
        self._index_entry(entry, len(self._memories) - 1)
        if self.persistence_path:
            await self.save(self.persistence_path)  # write-through persistence

    def _index_entry(self, entry: MemoryEntry, idx: int):
        # Index each distinct lowercase word of the entry's content.
        for word in set(entry.content.lower().split()):
            self._index.setdefault(word, []).append(idx)

    async def get_recent(self, n: int = 10) -> List[MemoryEntry]:
        return self._memories[-n:]

    async def search(self, query: str, n: int = 5) -> List[MemoryEntry]:
        # Score entries by how many distinct query words they contain.
        scores: Dict[int, int] = {}
        for word in query.lower().split():
            for idx in self._index.get(word, []):
                scores[idx] = scores.get(idx, 0) + 1
        ranked = sorted(scores, key=scores.__getitem__, reverse=True)
        return [self._memories[idx] for idx in ranked[:n]]

    async def clear(self):
        self._memories.clear()
        self._index.clear()

    async def save(self, path: str):
        with open(path, 'w', encoding='utf-8') as fh:
            json.dump([entry.to_dict() for entry in self._memories], fh, indent=2)

    async def load(self, path: str):
        try:
            with open(path, 'r', encoding='utf-8') as fh:
                self._memories = [MemoryEntry.from_dict(item) for item in json.load(fh)]
            self._index.clear()
            for idx, entry in enumerate(self._memories):
                self._index_entry(entry, idx)
        except FileNotFoundError:
            self._memories = []


class HybridMemory(BaseMemory):
    """Combines a short-term buffer with indexed long-term storage."""

    def __init__(self, short_term_size: int = 50, long_term_path: Optional[str] = None):
        self.short_term = ShortTermMemory(max_size=short_term_size)
        self.long_term = LongTermMemory(persistence_path=long_term_path)

    async def add(self, entry: MemoryEntry):
        # Every entry is written to both stores.
        await self.short_term.add(entry)
        await self.long_term.add(entry)

    async def get_recent(self, n: int = 10) -> List[MemoryEntry]:
        return await self.short_term.get_recent(n)

    async def search(self, query: str, n: int = 5) -> List[MemoryEntry]:
        # Merge both result sets, deduplicating by content (short-term wins).
        merged: Dict[str, MemoryEntry] = {}
        for entry in await self.long_term.search(query, n):
            merged[entry.content] = entry
        for entry in await self.short_term.search(query, n):
            merged[entry.content] = entry
        return list(merged.values())[:n]

    async def clear(self):
        await self.short_term.clear()
        await self.long_term.clear()

    async def save(self, path: str):
        await self.long_term.save(path)

    async def load(self, path: str):
        await self.long_term.load(path)
+class Tool: + name: str + description: str + parameters: list[ToolParameter] = field(default_factory=list) + function: Optional[Callable] = None + + def to_openai_format(self) -> Dict[str, Any]: + properties = {} + required = [] + for param in self.parameters: + param_schema = {"type": param.type,"description": param.description} + if param.enum: + param_schema["enum"] = param.enum + properties[param.name] = param_schema + if param.required: + required.append(param.name) + return {"type": "function","function": {"name": self.name,"description": self.description,"parameters": {"type": "object", + "properties": properties,"required": required}}} + + async def execute(self, **kwargs) -> Any: + if not self.function: + raise ValueError(f"Tool {self.name} has no execution function") + + import asyncio + if asyncio.iscoroutinefunction(self.function): + return await self.function(**kwargs) + else: + return self.function(**kwargs) + +class ToolRegistry: + def __init__(self): + self._tools: Dict[str, Tool] = {} + + def register(self, tool: Tool): + self._tools[tool.name] = tool + + def register_function(self,name: str,description: str,parameters: Optional[list[ToolParameter]] = None): + def decorator(func: Callable): + if parameters is None: + params = self._extract_parameters_from_function(func) + else: + params = parameters + tool = Tool(name=name,description=description,parameters=params,function=func) + self.register(tool) + return func + return decorator + + def _extract_parameters_from_function(self, func: Callable) -> list[ToolParameter]: + sig = signature(func) + params = [] + for param_name, param in sig.parameters.items(): + if param_name == "self": + continue + param_type = "string" + if param.annotation != param.empty: + if param.annotation == int: + param_type = "integer" + elif param.annotation == float: + param_type = "number" + elif param.annotation == bool: + param_type = "boolean" + elif param.annotation == list: + param_type = "array" + elif 
param.annotation == dict: + param_type = "object" + + params.append(ToolParameter(name=param_name,type=param_type,description=f"Parameter {param_name}",required=param.default == param.empty)) + return params + + def get(self, name: str) -> Optional[Tool]: + return self._tools.get(name) + + def get_all(self) -> list[Tool]: + return list(self._tools.values()) + + def to_openai_format(self) -> list[Dict[str, Any]]: + return [tool.to_openai_format() for tool in self._tools.values()] + + async def execute_tool(self, name: str, arguments: Dict[str, Any]) -> Any: + tool = self.get(name) + if not tool: + raise ValueError(f"Tool {name} not found") + return await tool.execute(**arguments) + + def merge_registry(self, other_registry: "ToolRegistry"): + for tool in other_registry.get_all(): + self.register(tool) + +class PluginType(Enum): + BUILTIN = "builtin" + EXTERNAL = "external" + SYSTEM = "system" + +@dataclass +class PluginMetadata: + name: str + version: str + author: str + description: str + plugin_type: PluginType = PluginType.EXTERNAL + dependencies: List[str] = field(default_factory=list) + tags: List[str] = field(default_factory=list) + enabled: bool = True + + @classmethod + def from_manifest(cls, manifest_path: Path) -> "PluginMetadata": + with open(manifest_path, 'r', encoding='utf-8') as f: + data = json.load(f) + return cls(name=data["name"],version=data["version"],author=data["author"],description=data["description"], + plugin_type=PluginType(data.get("type", "external")),dependencies=data.get("dependencies", []),tags=data.get("tags", []), + enabled=data.get("enabled", True)) + +class BasePlugin(ABC): + def __init__(self, metadata: PluginMetadata): + self.metadata = metadata + self.tools: Dict[str, Tool] = {} + + @abstractmethod + def initialize(self) -> None: + pass + + @abstractmethod + def get_tools(self) -> List[Tool]: + pass + + def shutdown(self) -> None: + pass + + def register_tool(self, tool: Tool): + self.tools[tool.name] = tool + + def 
reg_plug(self, name: str, description: str, parameters: Optional[List[ToolParameter]] = None): + def decorator(func: Callable): + if parameters is None: + params = self._extract_parameters_from_function(func) + else: + params = parameters + tool = Tool(name=name,description=description,parameters=params,function=func) + self.register_tool(tool) + return func + return decorator + + def register_tool_function(self, name: str, description: str, parameters: Optional[List[ToolParameter]] = None): + return self.reg_plug(name, description, parameters) + + def _extract_parameters_from_function(self, func: Callable) -> List[ToolParameter]: + sig = signature(func) + params = [] + for param_name, param in sig.parameters.items(): + if param_name == "self": + continue + param_type = "string" + if param.annotation != param.empty: + if param.annotation == int: + param_type = "integer" + elif param.annotation == float: + param_type = "number" + elif param.annotation == bool: + param_type = "boolean" + elif param.annotation == list: + param_type = "array" + elif param.annotation == dict: + param_type = "object" + + params.append(ToolParameter(name=param_name,type=param_type,description=f"Parameter {param_name}",required=param.default == param.empty)) + return params + +class PluginRegistry: + def __init__(self): + self._plugins: Dict[str, BasePlugin] = {} + self._tool_registry = ToolRegistry() + + def register_plugin(self, plugin: BasePlugin): + if not plugin.metadata.enabled: + return + + plugin.initialize() + self._plugins[plugin.metadata.name] = plugin + + for tool in plugin.get_tools(): + self._tool_registry.register(tool) + + def unregister_plugin(self, plugin_name: str): + if plugin_name in self._plugins: + plugin = self._plugins[plugin_name] + plugin.shutdown() + + for tool_name in plugin.tools.keys(): + if tool_name in self._tool_registry._tools: + del self._tool_registry._tools[tool_name] + + del self._plugins[plugin_name] + + def get_plugin(self, name: str) -> 
Optional[BasePlugin]: + return self._plugins.get(name) + + def get_all_plugins(self) -> List[BasePlugin]: + return list(self._plugins.values()) + + def get_tool_registry(self) -> ToolRegistry: + return self._tool_registry + + def load_plugin_from_directory(self, plugin_dir: Path): + manifest_path = plugin_dir / "manifest.json" + if not manifest_path.exists(): + raise FileNotFoundError(f"No manifest.json found in {plugin_dir}") + metadata = PluginMetadata.from_manifest(manifest_path) + + plugin_file = plugin_dir / f"{metadata.name}.py" + if not plugin_file.exists(): + plugin_file = plugin_dir / "plugin.py" + if not plugin_file.exists(): + raise FileNotFoundError(f"No {metadata.name}.py or plugin.py found in {plugin_dir}") + + spec = importlib.util.spec_from_file_location(f"comet_plugin_{metadata.name}", plugin_file) + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + + if not hasattr(module, "Plugin"): + raise AttributeError(f"Plugin class not found in {plugin_file}") + plugin_class = getattr(module, "Plugin") + plugin_instance = plugin_class(metadata) + self.register_plugin(plugin_instance) + return plugin_instance + + def load_plugins_from_directory(self, base_dir: Path): + if not base_dir.exists(): + return + + plugins_config_path = base_dir / ".CometPlugins.json" + if plugins_config_path.exists(): + with open(plugins_config_path, 'r', encoding='utf-8') as f: + config = json.load(f) + for plugin_info in config.get("plugins", []): + if plugin_info.get("enabled", True): + plugin_path = base_dir / plugin_info["path"] + if plugin_path.exists(): + try: + self.load_plugin_from_directory(plugin_path) + except Exception as e: + print(f"Failed to load plugin {plugin_info['name']}: {e}") + else: + for plugin_dir in base_dir.iterdir(): + if plugin_dir.is_dir() and (plugin_dir / "manifest.json").exists(): + try: + self.load_plugin_from_directory(plugin_dir) + except Exception as e: + print(f"Failed to load 
plugin from {plugin_dir}: {e}") + + def get_plugin_info(self) -> List[Dict[str, Any]]: + return [{"name": p.metadata.name,"version": p.metadata.version,"author": p.metadata.author,"description": p.metadata.description, + "type": p.metadata.plugin_type.value,"tools": [t.name for t in p.get_tools()],"enabled": p.metadata.enabled} for p in self._plugins.values()] diff --git a/CometAgentNEXT/tools.json b/CometAgentNEXT/tools.json new file mode 100644 index 0000000..554a2e6 --- /dev/null +++ b/CometAgentNEXT/tools.json @@ -0,0 +1,17 @@ +{ + "version": "1.0.0", + "description": "工具配置文件 - 定义Agent可用的工具插件", + "plugins": [ + { + "name": "CometBaseTools", + "path": "CometBaseTools", + "enabled": true, + "plugin_directory": ".CometPlugins", + "description": "内置工具插件,提供web搜索、计算器、文件读写等基础功能" + } + ], + "settings": { + "auto_load": true, + "reload_on_change": false + } +} diff --git a/env.example b/env.example new file mode 100644 index 0000000..385c236 --- /dev/null +++ b/env.example @@ -0,0 +1,3 @@ +OPENAI_API_KEY = you-key +OPENAI_BASE_URL = service-link +OPENAI_MODEL = model-name \ No newline at end of file