diff --git a/docs/docs/api/base/swarmauri_base/agents/AgentBase.md b/docs/docs/api/base/swarmauri_base/agents/AgentBase.md index c554aaf29..523ccb81b 100644 --- a/docs/docs/api/base/swarmauri_base/agents/AgentBase.md +++ b/docs/docs/api/base/swarmauri_base/agents/AgentBase.md @@ -3,3 +3,4 @@ ::: swarmauri_base.agents.AgentBase.AgentBase options.extra: show_inheritance: true + diff --git a/docs/docs/api/base/swarmauri_base/logger/LoggerBase.md b/docs/docs/api/base/swarmauri_base/logger/LoggerBase.md deleted file mode 100644 index bb1b5a0b6..000000000 --- a/docs/docs/api/base/swarmauri_base/logger/LoggerBase.md +++ /dev/null @@ -1,6 +0,0 @@ -# Class `swarmauri_base.logger.LoggerBase.LoggerBase` - -::: swarmauri_base.logger.LoggerBase.LoggerBase - options.extra: - show_inheritance: true - diff --git a/docs/docs/api/base/swarmauri_base/logger_formatter/FormatterBase.md b/docs/docs/api/base/swarmauri_base/logger_formatter/FormatterBase.md deleted file mode 100644 index 8e8419830..000000000 --- a/docs/docs/api/base/swarmauri_base/logger_formatter/FormatterBase.md +++ /dev/null @@ -1,6 +0,0 @@ -# Class `swarmauri_base.logger_formatter.FormatterBase.FormatterBase` - -::: swarmauri_base.logger_formatter.FormatterBase.FormatterBase - options.extra: - show_inheritance: true - diff --git a/docs/docs/api/base/swarmauri_base/logger_formatters/FormatterBase.md b/docs/docs/api/base/swarmauri_base/logger_formatters/FormatterBase.md new file mode 100644 index 000000000..71f3d013f --- /dev/null +++ b/docs/docs/api/base/swarmauri_base/logger_formatters/FormatterBase.md @@ -0,0 +1,6 @@ +# Class `swarmauri_base.logger_formatters.FormatterBase.FormatterBase` + +::: swarmauri_base.logger_formatters.FormatterBase.FormatterBase + options.extra: + show_inheritance: true + diff --git a/docs/docs/api/base/swarmauri_base/logger_handler/HandlerBase.md b/docs/docs/api/base/swarmauri_base/logger_handler/HandlerBase.md deleted file mode 100644 index 8616f2d3c..000000000 --- a/docs/docs/api/base/swarmauri_base/logger_handler/HandlerBase.md +++ /dev/null @@ -1,6 +0,0 @@ -# Class `swarmauri_base.logger_handler.HandlerBase.HandlerBase` - -::: swarmauri_base.logger_handler.HandlerBase.HandlerBase - options.extra: - show_inheritance: true - diff --git a/docs/docs/api/base/swarmauri_base/logger_handlers/HandlerBase.md b/docs/docs/api/base/swarmauri_base/logger_handlers/HandlerBase.md new file mode 100644 index 000000000..40dc9377d --- /dev/null +++ b/docs/docs/api/base/swarmauri_base/logger_handlers/HandlerBase.md @@ -0,0 +1,6 @@ +# Class `swarmauri_base.logger_handlers.HandlerBase.HandlerBase` + +::: swarmauri_base.logger_handlers.HandlerBase.HandlerBase + options.extra: + show_inheritance: true + diff --git a/docs/docs/api/base/swarmauri_base/loggers/LoggerBase.md b/docs/docs/api/base/swarmauri_base/loggers/LoggerBase.md new file mode 100644 index 000000000..eb07fe12d --- /dev/null +++ b/docs/docs/api/base/swarmauri_base/loggers/LoggerBase.md @@ -0,0 +1,6 @@ +# Class `swarmauri_base.loggers.LoggerBase.LoggerBase` + +::: swarmauri_base.loggers.LoggerBase.LoggerBase + options.extra: + show_inheritance: true + diff --git a/docs/docs/concept/index.md b/docs/docs/api/concepts.md similarity index 100% rename from docs/docs/concept/index.md rename to docs/docs/api/concepts.md diff --git a/docs/docs/api/core/swarmauri_core/logger/ILogger.md b/docs/docs/api/core/swarmauri_core/logger/ILogger.md deleted file mode 100644 index a8b6e6e95..000000000 --- a/docs/docs/api/core/swarmauri_core/logger/ILogger.md +++ /dev/null @@ -1,6 +0,0 @@ -# 
Class `swarmauri_core.logger.ILogger.ILogger` - -::: swarmauri_core.logger.ILogger.ILogger - options.extra: - show_inheritance: true - diff --git a/docs/docs/api/core/swarmauri_core/logger_formatter/IFormatter.md b/docs/docs/api/core/swarmauri_core/logger_formatter/IFormatter.md deleted file mode 100644 index 472fef744..000000000 --- a/docs/docs/api/core/swarmauri_core/logger_formatter/IFormatter.md +++ /dev/null @@ -1,6 +0,0 @@ -# Class `swarmauri_core.logger_formatter.IFormatter.IFormatter` - -::: swarmauri_core.logger_formatter.IFormatter.IFormatter - options.extra: - show_inheritance: true - diff --git a/docs/docs/api/core/swarmauri_core/logger_formatters/IFormatter.md b/docs/docs/api/core/swarmauri_core/logger_formatters/IFormatter.md new file mode 100644 index 000000000..f263f247d --- /dev/null +++ b/docs/docs/api/core/swarmauri_core/logger_formatters/IFormatter.md @@ -0,0 +1,6 @@ +# Class `swarmauri_core.logger_formatters.IFormatter.IFormatter` + +::: swarmauri_core.logger_formatters.IFormatter.IFormatter + options.extra: + show_inheritance: true + diff --git a/docs/docs/api/core/swarmauri_core/logger_handler/IHandler.md b/docs/docs/api/core/swarmauri_core/logger_handler/IHandler.md deleted file mode 100644 index 4f8f9a00b..000000000 --- a/docs/docs/api/core/swarmauri_core/logger_handler/IHandler.md +++ /dev/null @@ -1,6 +0,0 @@ -# Class `swarmauri_core.logger_handler.IHandler.IHandler` - -::: swarmauri_core.logger_handler.IHandler.IHandler - options.extra: - show_inheritance: true - diff --git a/docs/docs/api/core/swarmauri_core/logger_handlers/IHandler.md b/docs/docs/api/core/swarmauri_core/logger_handlers/IHandler.md new file mode 100644 index 000000000..902f4af91 --- /dev/null +++ b/docs/docs/api/core/swarmauri_core/logger_handlers/IHandler.md @@ -0,0 +1,6 @@ +# Class `swarmauri_core.logger_handlers.IHandler.IHandler` + +::: swarmauri_core.logger_handlers.IHandler.IHandler + options.extra: + show_inheritance: true + diff --git a/docs/docs/api/core/swarmauri_core/loggers/ILogger.md b/docs/docs/api/core/swarmauri_core/loggers/ILogger.md new file mode 100644 index 000000000..b3b1ba634 --- /dev/null +++ b/docs/docs/api/core/swarmauri_core/loggers/ILogger.md @@ -0,0 +1,6 @@ +# Class `swarmauri_core.loggers.ILogger.ILogger` + +::: swarmauri_core.loggers.ILogger.ILogger + options.extra: + show_inheritance: true + diff --git a/docs/docs/api/core/swarmauri_core/tool_llms/IPredict.md b/docs/docs/api/core/swarmauri_core/tool_llms/IPredict.md deleted file mode 100644 index cfc012bcd..000000000 --- a/docs/docs/api/core/swarmauri_core/tool_llms/IPredict.md +++ /dev/null @@ -1,6 +0,0 @@ -# Class `swarmauri_core.tool_llms.IPredict.IPredict` - -::: swarmauri_core.tool_llms.IPredict.IPredict - options.extra: - show_inheritance: true - diff --git a/docs/docs/api/core/swarmauri_core/tool_llms/IToolPredict.md b/docs/docs/api/core/swarmauri_core/tool_llms/IToolPredict.md new file mode 100644 index 000000000..f5892075e --- /dev/null +++ b/docs/docs/api/core/swarmauri_core/tool_llms/IToolPredict.md @@ -0,0 +1,6 @@ +# Class `swarmauri_core.tool_llms.IToolPredict.IToolPredict` + +::: swarmauri_core.tool_llms.IToolPredict.IToolPredict + options.extra: + show_inheritance: true + diff --git a/docs/docs/api/first_class/swarmauri_distance_minkowski/MinkowskiDistance.md b/docs/docs/api/first_class/swarmauri_distance_minkowski/MinkowskiDistance.md deleted file mode 100644 index da03ee80e..000000000 --- a/docs/docs/api/first_class/swarmauri_distance_minkowski/MinkowskiDistance.md +++ /dev/null @@ -1,6 +0,0 @@ -# 
Class `swarmauri_distance_minkowski.MinkowskiDistance.MinkowskiDistance`
-
-::: swarmauri_distance_minkowski.MinkowskiDistance.MinkowskiDistance
-    options.extra:
-      show_inheritance: true
-
diff --git a/docs/docs/api/second_class/swarmauri_embedding_mlm/MlmEmbedding.md b/docs/docs/api/second_class/swarmauri_embedding_mlm/MlmEmbedding.md
deleted file mode 100644
index 3f888823d..000000000
--- a/docs/docs/api/second_class/swarmauri_embedding_mlm/MlmEmbedding.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Class `swarmauri_embedding_mlm.MlmEmbedding.MlmEmbedding`
-
-::: swarmauri_embedding_mlm.MlmEmbedding.MlmEmbedding
-    options.extra:
-      show_inheritance: true
-
diff --git a/docs/docs/api/standard/swarmauri_standard/loggers/Logger.md b/docs/docs/api/standard/swarmauri_standard/loggers/Logger.md
new file mode 100644
index 000000000..0773cab34
--- /dev/null
+++ b/docs/docs/api/standard/swarmauri_standard/loggers/Logger.md
@@ -0,0 +1,6 @@
+# Class `swarmauri_standard.loggers.Logger.Logger`
+
+::: swarmauri_standard.loggers.Logger.Logger
+    options.extra:
+      show_inheritance: true
+
diff --git a/docs/docs/api/standard/swarmauri_standard/logging/Logger.md b/docs/docs/api/standard/swarmauri_standard/logging/Logger.md
deleted file mode 100644
index 5a7e963c7..000000000
--- a/docs/docs/api/standard/swarmauri_standard/logging/Logger.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Class `swarmauri_standard.logging.Logger.Logger`
-
-::: swarmauri_standard.logging.Logger.Logger
-    options.extra:
-      show_inheritance: true
-
diff --git a/docs/docs/api/standard/swarmauri_standard/tool_llms/DeepInfraToolModel.md b/docs/docs/api/standard/swarmauri_standard/tool_llms/DeepInfraToolModel.md
new file mode 100644
index 000000000..fdac5c28d
--- /dev/null
+++ b/docs/docs/api/standard/swarmauri_standard/tool_llms/DeepInfraToolModel.md
@@ -0,0 +1,6 @@
+# Class `swarmauri_standard.tool_llms.DeepInfraToolModel.DeepInfraToolModel`
+
+::: swarmauri_standard.tool_llms.DeepInfraToolModel.DeepInfraToolModel
+    options.extra:
+      show_inheritance: true
+
diff --git a/docs/docs/api/standard/swarmauri_standard/tool_llms/ToolLLM.md b/docs/docs/api/standard/swarmauri_standard/tool_llms/ToolLLM.md
new file mode 100644
index 000000000..883d94abf
--- /dev/null
+++ b/docs/docs/api/standard/swarmauri_standard/tool_llms/ToolLLM.md
@@ -0,0 +1,6 @@
+# Class `swarmauri_standard.tool_llms.ToolLLM.ToolLLM`
+
+::: swarmauri_standard.tool_llms.ToolLLM.ToolLLM
+    options.extra:
+      show_inheritance: true
+
diff --git a/docs/docs/api/index.md b/docs/docs/guide/courses.md
similarity index 100%
rename from docs/docs/api/index.md
rename to docs/docs/guide/courses.md
diff --git a/docs/docs/guide/faq.md b/docs/docs/guide/faq.md
index 158578255..1892e56fb 100644
--- a/docs/docs/guide/faq.md
+++ b/docs/docs/guide/faq.md
@@ -17,11 +17,11 @@
 pip install swarmauri
 ```
 
 !!! info "Installation Options"
     For specific versions or development builds:
     ```bash
     # Install specific version
     pip install swarmauri==1.2.3
-    
+
     # Install from GitHub (latest development version)
     pip install git+https://github.com/swarmauri/swarmauri-sdk.git
     ```
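Before the import-error hunk below, a quick sanity check readers can run to confirm the install actually succeeded; this mirrors the `swarmauri.__version__` snippet already used in `docs/docs/home/installation.md`, and the printed version is simply whatever release is installed:

```python
# Minimal post-install check: if this import and print succeed, any
# ModuleNotFoundError is an environment problem, not a packaging one.
import swarmauri

print(swarmauri.__version__)
```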
warning "Common Import Error Causes" - Package not installed correctly - Virtual environment not activated - Package installed in a different Python environment - Package name misspelled in the import statement ### Version Conflicts @@ -69,12 +64,12 @@ pip install swarmauri ``` !!! tip "Dependency Isolation" - Consider using tools like Poetry or pipenv for better dependency management. These tools create lockfiles that ensure consistent environments across different machines. - +Consider using tools like Poetry or pipenv for better dependency management. These tools create lockfiles that ensure consistent environments across different machines. + ```bash # Using Poetry poetry add swarmauri - + # Using pipenv pipenv install swarmauri ``` @@ -94,8 +89,8 @@ sudo apt install build-essential # For Ubuntu/Debian ``` ??? danger "Installation Troubleshooting" - If you encounter persistent installation issues: - +If you encounter persistent installation issues: + 1. Make sure your pip is up to date: `pip install --upgrade pip` 2. Check if you have sufficient privileges (try using `sudo` on Linux/Mac or run as administrator on Windows) 3. If you're behind a corporate firewall, you may need to configure pip to use an HTTPS proxy @@ -140,13 +135,13 @@ print(response) ``` !!! info "Available Tools" - Swarmauri SDK offers a variety of built-in tools including: +Swarmauri SDK offers a variety of built-in tools including: - `CalculatorTool`: For mathematical operations - `RequestsTool`: For HTTP requests - `CodeExtractorTool`: For code extraction - `CodeInterpreterTool`: For python code execution - + You can also create custom tools by extending the `ToolBase` class. ### How do I create an AI assistant? @@ -174,6 +169,7 @@ print(response) response = agent.exec("Can you explain it in simpler terms?") print(response) ``` + ### How do I handle large datasets? - **Chunking**: Break down large datasets into smaller chunks for processing. @@ -232,3 +228,6 @@ If you need further assistance: 1. Check our API Documentation 2. Visit our [GitHub Issues](https://github.com/swarmauri/swarmauri-sdk/issues) 3. Join our [Discord Community](https://discord.gg/swarmauri) + +!!! info "Community Support" + Our community is active and ready to help with any questions you might have. Don't hesitate to reach out if you're facing challenges with Swarmauri SDK. diff --git a/docs/docs/guide/index.md b/docs/docs/guide/index.md index b5cffb556..31284ca15 100644 --- a/docs/docs/guide/index.md +++ b/docs/docs/guide/index.md @@ -9,7 +9,7 @@ Welcome to Swarmauri guides! This section contains practical tutorials and how-t - **[Installation Guide](installation.md)** - Step-by-step instructions for setting up Swarmauri SDK - **[Usage Guide](usage.md)** - Learn how to use core Swarmauri features -- **[Courses](course/1.md)** - Structured learning paths for different skill levels +- **[Courses](courses.md)** - Structured learning paths for different skill levels - **[FAQ](faq.md)** - Answers to frequently asked questions ## Learning Paths @@ -21,15 +21,15 @@ Choose the path that fits your needs: ### 1. Beginners -Start with the **[Installation Guide](installation.md)** followed by the **[Entry Course](course/1.md)**. This path introduces core concepts without assuming prior AI development experience. +Start with the **[Installation Guide](installation.md)** followed by the **[Entry Course](courses.md)**. This path introduces core concepts without assuming prior AI development experience. ### 2. 
AI/ML Practitioners -Begin with **[Installation Guide](installation.md)** and jump straight to the **[Generative AI Course](course/2.md)**. This path focuses on using Swarmauri with existing AI/ML knowledge. +Begin with **[Installation Guide](installation.md)** and jump straight to the **[Generative AI Course](courses.md)**. This path focuses on using Swarmauri with existing AI/ML knowledge. ### 3. Tool Developers -After installation, explore the **[Tool Development Course](course/3.md)**. This path is ideal if you want to extend Swarmauri with custom components. +After installation, explore the **[Tool Development Course](courses.md)**. This path is ideal if you want to extend Swarmauri with custom components. !!! warning "Prerequisites" The Tool Development path assumes familiarity with Python development practices, including package management, testing, and object-oriented programming concepts. @@ -52,7 +52,7 @@ If this is your first time using Swarmauri, we recommend: 1. Install the SDK using the [Installation Guide](installation.md) 2. Follow the basic examples in the [Usage Guide](usage.md) -3. Explore more advanced concepts in our [Courses](course/1.md) +3. Explore more advanced concepts in our [Courses](courses.md) !!! note "Community Support" Need help? Visit our [community resources](../home/help.md) or check the [FAQ](faq.md). Our active community is ready to assist with any questions you might have. diff --git a/docs/docs/home/installation.md b/docs/docs/home/installation.md index 560d063f2..a4600e219 100644 --- a/docs/docs/home/installation.md +++ b/docs/docs/home/installation.md @@ -132,19 +132,19 @@ print(swarmauri.__version__) After installation: -1. Check out our [Quickstart Guide](../guide/quickstart.md) -2. Review [API Documentation](../api/index.md) -3. Try our [Examples](../examples/index.md) -4. Join our [Community](../community/index.md) +1. Check out our [Quickstart Guide](../guide/usage.md) +2. Review [API Documentation](../api/concepts.md) +3. Try our [Courses](../guide/courses.md) +4. Join our [Community](https://discord.gg/swarmauri) !!! tip "Getting Started" - The [Quickstart Guide](../guide/quickstart.md) is the best place to begin your journey with Swarmauri SDK. + The [Quickstart Guide](../guide/usage.md) is the best place to begin your journey with Swarmauri SDK. ## Getting Help If you run into issues: -1. Check our [FAQ](../faq.md) +1. Check our [FAQ](../guide/faq.md) 2. Visit our [GitHub Issues](https://github.com/swarmauri/swarmauri-sdk/issues) 3. Join our [Discord Community](https://discord.gg/swarmauri) diff --git a/docs/docs/home/why_use_swarmauri.md b/docs/docs/home/why_use_swarmauri.md index a4351818c..833b1b279 100644 --- a/docs/docs/home/why_use_swarmauri.md +++ b/docs/docs/home/why_use_swarmauri.md @@ -74,8 +74,8 @@ Build AI agents that can use tools to accomplish tasks: - **Jupyter notebook tools** for data science workflows - **Custom tools** that you can easily build and integrate -!!! tip "Custom Tools" -Creating your own tools is straightforward with Swarmauri. Extend the `ToolBase` class, register it with `@ComponentBase.register_type`, and implement the `__call__` method. See the [Custom Components](../guide/usage.md#creating-custom-components) section for examples. +???+ tip "Custom Tools" + Creating your own tools is straightforward with Swarmauri. Extend the `ToolBase` class, register it with `@ComponentBase.register_type`, and implement the `__call__` method. 
See the [Custom Components](../guide/usage.md#creating-custom-components) section for examples. ### Data Processing @@ -357,8 +357,8 @@ Discover how easy it is to build with Swarmauri: - [Installation Guide](installation.md) - Get set up with Swarmauri SDK - [Quick Start Tutorial](../guide/usage.md) - Build your first AI application -- [Examples Gallery](../examples/index.md) - Explore example projects -- [API Reference](../api/index.md) - Dive into detailed documentation +- [ Courses ](../guide/courses.md) - Explore our Courses +- [API Reference](../api/concepts.md) - Dive into detailed documentation Or jump right in with a simple example: diff --git a/docs/docs/index.md b/docs/docs/index.md index 45cdd6c53..9b78152b0 100644 --- a/docs/docs/index.md +++ b/docs/docs/index.md @@ -288,11 +288,11 @@ Need help? Have a feature suggestion? Join the Swarmauri community: Here are some recommended next steps to continue your journey with Swarmauri SDK: -1. **Explore the Tutorials**: Check out our [step-by-step tutorials](../tutorials/index.md) -2. **Read the API Documentation**: Dive into our [detailed API reference](../api/index.md) +1. **Explore the Courses**: Check out our [step-by-step courses](./guide/courses.md) +2. **Read the API Documentation**: Dive into our [detailed API reference](./api/concepts.md) 3. **Join the Community**: Connect with other developers in our [Discord server](https://discord.gg/swarmauri) 4. **Contribute**: Learn how to [contribute to the project](./home/contribute.md) -5. **Stay Updated**: Follow our [blog](../blog/index.md) for the latest updates +5. **Stay Updated**: Follow our [blog](./blog/index.md) for the latest updates --- diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 0e3f315e2..1a24a1560 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -112,24 +112,10 @@ nav: - guide/index.md - Installation: guide/installation.md - Usage: guide/usage.md - - Courses: - - guide/index.md - - Entry: guide/course/1.md - - Generative AI: guide/course/2.md - - Tool: guide/course/3.md + - Courses: guide/courses.md - FAQ: guide/faq.md -- Concept: - - concept/index.md - - Core: concept/core.md - - Base: concept/base.md - - Standard: concept/standard.md - - Swarmauri: concept/swarmauri.md - API Documentation: - - api/index.md - - Third Class: - - api/third_class/index.md - - Module 1: api/third_class/module1.md - - Module 2: api/third_class/module2.md + - api/concepts.md - Standard: - Home: api/standard/index.md - Agents: @@ -217,8 +203,8 @@ nav: - PerplexityModel: api/standard/swarmauri_standard/llms/PerplexityModel.md - PlayHTModel: api/standard/swarmauri_standard/llms/PlayHTModel.md - WhisperLargeModel: api/standard/swarmauri_standard/llms/WhisperLargeModel.md - - Logging: - - Logger: api/standard/swarmauri_standard/logging/Logger.md + - Loggers: + - Logger: api/standard/swarmauri_standard/loggers/Logger.md - Measurements: - CompletenessMeasurement: api/standard/swarmauri_standard/measurements/CompletenessMeasurement.md - DistinctivenessMeasurement: api/standard/swarmauri_standard/measurements/DistinctivenessMeasurement.md @@ -280,10 +266,12 @@ nav: - Tool Llms: - AnthropicToolModel: api/standard/swarmauri_standard/tool_llms/AnthropicToolModel.md - CohereToolModel: api/standard/swarmauri_standard/tool_llms/CohereToolModel.md + - DeepInfraToolModel: api/standard/swarmauri_standard/tool_llms/DeepInfraToolModel.md - GeminiToolModel: api/standard/swarmauri_standard/tool_llms/GeminiToolModel.md - GroqToolModel: api/standard/swarmauri_standard/tool_llms/GroqToolModel.md - 
MistralToolModel: api/standard/swarmauri_standard/tool_llms/MistralToolModel.md - OpenAIToolModel: api/standard/swarmauri_standard/tool_llms/OpenAIToolModel.md + - ToolLLM: api/standard/swarmauri_standard/tool_llms/ToolLLM.md - Toolkits: - AccessibilityToolkit: api/standard/swarmauri_standard/toolkits/AccessibilityToolkit.md - Toolkit: api/standard/swarmauri_standard/toolkits/Toolkit.md @@ -369,14 +357,14 @@ nav: - ImageGenBase: api/base/swarmauri_base/image_gens/ImageGenBase.md - Llms: - LLMBase: api/base/swarmauri_base/llms/LLMBase.md - - Logger: - - LoggerBase: api/base/swarmauri_base/logger/LoggerBase.md - - Logger Formatter: - - FormatterBase: api/base/swarmauri_base/logger_formatter/FormatterBase.md - - Logger Handler: - - HandlerBase: api/base/swarmauri_base/logger_handler/HandlerBase.md + - Logger Formatters: + - FormatterBase: api/base/swarmauri_base/logger_formatters/FormatterBase.md + - Logger Handlers: + - HandlerBase: api/base/swarmauri_base/logger_handlers/HandlerBase.md - Loggermixin: - LoggerMixin: api/base/swarmauri_base/LoggerMixin.md + - Loggers: + - LoggerBase: api/base/swarmauri_base/loggers/LoggerBase.md - Measurements: - MeasurementAggregateMixin: api/base/swarmauri_base/measurements/MeasurementAggregateMixin.md - MeasurementBase: api/base/swarmauri_base/measurements/MeasurementBase.md @@ -494,12 +482,12 @@ nav: - Llms: - IFit: api/core/swarmauri_core/llms/IFit.md - IPredict: api/core/swarmauri_core/llms/IPredict.md - - Logger: - - ILogger: api/core/swarmauri_core/logger/ILogger.md - - Logger Formatter: - - IFormatter: api/core/swarmauri_core/logger_formatter/IFormatter.md - - Logger Handler: - - IHandler: api/core/swarmauri_core/logger_handler/IHandler.md + - Logger Formatters: + - IFormatter: api/core/swarmauri_core/logger_formatters/IFormatter.md + - Logger Handlers: + - IHandler: api/core/swarmauri_core/logger_handlers/IHandler.md + - Loggers: + - ILogger: api/core/swarmauri_core/loggers/ILogger.md - Measurements: - IMeasurement: api/core/swarmauri_core/measurements/IMeasurement.md - IMeasurementAggregate: api/core/swarmauri_core/measurements/IMeasurementAggregate.md @@ -534,7 +522,7 @@ nav: - Task Mgmt Strategies: - ITaskMgmtStrategy: api/core/swarmauri_core/task_mgmt_strategies/ITaskMgmtStrategy.md - Tool Llms: - - IPredict: api/core/swarmauri_core/tool_llms/IPredict.md + - IToolPredict: api/core/swarmauri_core/tool_llms/IToolPredict.md - Toolkits: - IToolkit: api/core/swarmauri_core/toolkits/IToolkit.md - Tools: @@ -566,15 +554,9 @@ nav: - IPredictVision: api/core/swarmauri_core/vlms/IPredictVision.md - First_Class: - Home: api/first_class/index.md - - MinkowskiDistance: api/first_class/swarmauri_distance_minkowski/MinkowskiDistance.md - Second_Class: - Home: api/second_class/index.md - - Mlmembedding: - - MlmEmbedding: api/second_class/swarmauri_embedding_mlm/MlmEmbedding.md - - Home: index.md -- Blog: - - blog/index.md - - News: blog/news.md - - Events: blog/events.md - - Releases: blog/releases.md + - Third Class: + - Home: api/third_class/index.md +- Blog: blog/index.md - Swarmauri People: people/index.md diff --git a/docs/scripts/generate_content.py b/docs/scripts/generate_content.py index 310d6329b..5a9a757cb 100644 --- a/docs/scripts/generate_content.py +++ b/docs/scripts/generate_content.py @@ -316,9 +316,9 @@ def generate( if __name__ == "__main__": # Example usage generate( - package_name="swarmauri_base", + package_name="swarmauri_standard", docs_dir="docs/docs", api_output_dir="api", mkdocs_yml_path="docs/mkdocs.yml", - top_label="Base", + 
top_label="Standard", ) diff --git a/pkgs/base/swarmauri_base/DynamicBase.py b/pkgs/base/swarmauri_base/DynamicBase.py index cdf92c542..1268a2aec 100644 --- a/pkgs/base/swarmauri_base/DynamicBase.py +++ b/pkgs/base/swarmauri_base/DynamicBase.py @@ -14,6 +14,7 @@ from typing import ( Annotated, Any, + Callable, ClassVar, Dict, List, @@ -548,7 +549,7 @@ def _recreate_models(cls): ############################################################### @classmethod - def register_model(cls): + def register_model(cls) -> Callable[[Type[BaseModel]], Type[BaseModel]]: """ Decorator to register a base model in the unified registry. @@ -576,7 +577,7 @@ def register_type( cls, resource_type: Optional[Union[Type[T], List[Type[T]]]] = None, type_name: Optional[str] = None, - ): + ) -> Callable[[Type["DynamicBase"]], Type["DynamicBase"]]: """ Decorator to register a subtype under one or more base models in the unified registry. diff --git a/pkgs/base/swarmauri_base/parsers/ParserBase.py b/pkgs/base/swarmauri_base/parsers/ParserBase.py index 253443eed..288f2af17 100644 --- a/pkgs/base/swarmauri_base/parsers/ParserBase.py +++ b/pkgs/base/swarmauri_base/parsers/ParserBase.py @@ -1,10 +1,14 @@ from abc import abstractmethod -from typing import Optional, Union, List, Any, Literal -from pydantic import Field -from swarmauri_base.ComponentBase import ComponentBase, ResourceTypes +from typing import Any, List, Literal, Optional, TypeVar, Union + +from pydantic import ConfigDict, Field from swarmauri_core.documents.IDocument import IDocument from swarmauri_core.parsers.IParser import IParser +from swarmauri_base.ComponentBase import ComponentBase, ResourceTypes + +T = TypeVar("T", bound=IDocument) + @ComponentBase.register_model() class ParserBase(IParser, ComponentBase): @@ -16,11 +20,12 @@ class ParserBase(IParser, ComponentBase): chunking algorithms. """ - resource: Optional[str] = Field(default=ResourceTypes.PARSER.value) + resource: Optional[str] = Field(default=ResourceTypes.PARSER.value, frozen=True) type: Literal["ParserBase"] = "ParserBase" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @abstractmethod - def parse(self, data: Union[str, Any]) -> List[IDocument]: + def parse(self, data: Union[str, Any]) -> List[T]: """ Public method to parse input data (either a str or a Message) into a list of Document instances. diff --git a/pkgs/base/swarmauri_base/tool_llms/ToolLLMBase.py b/pkgs/base/swarmauri_base/tool_llms/ToolLLMBase.py index fb036fe53..342c96b96 100644 --- a/pkgs/base/swarmauri_base/tool_llms/ToolLLMBase.py +++ b/pkgs/base/swarmauri_base/tool_llms/ToolLLMBase.py @@ -1,5 +1,5 @@ -from abc import abstractmethod import json +from abc import abstractmethod from typing import Any, Dict, List, Literal, Optional, Type from pydantic import ConfigDict, Field, PrivateAttr, SecretStr, model_validator @@ -12,7 +12,6 @@ @ComponentBase.register_model() class ToolLLMBase(IToolPredict, ComponentBase): - allowed_models: List[str] = [] resource: Optional[str] = Field(default=ResourceTypes.TOOL_LLM.value, frozen=True) model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) type: Literal["ToolLLMBase"] = "ToolLLMBase" @@ -61,7 +60,7 @@ def get_schema_converter(self) -> Type["SchemaConverterBase"]: "get_schema_converter() not implemented in subclass yet." 
) - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools(self, tools: Dict[str, Any]) -> List[Dict[str, Any]]: converter = self.get_schema_converter() return [converter.convert(tools[tool]) for tool in tools] @@ -78,7 +77,7 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase tool_calls (list): A list of dictionaries representing tool calls. Each dictionary should contain a "function" key with a nested dictionary that includes the "name" and "arguments" of the function to be called, and an "id" key for the tool call identifier. - toolkit (object): An object that provides access to tools via the `get_tool_by_name` method. + toolkit (ToolkitBase): An object that provides access to tools via the `get_tool_by_name` method. messages (list): A list of message dictionaries to which the results of the tool calls will be appended. Returns: diff --git a/pkgs/core/swarmauri_core/README.md b/pkgs/core/swarmauri_core/README.md index d6c1393b3..173458c16 100644 --- a/pkgs/core/swarmauri_core/README.md +++ b/pkgs/core/swarmauri_core/README.md @@ -14,11 +14,11 @@ The Core Library provides the foundational interfaces and abstract base classes ## Getting Started -To start developing with the Core Library, include it as a module in your Python project. Ensure you have Python 3.6 or later installed. +To start developing with the Core Library, include it as a module in your Python project. Ensure you have Python 3.10 or later installed. ```python # Example of using an abstract model interface from the Core Library -from swarmauri.core.models.IModel import IModel +from swarmauri_core.models.IModel import IModel class MyModel(IModel): # Implement the abstract methods here diff --git a/pkgs/core/swarmauri_core/chains/IChain.py b/pkgs/core/swarmauri_core/chains/IChain.py index c96d87972..eef1d3a7e 100644 --- a/pkgs/core/swarmauri_core/chains/IChain.py +++ b/pkgs/core/swarmauri_core/chains/IChain.py @@ -26,7 +26,7 @@ class IChain(ABC): """ @abstractmethod - def add_step(self, step: IChainStep, **kwargs) -> None: + def add_step(self, step: IChainStep, **kwargs: dict[str, Any]) -> None: """ Adds a new step to the chain. Steps are executed in the order they are added. Each step is represented by a Callable, which can be a function or method, with @@ -51,7 +51,7 @@ def remove_step(self, step: IChainStep) -> None: pass @abstractmethod - def execute(self, *args, **kwargs) -> Dict[str, Any]: + def execute(self, *args: tuple[Any, ...], **kwargs: dict[str, Any]) -> Dict[str, Any]: """ Initiates the execution of the chain. 
This involves invoking each step in the order they have been added to the chain, passing control from one step to the next, and optionally diff --git a/pkgs/core/swarmauri_core/chains/IChainFactory.py b/pkgs/core/swarmauri_core/chains/IChainFactory.py index 09ff10a90..00ce78b65 100644 --- a/pkgs/core/swarmauri_core/chains/IChainFactory.py +++ b/pkgs/core/swarmauri_core/chains/IChainFactory.py @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod -from typing import List, Any, Dict +from typing import Any, Dict, List, Optional + from swarmauri_core.chains.IChain import IChain from swarmauri_core.chains.IChainStep import IChainStep @@ -10,7 +11,7 @@ class IChainFactory(ABC): """ @abstractmethod - def create_chain(self, steps: List[IChainStep] = None) -> IChain: + def create_chain(self, steps: Optional[List[IChainStep]] = None) -> IChain: pass @abstractmethod @@ -18,11 +19,11 @@ def get_chain(self) -> IChain: pass @abstractmethod - def set_chain(self, chain: IChain): + def set_chain(self, chain: IChain) -> None: pass @abstractmethod - def reset_chain(self): + def reset_chain(self) -> None: pass @abstractmethod @@ -30,15 +31,15 @@ def get_chain_steps(self) -> List[IChainStep]: pass @abstractmethod - def set_chain_steps(self, steps: List[IChainStep]): + def set_chain_steps(self, steps: List[IChainStep]) -> None: pass @abstractmethod - def add_chain_step(self, step: IChainStep): + def add_chain_step(self, step: IChainStep) -> None: pass @abstractmethod - def remove_chain_step(self, key: str): + def remove_chain_step(self, key: str) -> None: pass @abstractmethod @@ -46,7 +47,7 @@ def get_configs(self) -> Dict[str, Any]: pass @abstractmethod - def set_configs(self, **configs): + def set_configs(self, **configs) -> None: pass @abstractmethod @@ -54,5 +55,5 @@ def get_config(self, key: str) -> Any: pass @abstractmethod - def set_config(self, key: str, value: Any): + def set_config(self, key: str, value: Any) -> None: pass diff --git a/pkgs/core/swarmauri_core/measurements/IMeasurement.py b/pkgs/core/swarmauri_core/measurements/IMeasurement.py index 489972f2f..694b48bc7 100644 --- a/pkgs/core/swarmauri_core/measurements/IMeasurement.py +++ b/pkgs/core/swarmauri_core/measurements/IMeasurement.py @@ -10,7 +10,7 @@ class IMeasurement(ABC): """ @abstractmethod - def __call__(self, **kwargs) -> Any: + def __call__(self, **kwargs: dict[str, Any]) -> Any: """ Retrieves the current value of the measurement. diff --git a/pkgs/core/swarmauri_core/measurements/IMeasurementCalculate.py b/pkgs/core/swarmauri_core/measurements/IMeasurementCalculate.py index 31a38299b..78ba9613a 100644 --- a/pkgs/core/swarmauri_core/measurements/IMeasurementCalculate.py +++ b/pkgs/core/swarmauri_core/measurements/IMeasurementCalculate.py @@ -4,24 +4,24 @@ class IMeasurementCalculate(ABC): @abstractmethod - def calculate(self, **kwargs) -> Any: + def calculate(self, *args: tuple[Any, ...], **kwargs: dict[str, Any]) -> Any: """ Calculate the measurement based on the provided data. Args: - *args: Variable length argument list that the measurement calculation might require. - **kwargs: Arbitrary keyword arguments that the measurement calculation might require. + *args tuple[Any, ...]: Variable length argument list that the measurement calculation might require. + **kwargs dict[str, Any]: Arbitrary keyword arguments that the measurement calculation might require. """ pass @abstractmethod - def update(self, value) -> None: + def update(self, value: float) -> None: """ Update the measurement value based on new information. 
         Args:
-            value: The new information used to update the measurement. This could be a new
-                measurement or data point that affects the measurement's current value.
+            value (float): The new information used to update the measurement. This could be a new
+                measurement or data point that affects the measurement's current value.
 
         Note:
         This method is intended for internal use and should not be publicly accessible.
diff --git a/pkgs/core/swarmauri_core/ocrs/IPredict.py b/pkgs/core/swarmauri_core/ocrs/IPredict.py
index 4180a4025..0b2df3561 100644
--- a/pkgs/core/swarmauri_core/ocrs/IPredict.py
+++ b/pkgs/core/swarmauri_core/ocrs/IPredict.py
@@ -1,4 +1,5 @@
 from abc import ABC, abstractmethod
+from typing import Any, AsyncIterator, Iterator
 
 
 class IPredict(ABC):
@@ -7,42 +8,42 @@ class IPredict(ABC):
     """
 
     @abstractmethod
-    def predict(self, *args, **kwargs) -> any:
+    def predict(self, *args, **kwargs) -> Any:
         """
         Generate predictions based on the input data provided to the model.
         """
         pass
 
     @abstractmethod
-    async def apredict(self, *args, **kwargs) -> any:
+    async def apredict(self, *args, **kwargs) -> Any:
         """
         Generate predictions based on the input data provided to the model.
         """
         pass
 
     @abstractmethod
-    def stream(self, *args, **kwargs) -> any:
+    def stream(self, *args, **kwargs) -> Iterator[Any]:
         """
         Generate predictions based on the input data provided to the model.
         """
         pass
 
     @abstractmethod
-    async def astream(self, *args, **kwargs) -> any:
+    async def astream(self, *args, **kwargs) -> AsyncIterator[Any]:
         """
         Generate predictions based on the input data provided to the model.
         """
         pass
 
     @abstractmethod
-    def batch(self, *args, **kwargs) -> any:
+    def batch(self, *args, **kwargs) -> list[Any]:
         """
         Generate predictions based on the input data provided to the model.
         """
         pass
 
     @abstractmethod
-    async def abatch(self, *args, **kwargs) -> any:
+    async def abatch(self, *args, **kwargs) -> list[Any]:
         """
         Generate predictions based on the input data provided to the model.
         """
         pass
diff --git a/pkgs/core/swarmauri_core/prompt_templates/IPromptTemplate.py b/pkgs/core/swarmauri_core/prompt_templates/IPromptTemplate.py
index 6593fa2f9..42ff1eaba 100644
--- a/pkgs/core/swarmauri_core/prompt_templates/IPromptTemplate.py
+++ b/pkgs/core/swarmauri_core/prompt_templates/IPromptTemplate.py
@@ -9,7 +9,7 @@ class IPromptTemplate(ABC):
     """
 
     @abstractmethod
-    def __call__(self, **kwargs) -> str:
+    def __call__(self, **kwargs: Dict[str, Any]) -> str:
         """
         Abstract method that subclasses must implement to define the behavior
         of the prompt template when called.
@@ -41,12 +41,12 @@
         pass
 
     @abstractmethod
-    def generate_prompt(self, **kwargs) -> str:
+    def generate_prompt(self, **kwargs: Dict[str, Any]) -> str:
         """
         Generates a prompt string based on the current template and provided keyword arguments.
 
         Args:
-            **kwargs: Keyword arguments containing variables for template substitution.
+            **kwargs (Dict[str, Any]): Keyword arguments containing variables for template substitution.
Returns: str: The generated prompt string with template variables replaced by their diff --git a/pkgs/core/swarmauri_core/prompts/IPrompt.py b/pkgs/core/swarmauri_core/prompts/IPrompt.py index 956065a54..52beada2e 100644 --- a/pkgs/core/swarmauri_core/prompts/IPrompt.py +++ b/pkgs/core/swarmauri_core/prompts/IPrompt.py @@ -1,4 +1,5 @@ from abc import ABC, abstractmethod +from typing import Any class IPrompt(ABC): @@ -10,7 +11,7 @@ class IPrompt(ABC): """ @abstractmethod - def __call__(self, **kwargs) -> str: + def __call__(self, **kwargs: dict[str, Any]) -> str: """ Abstract method that subclasses must implement to define the behavior of the prompt when called. diff --git a/pkgs/core/swarmauri_core/prompts/ITemplate.py b/pkgs/core/swarmauri_core/prompts/ITemplate.py index 9e2ef6687..07ad518c8 100644 --- a/pkgs/core/swarmauri_core/prompts/ITemplate.py +++ b/pkgs/core/swarmauri_core/prompts/ITemplate.py @@ -33,12 +33,12 @@ def set_variables( pass @abstractmethod - def generate_prompt(self, **kwargs) -> str: + def generate_prompt(self, **kwargs: dict[str, Any]) -> str: """ Generates a prompt string based on the current template and provided keyword arguments. Args: - **kwargs: Keyword arguments containing variables for template substitution. + **kwargs dict[str, Any]: Keyword arguments containing variables for template substitution. Returns: str: The generated prompt string with template variables replaced by their diff --git a/pkgs/core/swarmauri_core/toolkits/IToolkit.py b/pkgs/core/swarmauri_core/toolkits/IToolkit.py index c9a1aa03e..ca4b6538c 100644 --- a/pkgs/core/swarmauri_core/toolkits/IToolkit.py +++ b/pkgs/core/swarmauri_core/toolkits/IToolkit.py @@ -1,5 +1,6 @@ -from typing import Dict from abc import ABC, abstractmethod +from typing import Dict, Optional + from swarmauri_core.tools.ITool import ITool @@ -10,28 +11,28 @@ class IToolkit(ABC): """ @abstractmethod - def add_tools(self, tools: Dict[str, ITool]): + def add_tools(self, tools: Dict[str, ITool]) -> None: """ An abstract method that should be implemented by subclasses to add multiple tools to the toolkit. """ pass @abstractmethod - def add_tool(self, tool: ITool): + def add_tool(self, tool: ITool) -> None: """ An abstract method that should be implemented by subclasses to add a single tool to the toolkit. """ pass @abstractmethod - def remove_tool(self, tool_name: str): + def remove_tool(self, tool_name: str) -> None: """ An abstract method that should be implemented by subclasses to remove a tool from the toolkit by name. """ pass @abstractmethod - def get_tool_by_name(self, tool_name: str) -> ITool: + def get_tool_by_name(self, tool_name: str) -> Optional[ITool]: """ An abstract method that should be implemented by subclasses to retrieve a tool from the toolkit by name. 
""" diff --git a/pkgs/swarmauri_standard/swarmauri_standard/chunkers/SentenceChunker.py b/pkgs/swarmauri_standard/swarmauri_standard/chunkers/SentenceChunker.py index b05ce5797..61fd31edd 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/chunkers/SentenceChunker.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/chunkers/SentenceChunker.py @@ -1,4 +1,4 @@ -from typing import Literal +from typing import Any, List, Literal import re from swarmauri_base.chunkers.ChunkerBase import ChunkerBase from swarmauri_base.ComponentBase import ComponentBase @@ -14,7 +14,7 @@ class SentenceChunker(ChunkerBase): type: Literal["SentenceChunker"] = "SentenceChunker" - def chunk_text(self, text, *args, **kwargs): + def chunk_text(self, text: str, *args: Any, **kwargs: Any) -> List[str]: """ Chunks the given text into sentences using basic punctuation. diff --git a/pkgs/swarmauri_standard/swarmauri_standard/embeddings/CohereEmbedding.py b/pkgs/swarmauri_standard/swarmauri_standard/embeddings/CohereEmbedding.py index 238abba88..50c990186 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/embeddings/CohereEmbedding.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/embeddings/CohereEmbedding.py @@ -1,4 +1,4 @@ -from typing import List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union import httpx from pydantic import PrivateAttr, SecretStr @@ -65,17 +65,12 @@ class CohereEmbedding(EmbeddingBase): def __init__( self, - **kwargs, + **kwargs: Dict[str, Any], ): """ Initialize the CohereEmbedding instance. Args: - api_key (str, optional): The API key for accessing the Cohere API. - model (str, optional): The Cohere embedding model to use. - task_type (str, optional): The type of task for which embeddings are generated. - embedding_types (str, optional): The type of embedding to generate. - truncate (str, optional): The truncation strategy to use. **kwargs: Additional keyword arguments. Raises: diff --git a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/BlackForestImgGenModel.py b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/BlackForestImgGenModel.py index 7befcfb34..e337653fd 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/BlackForestImgGenModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/BlackForestImgGenModel.py @@ -1,7 +1,7 @@ import asyncio import contextlib import time -from typing import Dict, List, Literal, Optional +from typing import Any, Dict, List, Literal, Optional import httpx from pydantic import PrivateAttr, SecretStr @@ -30,9 +30,11 @@ class BlackForestImgGenModel(ImageGenBase): name: str = "" # Default model type: Literal["BlackForestImgGenModel"] = "BlackForestImgGenModel" - def __init__(self, **kwargs): + def __init__(self, **kwargs: Dict[str, Any]): """ Initializes the BlackForestImgGenModel instance with HTTP clients. + Args: + **kwarg (Dict[str, Any]): Additional arguments including api_key and allowed_models. """ super().__init__(**kwargs) self._headers = { @@ -163,7 +165,7 @@ def generate_image( raise TimeoutError(f"Image generation timed out after {max_wait_time} seconds") - async def agenerate_image(self, prompt: str, **kwargs) -> Dict: + async def agenerate_image(self, prompt: str, **kwargs: Dict[str, Any]) -> Dict: """ Asynchronously generates an image based on the prompt and waits for the result. 
@@ -221,13 +223,13 @@ async def agenerate_image(self, prompt: str, **kwargs) -> Dict:
         finally:
             await self._close_async_client()
 
-    def batch_generate(self, prompts: List[str], **kwargs) -> List[Dict]:
+    def batch_generate(self, prompts: List[str], **kwargs: Dict[str, Any]) -> List[Dict]:
         """
         Generates images for a batch of prompts synchronously.
 
         Args:
             prompts (List[str]): List of text prompts
-            **kwargs: Additional arguments passed to generate_image
+            **kwargs (Dict[str, Any]): Additional arguments passed to generate_image
 
         Returns:
             List[Dict]: List of result dictionaries
@@ -235,7 +237,7 @@ def batch_generate(self, prompts: List[str], **kwargs) -> List[Dict]:
         return [self.generate_image(prompt=prompt, **kwargs) for prompt in prompts]
 
     async def abatch_generate(
-        self, prompts: List[str], max_concurrent: int = 5, **kwargs
+        self, prompts: List[str], max_concurrent: int = 5, **kwargs: Dict[str, Any]
     ) -> List[Dict]:
         """
         Asynchronously generates images for a batch of prompts.
@@ -243,7 +245,7 @@ async def abatch_generate(
         Args:
             prompts (List[str]): List of text prompts
             max_concurrent (int): Maximum number of concurrent tasks
-            **kwargs: Additional arguments passed to agenerate_image
+            **kwargs (Dict[str, Any]): Additional arguments passed to agenerate_image
 
         Returns:
             List[Dict]: List of result dictionaries
diff --git a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/DeepInfraImgGenModel.py b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/DeepInfraImgGenModel.py
index 474512a44..38233c524 100644
--- a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/DeepInfraImgGenModel.py
+++ b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/DeepInfraImgGenModel.py
@@ -1,6 +1,6 @@
 import asyncio
 import contextlib
-from typing import List, Literal
+from typing import Any, Dict, List, Literal
 
 import httpx
 from pydantic import PrivateAttr, SecretStr
@@ -35,7 +35,7 @@ class DeepInfraImgGenModel(ImageGenBase):
     name: str = ""  # Default model
     type: Literal["DeepInfraImgGenModel"] = "DeepInfraImgGenModel"
 
-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs: Dict[str, Any]):
         """
         Initializes the DeepInfraImgGenModel instance.
 
         This sets up an HTTP client for synchronous
         operations and configures request headers with the provided API key.
 
         Args:
-            **data: Keyword arguments for model initialization.
+            **kwargs (Dict[str, Any]): Additional keyword arguments, which may include api_key and allowed_models.
         """
         super().__init__(**kwargs)
         self._headers = {
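One reviewer-style note on the `**kwargs: Dict[str, Any]` pattern this PR applies across the SDK: under PEP 484, an annotation on `**kwargs` describes the type of each keyword value, not of the whole mapping, so the conventional spelling is `**kwargs: Any`. A small illustration with hypothetical functions:

```python
from typing import Any, Dict


def f(**kwargs: Any) -> None:
    # Conventional form: each keyword value is Any; kwargs itself is a Dict[str, Any].
    pass


def g(**kwargs: Dict[str, Any]) -> None:
    # Form used throughout this PR: a strict type checker treats every keyword
    # value as a dict, so g(api_key="abc") would be flagged ("abc" is a str).
    pass
```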
""" super().__init__(**kwargs) self._headers = { diff --git a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/FalAIImgGenModel.py b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/FalAIImgGenModel.py index 5726a8ed3..b64ea4b76 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/FalAIImgGenModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/FalAIImgGenModel.py @@ -1,11 +1,11 @@ import asyncio import time -from typing import Dict, List, Literal, Optional +from typing import Any, Dict, List, Literal, Optional import httpx from pydantic import Field, PrivateAttr, SecretStr -from swarmauri_base.image_gens.ImageGenBase import ImageGenBase from swarmauri_base.ComponentBase import ComponentBase +from swarmauri_base.image_gens.ImageGenBase import ImageGenBase from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -37,12 +37,12 @@ class FalAIImgGenModel(ImageGenBase): max_retries: int = Field(default=60) # Maximum number of status check retries retry_delay: float = Field(default=1.0) # Delay between status checks in seconds - def __init__(self, **kwargs): + def __init__(self, **kwargs: Dict[str, Any]): """ Initializes the model with the specified API key and model name. Args: - **data: Configuration parameters for the model. + **kwargs (Dict[str, Any]): Additional keyword arguments, which may includes api_key and allowed_models. Raises: ValueError: If an invalid model name is provided. @@ -77,13 +77,13 @@ async def _close_async_client(self): await self._async_client.aclose() self._async_client = None - def _create_request_payload(self, prompt: str, **kwargs) -> dict: + def _create_request_payload(self, prompt: str, **kwargs: Dict[str, Any]) -> dict: """ Creates a payload for the image generation request. Args: prompt (str): The text prompt for image generation. - **kwargs: Additional parameters for the request. + **kwargs (Dict[str, Any]): Additional parameters for the request. Returns: dict: The request payload. @@ -91,13 +91,13 @@ def _create_request_payload(self, prompt: str, **kwargs) -> dict: return {"prompt": prompt, **kwargs} @retry_on_status_codes((429, 529), max_retries=1) - def _send_request(self, prompt: str, **kwargs) -> Dict: + def _send_request(self, prompt: str, **kwargs: Dict[str, Any]) -> Dict: """ Sends an image generation request to the queue and returns the request ID. Args: prompt (str): The text prompt for image generation. - **kwargs: Additional parameters for the request. + **kwargs (Dict[str, Any]): Additional parameters for the request. Returns: Dict: The response containing the request ID. @@ -142,13 +142,13 @@ def _get_result(self, request_id: str) -> Dict: return response.json() @retry_on_status_codes((429, 529), max_retries=1) - async def _async_send_request(self, prompt: str, **kwargs) -> Dict: + async def _async_send_request(self, prompt: str, **kwargs: Dict[str, Any]) -> Dict: """ Asynchronously sends an image generation request to the queue. Args: prompt (str): The text prompt for image generation. - **kwargs: Additional parameters for the request. + **kwargs (Dict[str, Any]): Additional parameters for the request. Returns: Dict: The response containing the request ID. 
@@ -247,13 +247,13 @@ async def _async_wait_for_completion(self, request_id: str) -> Dict: f"Request {request_id} did not complete within the timeout period" ) - def generate_image(self, prompt: str, **kwargs) -> str: + def generate_image(self, prompt: str, **kwargs: Dict[str, Any]) -> str: """ Generates an image based on the prompt and returns the image URL. Args: prompt (str): The text prompt for image generation. - **kwargs: Additional parameters for the request. + **kwargs (Dict[str, Any]): Additional parameters for the request. Returns: str: The URL of the generated image. @@ -263,13 +263,13 @@ def generate_image(self, prompt: str, **kwargs) -> str: final_response = self._wait_for_completion(request_id) return final_response["images"][0]["url"] - async def agenerate_image(self, prompt: str, **kwargs) -> str: + async def agenerate_image(self, prompt: str, **kwargs: Dict[str, Any]) -> str: """ Asynchronously generates an image based on the prompt and returns the image URL. Args: prompt (str): The text prompt for image generation - **kwargs: Additional parameters to pass to the API + **kwargs (Dict[str, Any]): Additional parameters to pass to the API Returns: str: The URL of the generated image @@ -282,13 +282,13 @@ async def agenerate_image(self, prompt: str, **kwargs) -> str: finally: await self._close_async_client() - def batch_generate(self, prompts: List[str], **kwargs) -> List[str]: + def batch_generate(self, prompts: List[str], **kwargs: Dict[str, Any]) -> List[str]: """ Generates images for a batch of prompts. Args: prompts (List[str]): List of text prompts - **kwargs: Additional parameters to pass to the API + **kwargs (Dict[str, Any]): Additional parameters to pass to the API Returns: List[str]: List of image URLs @@ -296,7 +296,7 @@ def batch_generate(self, prompts: List[str], **kwargs) -> List[str]: return [self.generate_image(prompt, **kwargs) for prompt in prompts] async def abatch_generate( - self, prompts: List[str], max_concurrent: int = 5, **kwargs + self, prompts: List[str], max_concurrent: int = 5, **kwargs: Dict[str, Any] ) -> List[str]: """ Asynchronously generates images for a batch of prompts. @@ -304,7 +304,7 @@ async def abatch_generate( Args: prompts (List[str]): List of text prompts max_concurrent (int): Maximum number of concurrent requests - **kwargs: Additional parameters to pass to the API + **kwargs (Dict[str, Any]): Additional parameters to pass to the API Returns: List[str]: List of image URLs diff --git a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/HyperbolicImgGenModel.py b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/HyperbolicImgGenModel.py index 974b8fd47..100244b45 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/HyperbolicImgGenModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/HyperbolicImgGenModel.py @@ -1,6 +1,6 @@ import asyncio import contextlib -from typing import List, Literal +from typing import Any, Dict, List, Literal import httpx from pydantic import PrivateAttr, SecretStr @@ -51,7 +51,7 @@ class HyperbolicImgGenModel(ImageGenBase): enable_refiner: bool = False backend: str = "auto" - def __init__(self, **kwargs): + def __init__(self, **kwargs: Dict[str, Any]): """ Initializes the HyperbolicImgGenModel instance. @@ -59,7 +59,7 @@ def __init__(self, **kwargs): operations and configures request headers with the provided API key. Args: - **data: Keyword arguments for model initialization. 
+            **kwargs (Dict[str, Any]): Additional keyword arguments, which may include api_key and allowed_models.
         """
         super().__init__(**kwargs)
         self._headers = {
diff --git a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/OpenAIImgGenModel.py b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/OpenAIImgGenModel.py
index 84e446429..3c2287bfe 100644
--- a/pkgs/swarmauri_standard/swarmauri_standard/image_gens/OpenAIImgGenModel.py
+++ b/pkgs/swarmauri_standard/swarmauri_standard/image_gens/OpenAIImgGenModel.py
@@ -1,5 +1,5 @@
 import asyncio
-from typing import Dict, List, Literal, Optional
+from typing import Any, Dict, List, Literal, Optional
 
 import httpx
 from pydantic import PrivateAttr, SecretStr
@@ -31,12 +31,12 @@ class OpenAIImgGenModel(ImageGenBase):
     _BASE_URL: str = PrivateAttr(default="https://api.openai.com/v1/images/generations")
     _headers: Dict[str, str] = PrivateAttr(default=None)
 
-    def __init__(self, **kwargs) -> None:
+    def __init__(self, **kwargs: Dict[str, Any]) -> None:
         """
-        Initialize the GroqAIAudio class with the provided data.
+        Initialize the OpenAIImgGenModel class with the provided data.
 
         Args:
-            **data: Arbitrary keyword arguments containing initialization data.
+            **kwargs (Dict[str, Any]): Additional keyword arguments, which may include api_key and allowed_models.
         """
         super().__init__(**kwargs)
         self._headers = {
diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/AI21StudioModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/AI21StudioModel.py
index c4facc979..d3c368e10 100644
--- a/pkgs/swarmauri_standard/swarmauri_standard/llms/AI21StudioModel.py
+++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/AI21StudioModel.py
@@ -1,6 +1,6 @@
 import asyncio
 import json
-from typing import AsyncIterator, Iterator, List, Literal, Type
+from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type
 
 import httpx
 from pydantic import PrivateAttr, SecretStr
@@ -43,7 +43,7 @@ class AI21StudioModel(LLMBase):
     )
     timeout: float = 600.0
 
-    def __init__(self, **data) -> None:
+    def __init__(self, **data: Dict[str, Any]) -> None:
         """
-        Initializes the GroqToolModel instance, setting up headers for API requests.
+        Initializes the AI21StudioModel instance, setting up headers for API requests.
 
diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/AnthropicToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/AnthropicToolModel.py
index edd12e659..fd37bd58c 100644
--- a/pkgs/swarmauri_standard/swarmauri_standard/llms/AnthropicToolModel.py
+++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/AnthropicToolModel.py
@@ -2,19 +2,21 @@
 import json
 import logging
 import warnings
-from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type
+from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Type
 
 import httpx
 from pydantic import PrivateAttr, SecretStr
 
+from swarmauri_base.ComponentBase import ComponentBase
 from swarmauri_base.llms.LLMBase import LLMBase
 from swarmauri_base.messages.MessageBase import MessageBase
-from swarmauri_base.ComponentBase import ComponentBase
+from swarmauri_core.conversations.IConversation import IConversation
 
 from swarmauri_standard.messages.AgentMessage import AgentMessage
 from swarmauri_standard.messages.FunctionMessage import FunctionMessage
 from swarmauri_standard.schema_converters.AnthropicSchemaConverter import (
     AnthropicSchemaConverter,
 )
+from swarmauri_standard.toolkits.Toolkit import Toolkit
 from swarmauri_standard.utils.retry_decorator import retry_on_status_codes
 
 warnings.warn(
@@ -36,10 +38,11 @@ class AnthropicToolModel(LLMBase):
     techniques to facilitate enhanced interactions involving tool usage within conversations.
Attributes: - api_key (str): The API key used for authenticating requests to the Anthropic API. + api_key (SecretStr): The API key used for authenticating requests to the Anthropic API. allowed_models (List[str]): A list of allowed model versions that can be used. name (str): The default model name used for predictions. - type (Literal): The type of the model, which is set to "AnthropicToolModel". + type (Literal["AnthropicToolModel"]): The type of the model, which is set to "AnthropicToolModel". + timeout (float): Timeout duration for API requests in seconds. Linked to Allowed Models: https://docs.anthropic.com/en/docs/build-with-claude/tool-use Link to API KEY: https://console.anthropic.com/settings/keys @@ -56,7 +59,13 @@ class AnthropicToolModel(LLMBase): timeout: float = 600.0 - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: + """ + Initialize the AnthropicToolModel with the provided configuration. + + Args: + **data (Dict[str, Any]): Keyword arguments for model configuration. + """ super().__init__(**data) headers = { "Content-Type": "application/json", @@ -72,12 +81,12 @@ def __init__(self, **data): self.allowed_models = self.allowed_models or self.get_allowed_models() self.name = self.allowed_models[0] - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools(self, tools: Dict[str, Any]) -> List[Dict[str, Any]]: """ Converts a toolkit's tools to the Anthropic-compatible schema format. Args: - tools (List): A list of tools to be converted. + tools (Dict[str, Any]): A dictionary of tools to be converted. Returns: List[Dict[str, Any]]: A list of tool schemas converted to the Anthropic format. @@ -90,7 +99,7 @@ def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: def _format_messages( self, messages: List[Type[MessageBase]] - ) -> List[Dict[str, str]]: + ) -> List[Dict[str, Any]]: """ Formats a list of messages to a schema that matches the Anthropic API's expectations. @@ -98,7 +107,7 @@ def _format_messages( messages (List[Type[MessageBase]]): The conversation history. Returns: - List[Dict[str, str]]: A formatted list of message dictionaries. + List[Dict[str, Any]]: A formatted list of message dictionaries. """ message_properties = ["content", "role", "tool_call_id", "tool_calls"] formatted_messages = [ @@ -110,24 +119,24 @@ def _format_messages( def predict( self, - conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - ): + conversation: IConversation, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Dict[str, Any]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> IConversation: """ Predicts the response based on the given conversation and optional toolkit. Args: - conversation: The current conversation object. - toolkit: Optional toolkit object containing tools for tool-based responses. - tool_choice: Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. + conversation (IConversation): The current conversation object. + toolkit (Optional[Toolkit]): Optional toolkit object containing tools for tool-based responses. + tool_choice (Optional[Dict[str, Any]]): Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. temperature (float): The temperature for the model's output randomness. max_tokens (int): The maximum number of tokens in the response. Returns: - The conversation object updated with the assistant's response. 
+ IConversation: The conversation object updated with the assistant's response. """ formatted_messages = self._format_messages(conversation.history) @@ -171,24 +180,24 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - ): + conversation: IConversation, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Dict[str, Any]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> IConversation: """ Asynchronous version of the `predict` method to handle concurrent processing of requests. Args: - conversation: The current conversation object. - toolkit: Optional toolkit object containing tools for tool-based responses. - tool_choice: Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. + conversation (IConversation): The current conversation object. + toolkit (Optional[Toolkit]): Optional toolkit object containing tools for tool-based responses. + tool_choice (Optional[Dict[str, Any]]): Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. temperature (float): The temperature for the model's output randomness. max_tokens (int): The maximum number of tokens in the response. Returns: - The conversation object updated with the assistant's response. + IConversation: The conversation object updated with the assistant's response. """ formatted_messages = self._format_messages(conversation.history) logging.info(f"formatted_messages: {formatted_messages}") @@ -231,19 +240,19 @@ async def apredict( def stream( self, - conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: IConversation, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Dict[str, Any]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Iterator[str]: """ Streams the response for a conversation in real-time, yielding text as it is received. Args: - conversation: The current conversation object. - toolkit: Optional toolkit object for tool-based responses. - tool_choice: Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. + conversation (IConversation): The current conversation object. + toolkit (Optional[Toolkit]): Optional toolkit object for tool-based responses. + tool_choice (Optional[Dict[str, Any]]): Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. temperature (float): The temperature for the model's output randomness. max_tokens (int): The maximum number of tokens in the response. @@ -305,19 +314,19 @@ def stream( async def astream( self, - conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: IConversation, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Dict[str, Any]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Asynchronously streams the response for a conversation, yielding text in real-time. Args: - conversation: The current conversation object. - toolkit: Optional toolkit object for tool-based responses. - tool_choice: Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. + conversation (IConversation): The current conversation object. + toolkit (Optional[Toolkit]): Optional toolkit object for tool-based responses. 
+ tool_choice (Optional[Dict[str, Any]]): Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. temperature (float): The temperature for the model's output randomness. max_tokens (int): The maximum number of tokens in the response. @@ -384,24 +393,24 @@ async def astream( def batch( self, - conversations: List, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - ) -> List: + conversations: List[IConversation], + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Dict[str, Any]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> List[IConversation]: """ Processes a batch of conversations in a synchronous manner. Args: - conversations (List): A list of conversation objects to process. - toolkit: Optional toolkit object for tool-based responses. - tool_choice: Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. + conversations (List[IConversation]): A list of conversation objects to process. + toolkit (Optional[Toolkit]): Optional toolkit object for tool-based responses. + tool_choice (Optional[Dict[str, Any]]): Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. temperature (float): The temperature for the model's output randomness. max_tokens (int): The maximum number of tokens in the response. Returns: - List: A list of conversation objects updated with the assistant's responses. + List[IConversation]: A list of conversation objects updated with the assistant's responses. """ results = [] for conv in conversations: @@ -417,28 +426,27 @@ def batch( async def abatch( self, - conversations: List, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - max_concurrent=5, - ) -> List: + conversations: List[IConversation], + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Dict[str, Any]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, + max_concurrent: int = 5, + ) -> List[IConversation]: """ Processes a batch of conversations asynchronously with limited concurrency. Args: - conversations (List): A list of conversation objects to process. - toolkit: Optional toolkit object for tool-based responses. - tool_choice: Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. + conversations (List[IConversation]): A list of conversation objects to process. + toolkit (Optional[Toolkit]): Optional toolkit object for tool-based responses. + tool_choice (Optional[Dict[str, Any]]): Optional parameter to choose specific tools or set to 'auto' for automatic tool usage. temperature (float): The temperature for the model's output randomness. max_tokens (int): The maximum number of tokens in the response. max_concurrent (int): The maximum number of concurrent processes allowed. Returns: - List: A list of conversation objects updated with the assistant's responses. + List[IConversation]: A list of conversation objects updated with the assistant's responses. """ - semaphore = asyncio.Semaphore(max_concurrent) async def process_conversation(conv): @@ -456,10 +464,10 @@ async def process_conversation(conv): def get_allowed_models(self) -> List[str]: """ - Queries the LLMProvider API endpoint to retrieve the list of allowed models. + Retrieves the list of allowed models for the Anthropic API. Returns: - List[str]: A list of allowed model names retrieved from the API. + List[str]: A list of allowed model names. 
""" allowed_models = [ "claude-3-sonnet-20240229", diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/CohereModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/CohereModel.py index fa18bfba9..bb8011aed 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/CohereModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/CohereModel.py @@ -1,13 +1,14 @@ import asyncio import json -from typing import AsyncIterator, Dict, Iterator, List, Literal +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Type import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage, UsageData from swarmauri_standard.utils.duration_manager import DurationManager from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -20,10 +21,11 @@ class CohereModel(LLMBase): Cohere's chat endpoints, supporting single messages, streaming, and batch processing. Attributes: - api_key (str): The authentication key for accessing Cohere's API. + api_key (SecretStr): The authentication key for accessing Cohere's API. allowed_models (List[str]): List of supported Cohere model identifiers. name (str): The default model name to use (defaults to "command"). type (Literal["CohereModel"]): The type identifier for this model class. + timeout (float): Timeout for API requests in seconds. Link to Allowed Models: https://docs.cohere.com/docs/models Link to API Key: https://dashboard.cohere.com/api-keys @@ -39,12 +41,12 @@ class CohereModel(LLMBase): timeout: float = 600.0 - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: """ Initialize the CohereModel with the provided configuration. Args: - **data: Keyword arguments for model configuration, must include 'api_key'. + **data (Dict[str, Any]): Keyword arguments for model configuration, must include 'api_key'. """ super().__init__(**data) headers = { @@ -59,6 +61,12 @@ def __init__(self, **data): self.name = self.allowed_models[0] def get_headers(self) -> Dict[str, str]: + """ + Generate the HTTP headers needed for API requests. + + Returns: + Dict[str, str]: Headers dictionary with authorization and content type. + """ return { "accept": "application/json", "content-type": "application/json", @@ -66,19 +74,19 @@ def get_headers(self) -> Dict[str, str]: } def _format_messages( - self, messages: List[MessageBase] - ) -> tuple[List[Dict[str, str]], str, str]: + self, messages: List[Type[MessageBase]] + ) -> tuple[List[Dict[str, str]], Optional[str], Optional[str]]: """ Format a list of messages into Cohere's expected chat format. Args: - messages: List of MessageBase objects containing the conversation history. + messages (List[Type[MessageBase]]): List of MessageBase objects containing the conversation history. 
Returns: tuple containing: - List[Dict[str, str]]: Formatted chat history - - str: System message (if any) - - str: Latest user message + - Optional[str]: System message (if any) + - Optional[str]: Latest user message """ chat_history = [] system_message = None @@ -107,7 +115,7 @@ def _format_messages( def _prepare_usage_data( self, - usage_data: Dict, + usage_data: Dict[str, int], prompt_time: float, completion_time: float, ) -> UsageData: @@ -115,9 +123,9 @@ def _prepare_usage_data( Prepare usage statistics from API response and timing data. Args: - usage_data: Dictionary containing token usage information from the API - prompt_time: Time taken to send the prompt - completion_time: Time taken to receive the completion + usage_data (Dict[str, int]): Dictionary containing token usage information from the API + prompt_time (float): Time taken to send the prompt + completion_time (float): Time taken to receive the completion Returns: UsageData: Object containing formatted usage statistics @@ -139,17 +147,22 @@ def _prepare_usage_data( return usage @retry_on_status_codes((429, 529), max_retries=1) - def predict(self, conversation, temperature=0.7, max_tokens=256): + def predict( + self, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + ) -> Conversation: """ Generate a single prediction from the model synchronously. Args: - conversation: The conversation object containing message history + conversation (Conversation): The conversation object containing message history temperature (float, optional): Sampling temperature. Defaults to 0.7 max_tokens (int, optional): Maximum tokens in response. Defaults to 256 Returns: - The updated conversation object with the model's response added + Conversation: The updated conversation object with the model's response added Raises: httpx.HTTPError: If the API request fails @@ -193,17 +206,22 @@ def predict(self, conversation, temperature=0.7, max_tokens=256): return conversation @retry_on_status_codes((429, 529), max_retries=1) - async def apredict(self, conversation, temperature=0.7, max_tokens=256): + async def apredict( + self, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + ) -> Conversation: """ Generate a single prediction from the model asynchronously. Args: - conversation: The conversation object containing message history + conversation (Conversation): The conversation object containing message history temperature (float, optional): Sampling temperature. Defaults to 0.7 max_tokens (int, optional): Maximum tokens in response. Defaults to 256 Returns: - The updated conversation object with the model's response added + Conversation: The updated conversation object with the model's response added Raises: httpx.HTTPError: If the API request fails @@ -250,7 +268,12 @@ async def apredict(self, conversation, temperature=0.7, max_tokens=256): return conversation @retry_on_status_codes((429, 529), max_retries=1) - def stream(self, conversation, temperature=0.7, max_tokens=256) -> Iterator[str]: + def stream( + self, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + ) -> Iterator[str]: """ Stream responses from the model synchronously, yielding content as it becomes available. @@ -259,7 +282,7 @@ def stream(self, conversation, temperature=0.7, max_tokens=256) -> Iterator[str] complete response to the conversation history. 
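Editorial aside: a hedged sketch of the synchronous streaming path documented here, assuming the `stream` signature shown in this hunk and that `HumanMessage` accepts a plain string `content`; the environment-variable name is illustrative:

```python
import os

from swarmauri_standard.conversations.Conversation import Conversation
from swarmauri_standard.llms.CohereModel import CohereModel
from swarmauri_standard.messages.HumanMessage import HumanMessage

llm = CohereModel(api_key=os.environ["COHERE_API_KEY"])  # illustrative env var

conversation = Conversation()
conversation.add_message(HumanMessage(content="Name three uses of embeddings."))

# stream() yields text chunks; per the docstring, the accumulated response is
# appended to the conversation history once the stream is exhausted.
for chunk in llm.stream(conversation, temperature=0.7, max_tokens=256):
    print(chunk, end="", flush=True)
```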
Args: - conversation: The conversation object containing message history + conversation (Conversation): The conversation object containing message history temperature (float, optional): Sampling temperature. Controls randomness in the response. Higher values (e.g., 0.8) create more diverse outputs, while lower values (e.g., 0.2) make outputs more deterministic. Defaults to 0.7. @@ -318,7 +341,10 @@ def stream(self, conversation, temperature=0.7, max_tokens=256) -> Iterator[str] @retry_on_status_codes((429, 529), max_retries=1) async def astream( - self, conversation, temperature=0.7, max_tokens=256 + self, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, ) -> AsyncIterator[str]: """ Stream responses from the model asynchronously, yielding content as it becomes available. @@ -328,7 +354,7 @@ async def astream( and manages its own AsyncClient instance to prevent event loop issues. Args: - conversation: The conversation object containing message history + conversation (Conversation): The conversation object containing message history temperature (float, optional): Sampling temperature. Controls randomness in the response. Higher values (e.g., 0.8) create more diverse outputs, while lower values (e.g., 0.2) make outputs more deterministic. Defaults to 0.7. @@ -395,17 +421,22 @@ async def astream( conversation.add_message(AgentMessage(content=full_content, usage=usage)) - def batch(self, conversations: List, temperature=0.7, max_tokens=256) -> List: + def batch( + self, + conversations: List[Conversation], + temperature: float = 0.7, + max_tokens: int = 256, + ) -> List[Conversation]: """ Process multiple conversations synchronously. Args: - conversations: List of conversation objects to process + conversations (List[Conversation]): List of conversation objects to process temperature (float, optional): Sampling temperature. Defaults to 0.7 max_tokens (int, optional): Maximum tokens in response. Defaults to 256 Returns: - List of updated conversation objects with model responses added + List[Conversation]: List of updated conversation objects with model responses added """ return [ self.predict(conv, temperature=temperature, max_tokens=max_tokens) @@ -413,23 +444,27 @@ def batch(self, conversations: List, temperature=0.7, max_tokens=256) -> List: ] async def abatch( - self, conversations: List, temperature=0.7, max_tokens=256, max_concurrent=5 - ) -> List: + self, + conversations: List[Conversation], + temperature: float = 0.7, + max_tokens: int = 256, + max_concurrent: int = 5, + ) -> List[Conversation]: """ Process multiple conversations asynchronously with concurrency control. Args: - conversations: List of conversation objects to process + conversations (List[Conversation]): List of conversation objects to process temperature (float, optional): Sampling temperature. Defaults to 0.7 max_tokens (int, optional): Maximum tokens in response. Defaults to 256 max_concurrent (int, optional): Maximum number of concurrent requests. 
Defaults to 5 Returns: - List of updated conversation objects with model responses added + List[Conversation]: List of updated conversation objects with model responses added """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv): + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, temperature=temperature, max_tokens=max_tokens diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/CohereToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/CohereToolModel.py index be0472444..c10ec2b3a 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/CohereToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/CohereToolModel.py @@ -1,19 +1,33 @@ import asyncio import json import warnings -from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type, Union +from typing import ( + Any, + AsyncIterator, + Dict, + Iterator, + List, + Literal, + Optional, + Type, + Union, +) import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase +from swarmauri_base.DynamicBase import SubclassUnion from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase +from swarmauri_base.tools.ToolBase import ToolBase +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage, UsageData from swarmauri_standard.messages.HumanMessage import HumanMessage, contentItem from swarmauri_standard.schema_converters.CohereSchemaConverter import ( CohereSchemaConverter, ) +from swarmauri_standard.toolkits.Toolkit import Toolkit from swarmauri_standard.utils.duration_manager import DurationManager from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -36,11 +50,12 @@ class CohereToolModel(LLMBase): responses and batch processing of multiple conversations. Attributes: - api_key (str): The API key for authenticating with Cohere's API + api_key (SecretStr): The API key for authenticating with Cohere's API allowed_models (List[str]): List of supported Cohere model names name (str): The default model name to use type (Literal["CohereToolModel"]): The type identifier for this model resource (str): The resource type identifier + timeout (float): Maximum timeout for API requests in seconds Link to Allowed Models: https://docs.cohere.com/docs/models#command Link to API Key: https://dashboard.cohere.com/api-keys @@ -56,12 +71,12 @@ class CohereToolModel(LLMBase): type: Literal["CohereToolModel"] = "CohereToolModel" timeout: float = 600.0 - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: """ Initialize the CohereToolModel with the provided configuration. Args: - **data: Keyword arguments for configuring the model, including api_key + **data (Dict[str, Any]): Keyword arguments for configuring the model, including api_key """ super().__init__(**data) headers = { @@ -78,12 +93,14 @@ def __init__(self, **data): self.allowed_models = self.allowed_models or self.get_allowed_models() self.name = self.allowed_models[0] - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools( + self, tools: Dict[str, SubclassUnion[ToolBase]] + ) -> List[Dict[str, Any]]: """ Convert tool definitions to Cohere's expected schema format. 
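Editorial aside: a hedged sketch of what this conversion step amounts to, mirroring the list comprehension the sibling models in this PR use and assuming the toolkit stores tools in a name-keyed mapping, as the new `Dict[str, SubclassUnion[ToolBase]]` annotation indicates:

```python
from typing import Any, Dict, List

from swarmauri_standard.schema_converters.CohereSchemaConverter import (
    CohereSchemaConverter,
)


def schema_convert_tools(tools: Dict[str, Any]) -> List[Dict[str, Any]]:
    # Each ToolBase subclass instance is converted to Cohere's
    # function-schema dict, one entry per registered tool.
    converter = CohereSchemaConverter()
    return [converter.convert(tools[name]) for name in tools]
```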
Args: - tools: Dictionary of tools to convert + tools (Dict[str, SubclassUnion[ToolBase]]): Dictionary of tools to convert Returns: List[Dict[str, Any]]: List of converted tool definitions @@ -165,9 +182,9 @@ def _prepare_usage_data( Prepare usage statistics from API response and timing data. Args: - usage_data: Dictionary containing token usage information from the API - prompt_time: Time taken to send the prompt - completion_time: Time taken to receive the completion + usage_data (Dict[str, Any]): Dictionary containing token usage information from the API + prompt_time (float): Time taken to send the prompt + completion_time (float): Time taken to receive the completion Returns: UsageData: Object containing formatted usage statistics @@ -188,15 +205,17 @@ def _prepare_usage_data( ) return usage - def _ensure_conversation_has_message(self, conversation): + def _ensure_conversation_has_message( + self, conversation: Conversation + ) -> Conversation: """ Ensure that a conversation has at least one message by adding a default message if empty. Args: - conversation: The conversation to check + conversation (Conversation): The conversation to check Returns: - The conversation, potentially with an added default message + Conversation: The conversation, potentially with an added default message """ if not conversation.history: conversation.add_message( @@ -204,13 +223,15 @@ def _ensure_conversation_has_message(self, conversation): ) return conversation - def _process_tool_calls(self, response_data, toolkit): + def _process_tool_calls( + self, response_data: Dict[str, Any], toolkit: Toolkit + ) -> List[Dict[str, Any]]: """ Process tool calls from the model's response and execute them using the provided toolkit. Args: - response_data: The response data containing tool calls - toolkit: The toolkit containing the tools to execute + response_data (Dict[str, Any]): The response data containing tool calls + toolkit (Toolkit): The toolkit containing the tools to execute Returns: List[Dict[str, Any]]: Results of the tool executions @@ -233,9 +254,9 @@ def _process_tool_calls(self, response_data, toolkit): def _prepare_chat_payload( self, message: str, - chat_history: List[Dict[str, str]], - tools: List[Dict[str, Any]] = None, - tool_results: List[Dict[str, Any]] = None, + chat_history: Optional[List[Dict[str, str]]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + tool_results: Optional[List[Dict[str, Any]]] = None, temperature: float = 0.3, force_single_step: bool = True, ) -> Dict[str, Any]: @@ -244,11 +265,11 @@ def _prepare_chat_payload( Args: message (str): The current message to process - chat_history (List[Dict[str, str]]): Previous chat history - tools (List[Dict[str, Any]], optional): Available tools - tool_results (List[Dict[str, Any]], optional): Results from previous tool calls - temperature (float, optional): Sampling temperature - force_single_step (bool, optional): Whether to force single-step responses + chat_history (Optional[List[Dict[str, str]]]): Previous chat history + tools (Optional[List[Dict[str, Any]]]): Available tools + tool_results (Optional[List[Dict[str, Any]]]): Results from previous tool calls + temperature (float): Sampling temperature + force_single_step (bool): Whether to force single-step responses Returns: Dict[str, Any]: The prepared payload for the API request @@ -272,18 +293,24 @@ def _prepare_chat_payload( return payload @retry_on_status_codes((429, 529), max_retries=1) - def predict(self, conversation, toolkit=None, temperature=0.3, max_tokens=1024): + 
def predict( + self, + conversation: Conversation, + toolkit: Optional[Toolkit] = None, + temperature: float = 0.3, + max_tokens: int = 1024, + ) -> Conversation: """ Generate a response for a conversation synchronously. Args: - conversation: The conversation to generate a response for - toolkit: Optional toolkit containing available tools - temperature (float, optional): Sampling temperature - max_tokens (int, optional): Maximum number of tokens to generate + conversation (Conversation): The conversation to generate a response for + toolkit (Optional[Toolkit]): Optional toolkit containing available tools + temperature (float): Sampling temperature + max_tokens (int): Maximum number of tokens to generate Returns: - The updated conversation with the model's response + Conversation: The updated conversation with the model's response """ conversation = self._ensure_conversation_has_message(conversation) formatted_messages = self._format_messages(conversation.history) @@ -334,16 +361,20 @@ def predict(self, conversation, toolkit=None, temperature=0.3, max_tokens=1024): @retry_on_status_codes((429, 529), max_retries=1) def stream( - self, conversation, toolkit=None, temperature=0.3, max_tokens=1024 + self, + conversation: Conversation, + toolkit: Optional[Toolkit] = None, + temperature: float = 0.3, + max_tokens: int = 1024, ) -> Iterator[str]: """ Stream a response for a conversation synchronously. Args: - conversation: The conversation to generate a response for - toolkit: Optional toolkit containing available tools - temperature (float, optional): Sampling temperature - max_tokens (int, optional): Maximum number of tokens to generate + conversation (Conversation): The conversation to generate a response for + toolkit (Optional[Toolkit]): Optional toolkit containing available tools + temperature (float): Sampling temperature + max_tokens (int): Maximum number of tokens to generate Returns: Iterator[str]: An iterator yielding response chunks @@ -400,23 +431,27 @@ def stream( usage = self._prepare_usage_data( usage_data, prompt_timer.duration, completion_timer.duration ) - conversation.add_message(AgentMessage(content=full_content), usage=usage) + conversation.add_message(AgentMessage(content=full_content, usage=usage)) @retry_on_status_codes((429, 529), max_retries=1) async def apredict( - self, conversation, toolkit=None, temperature=0.3, max_tokens=1024 - ): + self, + conversation: Conversation, + toolkit: Optional[Toolkit] = None, + temperature: float = 0.3, + max_tokens: int = 1024, + ) -> Conversation: """ Generate a response for a conversation asynchronously. 
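Editorial aside: a hedged sketch of the asynchronous path, assuming the `apredict` signature shown in this hunk; with the toolkit omitted no tool calls are made, and the environment-variable name is illustrative:

```python
import asyncio
import os

from swarmauri_standard.conversations.Conversation import Conversation
from swarmauri_standard.llms.CohereToolModel import CohereToolModel
from swarmauri_standard.messages.HumanMessage import HumanMessage


async def main() -> None:
    llm = CohereToolModel(api_key=os.environ["COHERE_API_KEY"])  # illustrative
    conversation = Conversation()
    conversation.add_message(HumanMessage(content="What is 17 * 23?"))
    # apredict mirrors predict but awaits the HTTP call; with toolkit=None
    # the model answers directly instead of executing tools.
    conversation = await llm.apredict(conversation, temperature=0.3, max_tokens=1024)
    print(conversation.history[-1].content)


asyncio.run(main())
```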
Args: - conversation: The conversation to generate a response for - toolkit: Optional toolkit containing available tools - temperature (float, optional): Sampling temperature - max_tokens (int, optional): Maximum number of tokens to generate + conversation (Conversation): The conversation to generate a response for + toolkit (Optional[Toolkit]): Optional toolkit containing available tools + temperature (float): Sampling temperature + max_tokens (int): Maximum number of tokens to generate Returns: - The updated conversation with the model's response + Conversation: The updated conversation with the model's response """ conversation = self._ensure_conversation_has_message(conversation) formatted_messages = self._format_messages(conversation.history) @@ -467,16 +502,20 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) async def astream( - self, conversation, toolkit=None, temperature=0.3, max_tokens=1024 + self, + conversation: Conversation, + toolkit: Optional[Toolkit] = None, + temperature: float = 0.3, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Stream a response for a conversation asynchronously. Args: - conversation: The conversation to generate a response for - toolkit: Optional toolkit containing available tools - temperature (float, optional): Sampling temperature - max_tokens (int, optional): Maximum number of tokens to generate + conversation (Conversation): The conversation to generate a response for + toolkit (Optional[Toolkit]): Optional toolkit containing available tools + temperature (float): Sampling temperature + max_tokens (int): Maximum number of tokens to generate Returns: AsyncIterator[str]: An async iterator yielding response chunks @@ -539,11 +578,15 @@ async def astream( usage = self._prepare_usage_data( usage_data, prompt_timer.duration, completion_timer.duration ) - conversation.add_message(AgentMessage(content=full_content), usage=usage) + conversation.add_message(AgentMessage(content=full_content, usage=usage)) def batch( - self, conversations: List, toolkit=None, temperature=0.3, max_tokens=1024 - ) -> List: + self, + conversations: List[Conversation], + toolkit: Optional[Toolkit] = None, + temperature: float = 0.3, + max_tokens: int = 1024, + ) -> List[Conversation]: """ Process multiple conversations in batch mode synchronously. @@ -552,15 +595,15 @@ def batch( parameters. Args: - conversations (List): A list of conversation objects to process - toolkit (optional): The toolkit containing available tools for the model - temperature (float, optional): The sampling temperature for response generation. + conversations (List[Conversation]): A list of conversation objects to process + toolkit (Optional[Toolkit]): The toolkit containing available tools for the model + temperature (float): The sampling temperature for response generation. Defaults to 0.3 - max_tokens (int, optional): The maximum number of tokens to generate for each + max_tokens (int): The maximum number of tokens to generate for each response. 
Defaults to 1024 Returns: - List: A list of processed conversations with their respective responses + List[Conversation]: A list of processed conversations with their respective responses """ return [ self.predict( @@ -571,12 +614,12 @@ def batch( async def abatch( self, - conversations: List, - toolkit=None, - temperature=0.3, - max_tokens=1024, - max_concurrent=5, - ) -> List: + conversations: List[Conversation], + toolkit: Optional[Toolkit] = None, + temperature: float = 0.3, + max_tokens: int = 1024, + max_concurrent: int = 5, + ) -> List[Conversation]: """ Process multiple conversations in batch mode asynchronously. @@ -585,22 +628,21 @@ async def abatch( overwhelming the API service while still maintaining efficient processing. Args: - conversations (List): A list of conversation objects to process - toolkit (optional): The toolkit containing available tools for the model - temperature (float, optional): The sampling temperature for response generation. + conversations (List[Conversation]): A list of conversation objects to process + toolkit (Optional[Toolkit]): The toolkit containing available tools for the model + temperature (float): The sampling temperature for response generation. Defaults to 0.3 - max_tokens (int, optional): The maximum number of tokens to generate for each + max_tokens (int): The maximum number of tokens to generate for each response. Defaults to 1024 - max_concurrent (int, optional): The maximum number of conversations to process + max_concurrent (int): The maximum number of conversations to process simultaneously. Defaults to 5 Returns: - List: A list of processed conversations with their respective responses + List[Conversation]: A list of processed conversations with their respective responses Note: The max_concurrent parameter helps control API usage and prevent rate limiting while still allowing for parallel processing of multiple conversations. - """ semaphore = asyncio.Semaphore(max_concurrent) diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/DeepInfraModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/DeepInfraModel.py index 04a6ee80a..9d77b2af6 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/DeepInfraModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/DeepInfraModel.py @@ -1,13 +1,14 @@ import asyncio import json -from typing import AsyncIterator, Dict, Iterator, List, Literal +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase, SubclassUnion from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase, SubclassUnion +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -21,7 +22,7 @@ class DeepInfraModel(LLMBase): providing support for predictions, streaming responses, and batch processing. 
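Editorial aside: a hedged usage sketch for this class, assuming the `predict` signature shown in the hunks that follow; `enable_json` and `stop` map directly onto the request payload assembled by `_create_request_payload`, and the environment-variable name is illustrative:

```python
import os

from swarmauri_standard.conversations.Conversation import Conversation
from swarmauri_standard.llms.DeepInfraModel import DeepInfraModel
from swarmauri_standard.messages.HumanMessage import HumanMessage

llm = DeepInfraModel(api_key=os.environ["DEEPINFRA_API_KEY"])  # illustrative

conversation = Conversation()
conversation.add_message(HumanMessage(content='Answer with a JSON object {"ok": true}.'))

# enable_json requests a JSON response format; stop sequences end generation.
conversation = llm.predict(
    conversation, temperature=0.2, max_tokens=128, enable_json=True, stop=["\n\n"]
)
print(conversation.history[-1].content)
```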
Attributes: - api_key (str): DeepInfra API key for authentication + api_key (SecretStr): DeepInfra API key for authentication Can be obtained from: https://deepinfra.com/dash/api_keys allowed_models (List[str]): List of supported model identifiers on DeepInfra @@ -49,13 +50,13 @@ class DeepInfraModel(LLMBase): timeout: float = 600.0 - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: """ Initializes the DeepInfraModel instance with the provided API key and sets up httpx clients for both sync and async operations. Args: - **data: Keyword arguments for model initialization. + **data (Dict[str, Any]): Keyword arguments for model initialization. """ super().__init__(**data) headers = { @@ -96,9 +97,9 @@ def _create_request_payload( temperature: float, max_tokens: int, enable_json: bool, - stop: List[str] = None, + stop: Optional[List[str]] = None, stream: bool = False, - ) -> Dict: + ) -> Dict[str, Any]: """ Creates the payload for the API request. @@ -107,11 +108,11 @@ def _create_request_payload( temperature (float): Sampling temperature for the response. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Whether to enable JSON response format. - stop (List[str], optional): Stop sequences. + stop (Optional[List[str]], optional): Stop sequences. stream (bool): Whether to stream the response. Returns: - Dict: Payload for the API request. + Dict[str, Any]: Payload for the API request. """ payload = { "model": self.name, @@ -135,24 +136,24 @@ def _create_request_payload( @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation, - temperature=0.7, - max_tokens=256, - enable_json=False, - stop: List[str] = None, - ): + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + enable_json: bool = False, + stop: Optional[List[str]] = None, + ) -> Conversation: """ Sends a synchronous request to generate a response from the model. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Flag for enabling JSON response format. - stop (List[str], optional): Stop sequences for the response. + stop (Optional[List[str]], optional): Stop sequences for the response. Returns: - Updated conversation with the model's response. + Conversation: Updated conversation with the model's response. """ formatted_messages = self._format_messages(conversation.history) payload = self._create_request_payload( @@ -171,24 +172,24 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation, - temperature=0.7, - max_tokens=256, - enable_json=False, - stop: List[str] = None, - ): + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + enable_json: bool = False, + stop: Optional[List[str]] = None, + ) -> Conversation: """ Sends an asynchronous request to generate a response from the model. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Flag for enabling JSON response format. - stop (List[str], optional): Stop sequences for the response. 
+ stop (Optional[List[str]], optional): Stop sequences for the response. Returns: - Updated conversation with the model's response. + Conversation: Updated conversation with the model's response. """ formatted_messages = self._format_messages(conversation.history) payload = self._create_request_payload( @@ -207,19 +208,19 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) def stream( self, - conversation, - temperature=0.7, - max_tokens=256, - stop: List[str] = None, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + stop: Optional[List[str]] = None, ) -> Iterator[str]: """ Streams response content from the model synchronously. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. - stop (List[str], optional): Stop sequences for the response. + stop (Optional[List[str]], optional): Stop sequences for the response. Yields: str: Chunks of content from the model's response. @@ -253,19 +254,19 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def astream( self, - conversation, - temperature=0.7, - max_tokens=256, - stop: List[str] = None, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + stop: Optional[List[str]] = None, ) -> AsyncIterator[str]: """ Streams response content from the model asynchronously. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. - stop (List[str], optional): Stop sequences for the response. + stop (Optional[List[str]], optional): Stop sequences for the response. Yields: str: Chunks of content from the model's response. @@ -296,24 +297,24 @@ async def astream( def batch( self, - conversations: List, - temperature=0.7, - max_tokens=256, - enable_json=False, - stop: List[str] = None, - ) -> List: + conversations: List[Conversation], + temperature: float = 0.7, + max_tokens: int = 256, + enable_json: bool = False, + stop: Optional[List[str]] = None, + ) -> List[Conversation]: """ Processes multiple conversations in batch synchronously. Args: - conversations (List): List of conversation objects. + conversations (List[Conversation]): List of conversation objects. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Flag for enabling JSON response format. - stop (List[str], optional): Stop sequences for responses. + stop (Optional[List[str]], optional): Stop sequences for responses. Returns: - List: List of updated conversations with model responses. + List[Conversation]: List of updated conversations with model responses. """ return [ self.predict( @@ -328,30 +329,30 @@ def batch( async def abatch( self, - conversations: List, - temperature=0.7, - max_tokens=256, - enable_json=False, - stop: List[str] = None, - max_concurrent=5, - ) -> List: + conversations: List[Conversation], + temperature: float = 0.7, + max_tokens: int = 256, + enable_json: bool = False, + stop: Optional[List[str]] = None, + max_concurrent: int = 5, + ) -> List[Conversation]: """ Processes multiple conversations asynchronously, with concurrency control. 
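Editorial aside: the concurrency control described here is the same idiom every model in this PR uses. A self-contained sketch of that idiom; the names are illustrative, and the `asyncio.gather` tail is assumed from the truncated hunks:

```python
import asyncio
from typing import Awaitable, Callable, List, TypeVar

T = TypeVar("T")
R = TypeVar("R")


async def bounded_batch(
    items: List[T],
    worker: Callable[[T], Awaitable[R]],
    max_concurrent: int = 5,
) -> List[R]:
    # A semaphore caps how many worker coroutines run at once, while
    # asyncio.gather preserves the input order of the results.
    semaphore = asyncio.Semaphore(max_concurrent)

    async def guarded(item: T) -> R:
        async with semaphore:
            return await worker(item)

    return await asyncio.gather(*(guarded(item) for item in items))
```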
Args: - conversations (List): List of conversation objects. + conversations (List[Conversation]): List of conversation objects. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Flag for enabling JSON response format. - stop (List[str], optional): Stop sequences for responses. + stop (Optional[List[str]], optional): Stop sequences for responses. max_concurrent (int): Maximum number of concurrent tasks. Returns: - List: List of updated conversations with model responses. + List[Conversation]: List of updated conversations with model responses. """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv): + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/DeepSeekModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/DeepSeekModel.py index 81355c57b..79d9ef2e9 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/DeepSeekModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/DeepSeekModel.py @@ -1,13 +1,14 @@ import asyncio import json -from typing import AsyncIterator, Dict, Iterator, List, Literal +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase, SubclassUnion from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase, SubclassUnion +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -22,7 +23,7 @@ class DeepSeekModel(LLMBase): with the DeepSeek API. Attributes: - api_key (str): The API key for authenticating with DeepSeek. + api_key (SecretStr): The API key for authenticating with DeepSeek. allowed_models (List[str]): List of models supported by DeepSeek, defaulting to ["deepseek-chat"]. name (str): The model name, defaulting to "deepseek-chat". type (Literal): The class type for identifying the LLM, set to "DeepSeekModel". @@ -43,7 +44,7 @@ class DeepSeekModel(LLMBase): timeout: float = 600.0 - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: super().__init__(**data) self._client = httpx.Client( @@ -52,7 +53,7 @@ def __init__(self, **data): timeout=self.timeout, ) self._async_client = httpx.AsyncClient( - headers={"Authorization": f"Bearer {self.api_key}"}, + headers={"Authorization": f"Bearer {self.api_key.get_secret_value()}"}, base_url=self._BASE_URL, timeout=self.timeout, ) @@ -81,28 +82,28 @@ def _format_messages( @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation, - temperature=0.7, - max_tokens=256, - frequency_penalty=0, - presence_penalty=0, - stop="\n", - top_p=1.0, - ): + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + frequency_penalty: float = 0, + presence_penalty: float = 0, + stop: Optional[str] = "\n", + top_p: float = 1.0, + ) -> Conversation: """ Sends a synchronous request to the DeepSeek API to generate a chat response. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. 
temperature (float): Sampling temperature for randomness in response. max_tokens (int): Maximum number of tokens in the response. frequency_penalty (float): Penalty for frequent tokens in the response. presence_penalty (float): Penalty for new topics in the response. - stop (str): Token at which response generation should stop. + stop (Optional[str]): Token at which response generation should stop. top_p (float): Top-p sampling value for nucleus sampling. Returns: - Updated conversation object with the generated response added. + Conversation: Updated conversation object with the generated response added. """ formatted_messages = self._format_messages(conversation.history) payload = { @@ -125,28 +126,28 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation, - temperature=0.7, - max_tokens=256, - frequency_penalty=0, - presence_penalty=0, - stop="\n", - top_p=1.0, - ): + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + frequency_penalty: float = 0, + presence_penalty: float = 0, + stop: Optional[str] = "\n", + top_p: float = 1.0, + ) -> Conversation: """ Sends an asynchronous request to the DeepSeek API to generate a chat response. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for randomness in response. max_tokens (int): Maximum number of tokens in the response. frequency_penalty (float): Penalty for frequent tokens in the response. presence_penalty (float): Penalty for new topics in the response. - stop (str): Token at which response generation should stop. + stop (Optional[str]): Token at which response generation should stop. top_p (float): Top-p sampling value for nucleus sampling. Returns: - Updated conversation object with the generated response added. + Conversation: Updated conversation object with the generated response added. """ formatted_messages = self._format_messages(conversation.history) payload = { @@ -169,24 +170,24 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) def stream( self, - conversation, - temperature=0.7, - max_tokens=256, - frequency_penalty=0, - presence_penalty=0, - stop="\n", - top_p=1.0, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + frequency_penalty: float = 0, + presence_penalty: float = 0, + stop: Optional[str] = "\n", + top_p: float = 1.0, ) -> Iterator[str]: """ Streams the response token by token synchronously from the DeepSeek API. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for randomness in response. max_tokens (int): Maximum number of tokens in the response. frequency_penalty (float): Penalty for frequent tokens in the response. presence_penalty (float): Penalty for new topics in the response. - stop (str): Token at which response generation should stop. + stop (Optional[str]): Token at which response generation should stop. top_p (float): Top-p sampling value for nucleus sampling. 
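Editorial aside: a hedged sketch of this streaming call with the sampling knobs listed above, assuming the signature shown in this hunk; the environment-variable name is illustrative, and the penalty and stop parameters keep their defaults:

```python
import os

from swarmauri_standard.conversations.Conversation import Conversation
from swarmauri_standard.llms.DeepSeekModel import DeepSeekModel
from swarmauri_standard.messages.HumanMessage import HumanMessage

llm = DeepSeekModel(api_key=os.environ["DEEPSEEK_API_KEY"])  # illustrative

conversation = Conversation()
conversation.add_message(HumanMessage(content="Summarize nucleus sampling."))

# Every sampling knob in the signature above is a plain keyword argument;
# frequency_penalty, presence_penalty, and stop keep their defaults here.
for token in llm.stream(conversation, temperature=0.7, max_tokens=256, top_p=0.9):
    print(token, end="", flush=True)
```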
Yields: @@ -226,24 +227,24 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def astream( self, - conversation, - temperature=0.7, - max_tokens=256, - frequency_penalty=0, - presence_penalty=0, - stop="\n", - top_p=1.0, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + frequency_penalty: float = 0, + presence_penalty: float = 0, + stop: Optional[str] = "\n", + top_p: float = 1.0, ) -> AsyncIterator[str]: """ Asynchronously streams the response token by token from the DeepSeek API. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for randomness in response. max_tokens (int): Maximum number of tokens in the response. frequency_penalty (float): Penalty for frequent tokens in the response. presence_penalty (float): Penalty for new topics in the response. - stop (str): Token at which response generation should stop. + stop (Optional[str]): Token at which response generation should stop. top_p (float): Top-p sampling value for nucleus sampling. Yields: @@ -284,28 +285,28 @@ async def astream( def batch( self, - conversations: List, - temperature=0.7, - max_tokens=256, - frequency_penalty=0, - presence_penalty=0, - stop="\n", - top_p=1.0, - ) -> List: + conversations: List[Conversation], + temperature: float = 0.7, + max_tokens: int = 256, + frequency_penalty: float = 0, + presence_penalty: float = 0, + stop: Optional[str] = "\n", + top_p: float = 1.0, + ) -> List[Conversation]: """ Processes multiple conversations synchronously in a batch. Args: - conversations (List): List of conversation objects. + conversations (List[Conversation]): List of conversation objects. temperature (float): Sampling temperature for randomness in response. max_tokens (int): Maximum number of tokens in the response. frequency_penalty (float): Penalty for frequent tokens in the response. presence_penalty (float): Penalty for new topics in the response. - stop (str): Token at which response generation should stop. + stop (Optional[str]): Token at which response generation should stop. top_p (float): Top-p sampling value for nucleus sampling. Returns: - List: List of updated conversation objects with responses added. + List[Conversation]: List of updated conversation objects with responses added. """ return [ self.predict( @@ -322,34 +323,34 @@ def batch( async def abatch( self, - conversations: List, - temperature=0.7, - max_tokens=256, - frequency_penalty=0, - presence_penalty=0, - stop="\n", - top_p=1.0, - max_concurrent=5, - ) -> List: + conversations: List[Conversation], + temperature: float = 0.7, + max_tokens: int = 256, + frequency_penalty: float = 0, + presence_penalty: float = 0, + stop: Optional[str] = "\n", + top_p: float = 1.0, + max_concurrent: int = 5, + ) -> List[Conversation]: """ Processes multiple conversations asynchronously in parallel, with concurrency control. Args: - conversations (List): List of conversation objects. + conversations (List[Conversation]): List of conversation objects. temperature (float): Sampling temperature for randomness in response. max_tokens (int): Maximum number of tokens in the response. frequency_penalty (float): Penalty for frequent tokens in the response. presence_penalty (float): Penalty for new topics in the response. - stop (str): Token at which response generation should stop. + stop (Optional[str]): Token at which response generation should stop. 
top_p (float): Top-p sampling value for nucleus sampling. max_concurrent (int): Maximum number of concurrent tasks allowed. Returns: - List: List of updated conversation objects with responses added. + List[Conversation]: List of updated conversation objects with responses added. """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv): + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/GeminiToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/GeminiToolModel.py index d5309a453..9ec1aedbb 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/GeminiToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/GeminiToolModel.py @@ -2,13 +2,13 @@ import json import logging import warnings -from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Type import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage @@ -34,10 +34,11 @@ class GeminiToolModel(LLMBase): convert messages for compatible schema. This model supports synchronous and asynchronous operations. Attributes: - api_key (str): The API key used to authenticate requests to the Gemini API. + api_key (SecretStr): The API key used to authenticate requests to the Gemini API. allowed_models (List[str]): List of supported model names. name (str): The name of the Gemini model in use. type (Literal["GeminiToolModel"]): The model type, set to "GeminiToolModel". + timeout (float): Maximum timeout for API requests in seconds. Providers Resources: https://ai.google.dev/api/python/google/generativeai/protos/ """ @@ -75,27 +76,29 @@ class GeminiToolModel(LLMBase): ] ) - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any) -> None: """ Initializes the GeminiToolModel instance with the provided API key and model name. Args: - api_key (SecretStr): The API key used to authenticate requests to the Gemini API. - name (str): The name of the Gemini model in use. + *args (Any): Additional positional arguments. + **kwargs (Any): Additional keyword arguments, including 'allowed_models'. """ super().__init__(*args, **kwargs) self.allowed_models = self.allowed_models or self.get_allowed_models() self.name = self.allowed_models[0] - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools( + self, tools: Dict[str, Any] + ) -> Dict[str, List[Dict[str, Any]]]: """ Converts toolkit tools into a format compatible with the Gemini schema. Args: - tools (dict): A dictionary of tools to convert. + tools (Dict[str, Any]): A dictionary of tools to convert. Returns: - List[Dict[str, Any]]: List of converted tool definitions. + Dict[str, List[Dict[str, Any]]]: Dictionary with function declarations for Gemini. 
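Editorial aside: a hedged illustration of the return shape this annotation describes. The wrapper key follows the docstring above, while the declaration fields and values are an invented example rather than output captured from the converter:

```python
# Plausible shape of the value _schema_convert_tools now returns: converted
# tool schemas wrapped under a single "function_declarations" key.
tools_payload = {
    "function_declarations": [
        {
            "name": "get_weather",  # hypothetical tool name
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        }
    ]
}
```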
""" response = [GeminiSchemaConverter().convert(tools[tool]) for tool in tools] logging.info(response) @@ -103,7 +106,7 @@ def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: def _format_messages( self, messages: List[Type[MessageBase]] - ) -> List[Dict[str, str]]: + ) -> List[Dict[str, Any]]: """ Formats message history for compatibility with Gemini API, sanitizing content and updating roles. @@ -111,7 +114,7 @@ def _format_messages( messages (List[Type[MessageBase]]): A list of message objects. Returns: - List[Dict[str, str]]: List of formatted message dictionaries. + List[Dict[str, Any]]: List of formatted message dictionaries. """ message_properties = ["content", "role", "tool_call_id", "tool_calls"] sanitized_messages = [ @@ -132,17 +135,22 @@ def _format_messages( return sanitized_messages - def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase]: + def _process_tool_calls( + self, + tool_calls: List[Dict[str, Any]], + toolkit: Toolkit, + messages: List[Dict[str, Any]], + ) -> List[Dict[str, Any]]: """ Executes tool calls and appends results to the message list. Args: - tool_calls (List[Dict]): List of tool calls to process. + tool_calls (List[Dict[str, Any]]): List of tool calls to process. toolkit (Toolkit): Toolkit instance for handling tools. - messages (List[MessageBase]): List of messages to update. + messages (List[Dict[str, Any]]): List of messages to update. Returns: - List[MessageBase]: Updated list of messages. + List[Dict[str, Any]]: Updated list of messages. """ tool_results = {} @@ -179,7 +187,7 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase ) return messages - def _get_system_context(self, messages: List[Type[MessageBase]]) -> str: + def _get_system_context(self, messages: List[Type[MessageBase]]) -> Optional[str]: """ Extracts system context message from message history. @@ -187,7 +195,7 @@ def _get_system_context(self, messages: List[Type[MessageBase]]) -> str: messages (List[Type[MessageBase]]): List of message objects. Returns: - str: Content of the system context message. + Optional[str]: Content of the system context message, or None if no system message exists. """ system_context = None for message in messages: @@ -199,7 +207,7 @@ def _get_system_context(self, messages: List[Type[MessageBase]]) -> str: def predict( self, conversation: Conversation, - toolkit: Toolkit = None, + toolkit: Optional[Toolkit] = None, temperature: float = 0.7, max_tokens: int = 256, ) -> Conversation: @@ -208,8 +216,8 @@ def predict( Args: conversation (Conversation): The conversation instance. - toolkit (Toolkit): Optional toolkit for handling tools. - temperature (float): Sampling temperature. + toolkit (Optional[Toolkit]): Optional toolkit for handling tools. + temperature (float): Sampling temperature, controls randomness in generation. max_tokens (int): Maximum token limit for generation. Returns: @@ -285,7 +293,7 @@ def predict( async def apredict( self, conversation: Conversation, - toolkit: Toolkit = None, + toolkit: Optional[Toolkit] = None, temperature: float = 0.7, max_tokens: int = 256, ) -> Conversation: @@ -294,8 +302,8 @@ async def apredict( Args: conversation (Conversation): The conversation instance. - toolkit (Toolkit): Optional toolkit for handling tools. - temperature (float): Sampling temperature. + toolkit (Optional[Toolkit]): Optional toolkit for handling tools. + temperature (float): Sampling temperature, controls randomness in generation. 
            max_tokens (int): Maximum token limit for generation.
 
         Returns:
@@ -371,7 +379,7 @@
     def stream(
         self,
         conversation: Conversation,
-        toolkit: Toolkit = None,
+        toolkit: Optional[Toolkit] = None,
         temperature: float = 0.7,
         max_tokens: int = 256,
     ) -> Iterator[str]:
@@ -380,8 +388,8 @@
 
         Args:
             conversation (Conversation): The conversation instance.
-            toolkit (Toolkit): Optional toolkit for handling tools.
-            temperature (float): Sampling temperature.
+            toolkit (Optional[Toolkit]): Optional toolkit for handling tools.
+            temperature (float): Sampling temperature, controls randomness in generation.
             max_tokens (int): Maximum token limit for generation.
 
         Yields:
@@ -457,7 +465,7 @@
     async def astream(
         self,
         conversation: Conversation,
-        toolkit: Toolkit = None,
+        toolkit: Optional[Toolkit] = None,
         temperature: float = 0.7,
         max_tokens: int = 256,
     ) -> AsyncIterator[str]:
@@ -466,8 +474,8 @@
 
         Args:
             conversation (Conversation): The conversation instance.
-            toolkit (Toolkit): Optional toolkit for handling tools.
-            temperature (float): Sampling temperature.
+            toolkit (Optional[Toolkit]): Optional toolkit for handling tools.
+            temperature (float): Sampling temperature, controls randomness in generation.
             max_tokens (int): Maximum token limit for generation.
 
         Yields:
@@ -542,7 +550,7 @@
     def batch(
         self,
         conversations: List[Conversation],
-        toolkit: Toolkit = None,
+        toolkit: Optional[Toolkit] = None,
         temperature: float = 0.7,
         max_tokens: int = 256,
     ) -> List[Conversation]:
@@ -551,8 +559,8 @@
 
         Args:
             conversations (List[Conversation]): List of conversation instances.
-            toolkit (Toolkit): Optional toolkit for handling tools.
-            temperature (float): Sampling temperature.
+            toolkit (Optional[Toolkit]): Optional toolkit for handling tools.
+            temperature (float): Sampling temperature, controls randomness in generation.
             max_tokens (int): Maximum token limit for generation.
 
         Returns:
@@ -571,7 +579,7 @@
     async def abatch(
         self,
         conversations: List[Conversation],
-        toolkit: Toolkit = None,
+        toolkit: Optional[Toolkit] = None,
         temperature: float = 0.7,
         max_tokens: int = 256,
         max_concurrent: int = 5,
@@ -581,8 +589,8 @@
 
         Args:
             conversations (List[Conversation]): List of conversation instances.
-            toolkit (Toolkit): Optional toolkit for handling tools.
-            temperature (float): Sampling temperature.
+            toolkit (Optional[Toolkit]): Optional toolkit for handling tools.
+            temperature (float): Sampling temperature, controls randomness in generation.
             max_tokens (int): Maximum token limit for generation.
             max_concurrent (int): Maximum number of concurrent asynchronous tasks.
 
         Returns:
@@ -591,7 +599,7 @@
         """
         semaphore = asyncio.Semaphore(max_concurrent)
 
-        async def process_conversation(conv) -> Conversation:
+        async def process_conversation(conv: Conversation) -> Conversation:
             async with semaphore:
                 return await self.apredict(
                     conv,
diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/GroqModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/GroqModel.py
index dfe74aa89..1d4c61e04 100644
--- a/pkgs/swarmauri_standard/swarmauri_standard/llms/GroqModel.py
+++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/GroqModel.py
@@ -42,12 +42,12 @@ class GroqModel(LLMBase):
     timeout: float = 600.0
 
-    def __init__(self, **data):
+    def __init__(self, **data: Dict[str, Any]):
         """
-        Initialize the GroqAIAudio class with the provided data.
+        Initialize the GroqModel class with the provided data.
 
         Args:
-            **data: Arbitrary keyword arguments containing initialization data.
+ **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. """ super().__init__(**data) self._client = httpx.Client( diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/GroqToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/GroqToolModel.py index 65191ec0d..70d4d09f1 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/GroqToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/GroqToolModel.py @@ -1,13 +1,23 @@ import asyncio import json import warnings -from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type +from typing import ( + Any, + AsyncIterator, + Dict, + Iterator, + List, + Literal, + Optional, + Type, + Union, +) import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage @@ -35,10 +45,11 @@ class GroqToolModel(LLMBase): and handle tool-related functions. Attributes: - api_key (str): API key to authenticate with Groq API. + api_key (SecretStr): API key to authenticate with Groq API. allowed_models (List[str]): List of permissible model names. name (str): Default model name for predictions. - type (Literal): Type identifier for the model. + type (Literal["GroqToolModel"]): Type identifier for the model. + timeout (float): Request timeout in seconds. Provider Documentation: https://console.groq.com/docs/tool-use#models """ @@ -56,12 +67,12 @@ class GroqToolModel(LLMBase): timeout: float = 600.0 - def __init__(self, **data): + def __init__(self, **data: Any): """ Initialize the GroqAIAudio class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Any): Arbitrary keyword arguments containing initialization data. """ super().__init__(**data) self._client = httpx.Client( @@ -78,31 +89,36 @@ def __init__(self, **data): self.allowed_models = self.allowed_models or self.get_allowed_models() self.name = self.allowed_models[0] - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools(self, tools: Dict[str, Any]) -> List[Dict[str, Any]]: """ Converts toolkit items to API-compatible schema format. Parameters: - tools: Dictionary of tools to be converted. + tools (Dict[str, Any]): Dictionary of tools to be converted. Returns: List[Dict[str, Any]]: Formatted list of tool dictionaries. """ return [GroqSchemaConverter().convert(tools[tool]) for tool in tools] - def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase]: + def _process_tool_calls( + self, + tool_calls: List[Dict[str, Any]], + toolkit: Any, + messages: List[Dict[str, Any]], + ) -> List[Dict[str, Any]]: """ Processes a list of tool calls and appends the results to the messages list. Args: - tool_calls (list): A list of dictionaries representing tool calls. Each dictionary should contain + tool_calls (List[Dict[str, Any]]): A list of dictionaries representing tool calls. Each dictionary should contain a "function" key with a nested dictionary that includes the "name" and "arguments" of the function to be called, and an "id" key for the tool call identifier. - toolkit (object): An object that provides access to tools via the `get_tool_by_name` method. 
- messages (list): A list of message dictionaries to which the results of the tool calls will be appended. + toolkit (Any): An object that provides access to tools via the `get_tool_by_name` method. + messages (List[Dict[str, Any]]): A list of message dictionaries to which the results of the tool calls will be appended. Returns: - List[MessageBase]: The updated list of messages with the results of the tool calls appended. + List[Dict[str, Any]]: The updated list of messages with the results of the tool calls appended. """ if tool_calls: for tool_call in tool_calls: @@ -124,15 +140,15 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase def _format_messages( self, messages: List[Type[MessageBase]] - ) -> List[Dict[str, str]]: + ) -> List[Dict[str, Any]]: """ Formats messages for API compatibility. Parameters: - messages (List[MessageBase]): List of message instances to format. + messages (List[Type[MessageBase]]): List of message instances to format. Returns: - List[Dict[str, str]]: List of formatted message dictionaries. + List[Dict[str, Any]]: List of formatted message dictionaries. """ message_properties = ["content", "role", "name", "tool_call_id", "tool_calls"] formatted_messages = [ @@ -144,19 +160,19 @@ def _format_messages( @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Conversation: """ Makes a synchronous prediction using the Groq model. Parameters: conversation (Conversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -205,19 +221,19 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Conversation: """ Makes an asynchronous prediction using the Groq model. Parameters: conversation (Conversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -267,18 +283,18 @@ async def apredict( def stream( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Iterator[str]: """ Streams response from Groq model in real-time. Parameters: conversation (Conversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. 
+ toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -333,19 +349,19 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def astream( self, - conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Asynchronously streams response from Groq model. Parameters: conversation (Conversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -398,21 +414,20 @@ async def astream( def batch( self, conversations: List[Conversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> List[Conversation]: """ Processes a batch of conversations and generates responses for each sequentially. Args: conversations (List[Conversation]): List of conversations to process. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature for response diversity. max_tokens (int): Maximum tokens for each response. - top_p (float): Cumulative probability for nucleus sampling. - enable_json (bool): Whether to format the response as JSON. - stop (Optional[List[str]]): List of stop sequences for response termination. Returns: List[Conversation]: List of updated conversations with model responses. @@ -434,22 +449,21 @@ def batch( async def abatch( self, conversations: List[Conversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - max_concurrent=5, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, + max_concurrent: int = 5, ) -> List[Conversation]: """ Async method for processing a batch of conversations concurrently. Args: conversations (List[Conversation]): List of conversations to process. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature for response diversity. max_tokens (int): Maximum tokens for each response. - top_p (float): Cumulative probability for nucleus sampling. - enable_json (bool): Whether to format the response as JSON. - stop (Optional[List[str]]): List of stop sequences for response termination. max_concurrent (int): Maximum number of concurrent requests. 
Returns: @@ -460,7 +474,7 @@ async def abatch( semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv) -> Conversation: + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/HyperbolicModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/HyperbolicModel.py index 23c63b47c..824b0d5a8 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/HyperbolicModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/HyperbolicModel.py @@ -37,12 +37,12 @@ class HyperbolicModel(LLMBase): _BASE_URL: str = PrivateAttr(default="https://api.hyperbolic.xyz/v1/") _headers: Dict[str, str] = PrivateAttr(default=None) - def __init__(self, **data) -> None: + def __init__(self, **data: Dict[str, Any]) -> None: """ Initialize the HyperbolicModel class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. """ super().__init__(**data) self._headers = { diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/LlamaCppModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/LlamaCppModel.py index 118d44eb4..0e5d6ddaf 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/LlamaCppModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/LlamaCppModel.py @@ -1,13 +1,14 @@ import asyncio import json -from typing import AsyncIterator, Dict, Iterator, List, Literal, Optional +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase, SubclassUnion from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase, SubclassUnion +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -15,25 +16,17 @@ @ComponentBase.register_type(LLMBase, "LlamaCppModel") class LlamaCppModel(LLMBase): """ - A class for interacting with DeepInfra's model API for text generation. + A class for interacting with LlamaCpp's model API for text generation. This implementation uses httpx for both synchronous and asynchronous HTTP requests, providing support for predictions, streaming responses, and batch processing. 
Attributes: - api_key (str): DeepInfra API key for authentication - Can be obtained from: https://deepinfra.com/dash/api_keys - - allowed_models (List[str]): List of supported model identifiers on DeepInfra - Full list available at: https://deepinfra.com/models/text-generation - + api_key (Optional[SecretStr]): API key for authentication (optional for local LlamaCpp) + allowed_models (List[str]): List of supported model identifiers name (str): The currently selected model name - Defaults to "Qwen/Qwen2-72B-Instruct" - - type (Literal["DeepInfraModel"]): Type identifier for the model class - - Link to Allowed Models: https://deepinfra.com/models/text-generation - Link to API KEY: https://deepinfra.com/dash/api_keys + type (Literal["LlamaCppModel"]): Type identifier for the model class + timeout (float): Request timeout in seconds """ _BASE_URL: str = PrivateAttr("http://localhost:8080/v1") @@ -49,13 +42,13 @@ class LlamaCppModel(LLMBase): timeout: float = 600.0 - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: """ - Initializes the DeepInfraModel instance with the provided API key - and sets up httpx clients for both sync and async operations. + Initializes the LlamaCppModel instance and sets up httpx clients + for both sync and async operations. Args: - **data: Keyword arguments for model initialization. + **data (Dict[str, Any]): Keyword arguments for model initialization. """ super().__init__(**data) if self.api_key: @@ -99,9 +92,9 @@ def _create_request_payload( temperature: float, max_tokens: int, enable_json: bool, - stop: List[str] = None, + stop: Optional[List[str]] = None, stream: bool = False, - ) -> Dict: + ) -> Dict[str, Any]: """ Creates the payload for the API request. @@ -110,11 +103,11 @@ def _create_request_payload( temperature (float): Sampling temperature for the response. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Whether to enable JSON response format. - stop (List[str], optional): Stop sequences. + stop (Optional[List[str]]): Stop sequences. stream (bool): Whether to stream the response. Returns: - Dict: Payload for the API request. + Dict[str, Any]: Payload for the API request. """ payload = { "model": self.name, @@ -138,24 +131,24 @@ def _create_request_payload( @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation, - temperature=0.7, - max_tokens=256, - enable_json=False, - stop: List[str] = None, - ): + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + enable_json: bool = False, + stop: Optional[List[str]] = None, + ) -> Conversation: """ Sends a synchronous request to generate a response from the model. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Flag for enabling JSON response format. - stop (List[str], optional): Stop sequences for the response. + stop (Optional[List[str]]): Stop sequences for the response. Returns: - Updated conversation with the model's response. + Conversation: Updated conversation with the model's response. 
""" formatted_messages = self._format_messages(conversation.history) payload = self._create_request_payload( @@ -174,24 +167,24 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation, - temperature=0.7, - max_tokens=256, - enable_json=False, - stop: List[str] = None, - ): + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + enable_json: bool = False, + stop: Optional[List[str]] = None, + ) -> Conversation: """ Sends an asynchronous request to generate a response from the model. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Flag for enabling JSON response format. - stop (List[str], optional): Stop sequences for the response. + stop (Optional[List[str]]): Stop sequences for the response. Returns: - Updated conversation with the model's response. + Conversation: Updated conversation with the model's response. """ formatted_messages = self._format_messages(conversation.history) payload = self._create_request_payload( @@ -210,19 +203,19 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) def stream( self, - conversation, - temperature=0.7, - max_tokens=256, - stop: List[str] = None, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + stop: Optional[List[str]] = None, ) -> Iterator[str]: """ Streams response content from the model synchronously. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. - stop (List[str], optional): Stop sequences for the response. + stop (Optional[List[str]]): Stop sequences for the response. Yields: str: Chunks of content from the model's response. @@ -256,19 +249,19 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def astream( self, - conversation, - temperature=0.7, - max_tokens=256, - stop: List[str] = None, + conversation: Conversation, + temperature: float = 0.7, + max_tokens: int = 256, + stop: Optional[List[str]] = None, ) -> AsyncIterator[str]: """ Streams response content from the model asynchronously. Args: - conversation: The conversation object containing message history. + conversation (Conversation): The conversation object containing message history. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. - stop (List[str], optional): Stop sequences for the response. + stop (Optional[List[str]]): Stop sequences for the response. Yields: str: Chunks of content from the model's response. @@ -299,24 +292,24 @@ async def astream( def batch( self, - conversations: List, - temperature=0.7, - max_tokens=256, - enable_json=False, - stop: List[str] = None, - ) -> List: + conversations: List[Conversation], + temperature: float = 0.7, + max_tokens: int = 256, + enable_json: bool = False, + stop: Optional[List[str]] = None, + ) -> List[Conversation]: """ Processes multiple conversations in batch synchronously. Args: - conversations (List): List of conversation objects. + conversations (List[Conversation]): List of conversation objects. 
temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Flag for enabling JSON response format. - stop (List[str], optional): Stop sequences for responses. + stop (Optional[List[str]]): Stop sequences for responses. Returns: - List: List of updated conversations with model responses. + List[Conversation]: List of updated conversations with model responses. """ return [ self.predict( @@ -331,30 +324,30 @@ def batch( async def abatch( self, - conversations: List, - temperature=0.7, - max_tokens=256, - enable_json=False, - stop: List[str] = None, - max_concurrent=5, - ) -> List: + conversations: List[Conversation], + temperature: float = 0.7, + max_tokens: int = 256, + enable_json: bool = False, + stop: Optional[List[str]] = None, + max_concurrent: int = 5, + ) -> List[Conversation]: """ Processes multiple conversations asynchronously, with concurrency control. Args: - conversations (List): List of conversation objects. + conversations (List[Conversation]): List of conversation objects. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum number of tokens to generate. enable_json (bool): Flag for enabling JSON response format. - stop (List[str], optional): Stop sequences for responses. + stop (Optional[List[str]]): Stop sequences for responses. max_concurrent (int): Maximum number of concurrent tasks. Returns: - List: List of updated conversations with model responses. + List[Conversation]: List of updated conversations with model responses. """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv): + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/MistralModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/MistralModel.py index ee69656f7..5c0c0b1b7 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/MistralModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/MistralModel.py @@ -1,6 +1,6 @@ import asyncio import json -from typing import AsyncIterator, Dict, Iterator, List, Literal, Type +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type import httpx from pydantic import PrivateAttr, SecretStr @@ -40,12 +40,12 @@ class MistralModel(LLMBase): _async_client: httpx.AsyncClient = PrivateAttr(default=None) _BASE_URL: str = PrivateAttr(default="https://api.mistral.ai/v1/") - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]): """ Initialize the GroqAIAudio class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. 
""" super().__init__(**data) self._client = httpx.Client( diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/MistralToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/MistralToolModel.py index e2426c6ba..c933cfc40 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/MistralToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/MistralToolModel.py @@ -2,13 +2,13 @@ import json import logging import warnings -from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Type import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage @@ -35,10 +35,11 @@ class MistralToolModel(LLMBase): It supports processing single and batch conversations, as well as streaming responses. Attributes: - api_key (str): The API key for authenticating requests with the Mistral API. + api_key (SecretStr): The API key for authenticating requests with the Mistral API. allowed_models (List[str]): A list of supported model names for the Mistral API. name (str): The default model name to use for predictions. type (Literal["MistralToolModel"]): The type identifier for the model. + timeout (float): Maximum time to wait for API responses in seconds. Provider resources: https://docs.mistral.ai/capabilities/function_calling/#available-models """ @@ -52,12 +53,12 @@ class MistralToolModel(LLMBase): _async_client: httpx.AsyncClient = PrivateAttr(default=None) _BASE_URL: str = PrivateAttr(default="https://api.mistral.ai/v1/") - def __init__(self, **data) -> None: + def __init__(self, **data: Any) -> None: """ - Initializes the GroqToolModel instance, setting up headers for API requests. + Initializes the MistralToolModel instance, setting up headers for API requests. Parameters: - **data: Arbitrary keyword arguments for initialization. + **data (Any): Arbitrary keyword arguments for initialization. """ super().__init__(**data) self._client = httpx.Client( @@ -73,12 +74,12 @@ def __init__(self, **data) -> None: self.allowed_models = self.allowed_models or self.get_allowed_models() self.name = self.allowed_models[0] - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools(self, tools: Dict[str, Any]) -> List[Dict[str, Any]]: """ Convert a dictionary of tools to the schema format required by Mistral API. Args: - tools (dict): A dictionary of tool objects. + tools (Dict[str, Any]): A dictionary of tool objects. Returns: List[Dict[str, Any]]: A list of converted tool schemas. @@ -130,10 +131,10 @@ def get_allowed_models(self) -> List[str]: def predict( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[str] = None, + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, ) -> Conversation: """ @@ -141,8 +142,8 @@ def predict( Args: conversation (Conversation): The conversation object. - toolkit (Optional): The toolkit for tool assistance. - tool_choice (Optional): The tool choice strategy (default is "auto"). + toolkit (Optional[Any]): The toolkit for tool assistance. 
+ tool_choice (Optional[str]): The tool choice strategy (default is "auto"). temperature (float): The temperature for response variability. max_tokens (int): The maximum number of tokens for the response. safe_prompt (bool): Whether to use a safer prompt. @@ -207,10 +208,10 @@ def predict( async def apredict( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[str] = None, + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, ) -> Conversation: """ @@ -218,8 +219,8 @@ async def apredict( Args: conversation (Conversation): The conversation object. - toolkit (Optional): The toolkit for tool assistance. - tool_choice (Optional): The tool choice strategy. + toolkit (Optional[Any]): The toolkit for tool assistance. + tool_choice (Optional[str]): The tool choice strategy. temperature (float): The temperature for response variability. max_tokens (int): The maximum number of tokens for the response. safe_prompt (bool): Whether to use a safer prompt. @@ -281,10 +282,10 @@ async def apredict( def stream( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[str] = None, + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, ) -> Iterator[str]: """ @@ -295,8 +296,8 @@ def stream( Args: conversation (Conversation): The conversation object containing the message history. - toolkit (Optional): The toolkit for tool assistance, providing external tools to be invoked. - tool_choice (Optional): The tool choice strategy, such as "auto" or "manual". + toolkit (Optional[Any]): The toolkit for tool assistance, providing external tools to be invoked. + tool_choice (Optional[str]): The tool choice strategy, such as "auto" or "manual". temperature (float): The sampling temperature for response variability. max_tokens (int): The maximum number of tokens to generate in the response. safe_prompt (bool): Whether to use a safer prompt, reducing potential harmful content. @@ -377,10 +378,10 @@ def stream( async def astream( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[str] = None, + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, ) -> AsyncIterator[str]: """ @@ -391,8 +392,8 @@ async def astream( Args: conversation (Conversation): The conversation object containing the message history. - toolkit (Optional): The toolkit for tool assistance, providing external tools to be invoked. - tool_choice (Optional): The tool choice strategy, such as "auto" or "manual". + toolkit (Optional[Any]): The toolkit for tool assistance, providing external tools to be invoked. + tool_choice (Optional[str]): The tool choice strategy, such as "auto" or "manual". temperature (float): The sampling temperature for response variability. max_tokens (int): The maximum number of tokens to generate in the response. safe_prompt (bool): Whether to use a safer prompt, reducing potential harmful content. 
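With the hunks above, MistralToolModel's predict, apredict, stream, and astream all settle on concrete parameter types, with tool_choice carried as a plain strategy string. A minimal usage sketch against the new signatures follows; the HumanMessage and Toolkit import paths and the pre-registered tool are assumptions, not shown in this diff:

    from swarmauri_standard.conversations.Conversation import Conversation
    from swarmauri_standard.llms.MistralToolModel import MistralToolModel
    from swarmauri_standard.messages.HumanMessage import HumanMessage  # assumed path
    from swarmauri_standard.toolkits.Toolkit import Toolkit  # assumed path

    llm = MistralToolModel(api_key="MISTRAL_API_KEY")  # pydantic coerces str to SecretStr
    toolkit = Toolkit()  # assumed to already hold the tools the model may call

    conversation = Conversation()
    conversation.add_message(HumanMessage(content="What is 2 + 3?"))

    # tool_choice is a plain strategy string under the new Optional[str] annotation.
    updated = llm.predict(conversation, toolkit=toolkit, tool_choice="auto")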
@@ -472,10 +473,10 @@ async def astream( def batch( self, conversations: List[Conversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[str] = None, + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, ) -> List[Conversation]: """ @@ -483,11 +484,11 @@ def batch( Args: conversations (List[Conversation]): List of conversations to process. - temperature (float, optional): Sampling temperature for response generation. - max_tokens (int, optional): Maximum tokens for the response. - top_p (int, optional): Nucleus sampling parameter. - enable_json (bool, optional): If True, enables JSON output format. - safe_prompt (bool, optional): If True, enables safe prompting. + toolkit (Optional[Any]): The toolkit for tool assistance. + tool_choice (Optional[str]): The tool choice strategy. + temperature (float): Sampling temperature for response generation. + max_tokens (int): Maximum tokens for the response. + safe_prompt (bool): If True, enables safe prompting. Returns: List[Conversation]: List of updated conversations with generated responses. @@ -507,10 +508,10 @@ def batch( async def abatch( self, conversations: List[Conversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[str] = None, + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, max_concurrent: int = 5, ) -> List[Conversation]: @@ -519,19 +520,19 @@ async def abatch( Args: conversations (List[Conversation]): List of conversations to process. - temperature (float, optional): Sampling temperature for response generation. - max_tokens (int, optional): Maximum tokens for the response. - top_p (int, optional): Nucleus sampling parameter. - enable_json (bool, optional): If True, enables JSON output format. - safe_prompt (bool, optional): If True, enables safe prompting. - max_concurrent (int, optional): Maximum number of concurrent tasks. + toolkit (Optional[Any]): The toolkit for tool assistance. + tool_choice (Optional[str]): The tool choice strategy. + temperature (float): Sampling temperature for response generation. + max_tokens (int): Maximum tokens for the response. + safe_prompt (bool): If True, enables safe prompting. + max_concurrent (int): Maximum number of concurrent tasks. Returns: List[Conversation]: List of updated conversations with generated responses. 
""" semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv) -> Conversation: + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIModel.py index 8a34194aa..5b2ab70f2 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIModel.py @@ -5,9 +5,9 @@ import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage, UsageData @@ -47,12 +47,12 @@ class OpenAIModel(LLMBase): _BASE_URL: str = PrivateAttr(default="https://api.openai.com/v1/chat/completions") _headers: Dict[str, str] = PrivateAttr(default=None) - def __init__(self, **data) -> None: + def __init__(self, **data: Dict[str, Any]) -> None: """ Initialize the OpenAIModel class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. """ super().__init__(**data) self._headers = { @@ -96,7 +96,7 @@ def _format_messages( def _prepare_usage_data( self, - usage_data, + usage_data: UsageData, prompt_time: float = 0.0, completion_time: float = 0.0, ) -> UsageData: diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIReasonModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIReasonModel.py index 66eee95ef..e471cccee 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIReasonModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIReasonModel.py @@ -3,9 +3,9 @@ import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage, UsageData @@ -21,10 +21,13 @@ class OpenAIReasonModel(LLMBase): model, receive predictions, and stream responses. Attributes: - api_key (str): API key for authenticating requests to the Groq API. + api_key (SecretStr): API key for authenticating requests to the Groq API. allowed_models (List[str]): List of allowed model names that can be used. name (str): The default model name to use for predictions. type (Literal["OpenAIReasonModel"]): The type identifier for this class. + timeout (float): Timeout duration for API requests. + _BASE_URL (str): Base URL for the OpenAI API. + _headers (Dict[str, str]): Headers for API requests. Provider resources: https://platform.openai.com/docs/models """ @@ -37,12 +40,12 @@ class OpenAIReasonModel(LLMBase): _BASE_URL: str = PrivateAttr(default="https://api.openai.com/v1/chat/completions") _headers: Dict[str, str] = PrivateAttr(default=None) - def __init__(self, **data) -> None: + def __init__(self, **data: Dict[str, Any]) -> None: """ Initialize the OpenAIModel class with the provided data. 
Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. """ super().__init__(**data) self._headers = { @@ -60,7 +63,7 @@ def _format_messages( Formats conversation messages into the structure expected by the API. Args: - messages (List[MessageBase]): List of message objects from the conversation history. + messages (List[Type[MessageBase]]): List of message objects from the conversation history. Returns: List[Dict[str, Any]]: List of formatted message dictionaries. @@ -87,7 +90,7 @@ def _format_messages( def _prepare_usage_data( self, - usage_data, + usage_data: UsageData, prompt_time: float = 0.0, completion_time: float = 0.0, ) -> UsageData: @@ -95,7 +98,7 @@ def _prepare_usage_data( Prepare usage data by combining token counts and timing information. Args: - usage_data: Raw usage data containing token counts. + usage_data (UsageData): Raw usage data containing token counts. prompt_time (float): Time taken for prompt processing. completion_time (float): Time taken for response completion. @@ -191,9 +194,7 @@ async def apredict( Args: conversation (Conversation): Conversation object with message history. - temperature (float): Sampling temperature for response diversity. max_completion_tokens (int): Maximum tokens for the model's response. - top_p (float): Cumulative probability for nucleus sampling. enable_json (bool): Whether to format the response as JSON. stop (Optional[List[str]]): List of stop sequences for response termination. @@ -249,7 +250,19 @@ def stream( max_completion_tokens: int = 256, enable_json: bool = False, stop: Optional[List[str]] = None, - ): + ) -> None: + """ + Not implemented. + + Args: + conversation (Conversation): Conversation object with message history. + max_completion_tokens (int): Maximum tokens for the model's response. + enable_json (bool): Whether to format the response as JSON. + stop (Optional[List[str]]): List of stop sequences for response termination. + + Raises: + NotImplementedError: This method is not implemented. + """ raise NotImplementedError async def astream( @@ -258,7 +271,19 @@ async def astream( max_completion_tokens: int = 256, enable_json: bool = False, stop: Optional[List[str]] = None, - ): + ) -> None: + """ + Not implemented. + + Args: + conversation (Conversation): Conversation object with message history. + max_completion_tokens (int): Maximum tokens for the model's response. + enable_json (bool): Whether to format the response as JSON. + stop (Optional[List[str]]): List of stop sequences for response termination. + + Raises: + NotImplementedError: This method is not implemented. + """ raise NotImplementedError def batch( @@ -267,7 +292,19 @@ def batch( max_completion_tokens: int = 256, enable_json: bool = False, stop: Optional[List[str]] = None, - ): + ) -> None: + """ + Not implemented. + + Args: + conversations (List[Conversation]): List of conversations to process. + max_completion_tokens (int): Maximum tokens for the model's response. + enable_json (bool): Whether to format the response as JSON. + stop (Optional[List[str]]): List of stop sequences for response termination. + + Raises: + NotImplementedError: This method is not implemented. + """ raise NotImplementedError async def abatch( @@ -276,5 +313,17 @@ async def abatch( max_completion_tokens: int = 256, enable_json: bool = False, stop: Optional[List[str]] = None, - ): + ) -> None: + """ + Not implemented. 
+
+        Args:
+            conversations (List[Conversation]): List of conversations to process.
+            max_completion_tokens (int): Maximum tokens for the model's response.
+            enable_json (bool): Whether to format the response as JSON.
+            stop (Optional[List[str]]): List of stop sequences for response termination.
+
+        Raises:
+            NotImplementedError: This method is not implemented.
+        """
         raise NotImplementedError
diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIToolModel.py
index b5c92620f..4e8b74aaf 100644
--- a/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIToolModel.py
+++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/OpenAIToolModel.py
@@ -1,13 +1,13 @@
 import asyncio
 import json
 import warnings
-from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type
+from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Union

 import httpx
 from pydantic import PrivateAttr, SecretStr
+from swarmauri_base.ComponentBase import ComponentBase
 from swarmauri_base.llms.LLMBase import LLMBase
 from swarmauri_base.messages.MessageBase import MessageBase
-from swarmauri_base.ComponentBase import ComponentBase

 from swarmauri_standard.conversations.Conversation import Conversation
 from swarmauri_standard.messages.AgentMessage import AgentMessage
@@ -35,10 +35,11 @@ class OpenAIToolModel(LLMBase):
     and handle tool-related functions.

     Attributes:
-        api_key (str): API key to authenticate with Groq API.
+        api_key (SecretStr): API key to authenticate with the OpenAI API.
         allowed_models (List[str]): List of permissible model names.
         name (str): Default model name for predictions.
-        type (Literal): Type identifier for the model.
+        type (Literal["OpenAIToolModel"]): Type identifier for the model.
+        timeout (float): Timeout for API requests in seconds.

     Provider resources: https://platform.openai.com/docs/guides/function-calling/which-models-support-function-calling
     """
@@ -51,7 +52,7 @@ class OpenAIToolModel(LLMBase):
     _BASE_URL: str = PrivateAttr(default="https://api.openai.com/v1/chat/completions")
     _headers: Dict[str, str] = PrivateAttr(default=None)

-    def __init__(self, **data):
+    def __init__(self, **data: Any) -> None:
         """
         Initialize the OpenAIToolModel class with the provided data.

@@ -66,31 +67,34 @@ def __init__(self, **data):
         self.allowed_models = self.allowed_models or self.get_allowed_models()
         self.name = self.allowed_models[0]

-    def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]:
+    def _schema_convert_tools(self, tools: Dict[str, Any]) -> List[Dict[str, Any]]:
         return [OpenAISchemaConverter().convert(tools[tool]) for tool in tools]

-    def _format_messages(
-        self, messages: List[Type[MessageBase]]
-    ) -> List[Dict[str, str]]:
+    def _format_messages(self, messages: List[MessageBase]) -> List[Dict[str, Any]]:
         message_properties = ["content", "role", "name", "tool_call_id", "tool_calls"]
         return [
             message.model_dump(include=message_properties, exclude_none=True)
             for message in messages
         ]

-    def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase]:
+    def _process_tool_calls(
+        self,
+        tool_calls: List[Dict[str, Any]],
+        toolkit: Any,
+        messages: List[Dict[str, Any]],
+    ) -> List[Dict[str, Any]]:
         """
         Processes a list of tool calls and appends the results to the messages list.

         Args:
-            tool_calls (list): A list of dictionaries representing tool calls. Each dictionary should contain
+            tool_calls (List[Dict[str, Any]]): A list of dictionaries representing tool calls.
Each dictionary should contain a "function" key with a nested dictionary that includes the "name" and "arguments" of the function to be called, and an "id" key for the tool call identifier. - toolkit (object): An object that provides access to tools via the `get_tool_by_name` method. - messages (list): A list of message dictionaries to which the results of the tool calls will be appended. + toolkit (Any): An object that provides access to tools via the `get_tool_by_name` method. + messages (List[Dict[str, Any]]): A list of message dictionaries to which the results of the tool calls will be appended. Returns: - List[MessageBase]: The updated list of messages with the results of the tool calls appended. + List[Dict[str, Any]]: The updated list of messages with the results of the tool calls appended. """ if tool_calls: for tool_call in tool_calls: @@ -114,18 +118,18 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase def predict( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Conversation: """ Makes a synchronous prediction using the Groq model. Parameters: conversation (Conversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -172,18 +176,18 @@ def predict( async def apredict( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Conversation: """ Makes an asynchronous prediction using the OpenAI model. Parameters: conversation (Conversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -233,18 +237,18 @@ async def apredict( def stream( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Iterator[str]: """ Streams response from OpenAI model in real-time. Parameters: conversation (Conversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. 
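The widened _process_tool_calls signature documents plain message dictionaries end to end rather than MessageBase instances. A short sketch of the shapes it consumes and produces; the tool name, arguments, and result are illustrative only:

    import json

    # One entry of the tool_calls list, as emitted by the chat completions API.
    tool_call = {
        "id": "call_abc123",
        "function": {
            "name": "add",
            "arguments": json.dumps({"a": 2, "b": 3}),  # JSON-encoded kwargs
        },
    }

    # _process_tool_calls decodes the arguments, invokes the matching toolkit
    # tool, and appends the result as a plain dict with role "tool", which is
    # why the return annotation is List[Dict[str, Any]], not List[MessageBase].
    func_args = json.loads(tool_call["function"]["arguments"])
    tool_message = {
        "tool_call_id": tool_call["id"],
        "role": "tool",
        "name": tool_call["function"]["name"],
        "content": json.dumps(5),  # stand-in for the tool's actual return value
    }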
@@ -303,18 +307,18 @@ def stream( async def astream( self, conversation: Conversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Asynchronously streams response from Groq model. Parameters: conversation (Conversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -373,21 +377,20 @@ async def astream( def batch( self, conversations: List[Conversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> List[Conversation]: """ Processes a batch of conversations and generates responses for each sequentially. Args: conversations (List[Conversation]): List of conversations to process. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature for response diversity. max_tokens (int): Maximum tokens for each response. - top_p (float): Cumulative probability for nucleus sampling. - enable_json (bool): Whether to format the response as JSON. - stop (Optional[List[str]]): List of stop sequences for response termination. Returns: List[Conversation]: List of updated conversations with model responses. @@ -406,22 +409,21 @@ def batch( async def abatch( self, conversations: List[Conversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - max_concurrent=5, + toolkit: Optional[Any] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, + max_concurrent: int = 5, ) -> List[Conversation]: """ Async method for processing a batch of conversations concurrently. Args: conversations (List[Conversation]): List of conversations to process. + toolkit (Optional[Any]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature for response diversity. max_tokens (int): Maximum tokens for each response. - top_p (float): Cumulative probability for nucleus sampling. - enable_json (bool): Whether to format the response as JSON. - stop (Optional[List[str]]): List of stop sequences for response termination. max_concurrent (int): Maximum number of concurrent requests. 
Returns: @@ -429,7 +431,7 @@ async def abatch( """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv): + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/PerplexityModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/PerplexityModel.py index e81955f51..47ff79722 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/PerplexityModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/PerplexityModel.py @@ -1,12 +1,12 @@ import asyncio import json -from typing import AsyncIterator, Dict, Iterator, List, Literal, Optional, Type +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Type import httpx from pydantic import PrivateAttr, SecretStr +from swarmauri_base.ComponentBase import ComponentBase from swarmauri_base.llms.LLMBase import LLMBase from swarmauri_base.messages.MessageBase import MessageBase -from swarmauri_base.ComponentBase import ComponentBase from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage, UsageData @@ -23,10 +23,11 @@ class PerplexityModel(LLMBase): and batch processing of conversations using the Perplexity language models. Attributes: - api_key (str): API key for authenticating requests to the Perplexity API. + api_key (SecretStr): API key for authenticating requests to the Perplexity API. allowed_models (List[str]): List of allowed model names that can be used. name (str): The default model name to use for predictions. type (Literal["PerplexityModel"]): The type identifier for this class. + timeout (float): Timeout for API requests in seconds. Provider resources: https://docs.perplexity.ai/guides/model-cards Link to deprecated models: https://docs.perplexity.ai/changelog/changelog#model-deprecation-notice @@ -41,12 +42,12 @@ class PerplexityModel(LLMBase): _async_client: httpx.AsyncClient = PrivateAttr(default=None) _BASE_URL: str = PrivateAttr(default="https://api.perplexity.ai/chat/completions") - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: """ Initialize the GroqAIAudio class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. """ super().__init__(**data) self._client = httpx.Client( @@ -69,10 +70,10 @@ def _format_messages( Formats the list of message objects for the API request. Args: - messages: A list of message objects. + messages (List[Type[MessageBase]]): A list of message objects. Returns: - A list of formatted message dictionaries. + List[Dict[str, str]]: A list of formatted message dictionaries. """ message_properties = ["content", "role", "name"] formatted_messages = [ @@ -83,7 +84,7 @@ def _format_messages( def _prepare_usage_data( self, - usage_data, + usage_data: Dict[str, int], prompt_time: float = 0, completion_time: float = 0, ) -> UsageData: @@ -91,12 +92,12 @@ def _prepare_usage_data( Prepares usage data and calculates response timing. Args: - usage_data: The raw usage data from the API response. - prompt_time: Time taken for the prompt processing. - completion_time: Time taken for the completion processing. + usage_data (Dict[str, int]): The raw usage data from the API response. + prompt_time (float): Time taken for the prompt processing. 
+ completion_time (float): Time taken for the completion processing. Returns: - A UsageData object containing token and timing information. + UsageData: A UsageData object containing token and timing information. """ total_time = prompt_time + completion_time @@ -116,11 +117,11 @@ def _prepare_usage_data( def predict( self, conversation: Conversation, - temperature=0.7, - max_tokens=256, + temperature: float = 0.7, + max_tokens: int = 256, top_p: Optional[float] = None, top_k: Optional[int] = None, - return_citations: Optional[bool] = False, + return_citations: bool = False, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, ) -> Conversation: @@ -128,17 +129,17 @@ def predict( Makes a synchronous prediction request. Args: - conversation: The conversation object containing the history. - temperature: Sampling temperature for response generation. - max_tokens: Maximum number of tokens for the response. - top_p: Nucleus sampling parameter. - top_k: Top-k sampling parameter. - return_citations: Whether to return citations in the response. - presence_penalty: Penalty for new tokens based on presence. - frequency_penalty: Penalty for new tokens based on frequency. + conversation (Conversation): The conversation object containing the history. + temperature (float): Sampling temperature for response generation. Defaults to 0.7. + max_tokens (int): Maximum number of tokens for the response. Defaults to 256. + top_p (Optional[float]): Nucleus sampling parameter. If specified, `top_k` should not be set. + top_k (Optional[int]): Top-k sampling parameter. If specified, `top_p` should not be set. + return_citations (bool): Whether to return citations in the response. Defaults to False. + presence_penalty (Optional[float]): Penalty for new tokens based on presence. + frequency_penalty (Optional[float]): Penalty for new tokens based on frequency. Returns: - An updated Conversation object with the model's response. + Conversation: An updated Conversation object with the model's response. """ if top_p and top_k: @@ -181,11 +182,11 @@ def predict( async def apredict( self, conversation: Conversation, - temperature=0.7, - max_tokens=256, + temperature: float = 0.7, + max_tokens: int = 256, top_p: Optional[float] = None, top_k: Optional[int] = None, - return_citations: Optional[bool] = False, + return_citations: bool = False, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, ) -> Conversation: @@ -193,17 +194,17 @@ async def apredict( Makes an asynchronous prediction request. Args: - conversation: The conversation object containing the history. - temperature: Sampling temperature for response generation. - max_tokens: Maximum number of tokens for the response. - top_p: Nucleus sampling parameter. - top_k: Top-k sampling parameter. - return_citations: Whether to return citations in the response. - presence_penalty: Penalty for new tokens based on presence. - frequency_penalty: Penalty for new tokens based on frequency. + conversation (Conversation): The conversation object containing the history. + temperature (float): Sampling temperature for response generation. Defaults to 0.7. + max_tokens (int): Maximum number of tokens for the response. Defaults to 256. + top_p (Optional[float]): Nucleus sampling parameter. If specified, `top_k` should not be set. + top_k (Optional[int]): Top-k sampling parameter. If specified, `top_p` should not be set. + return_citations (bool): Whether to return citations in the response. Defaults to False. 
+ presence_penalty (Optional[float]): Penalty for new tokens based on presence. + frequency_penalty (Optional[float]): Penalty for new tokens based on frequency. Returns: - An updated Conversation object with the model's response. + Conversation: An updated Conversation object with the model's response. """ if top_p and top_k: @@ -248,11 +249,11 @@ async def apredict( def stream( self, conversation: Conversation, - temperature=0.7, - max_tokens=256, + temperature: float = 0.7, + max_tokens: int = 256, top_p: Optional[float] = None, top_k: Optional[int] = None, - return_citations: Optional[bool] = False, + return_citations: bool = False, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, ) -> Iterator[str]: @@ -261,13 +262,13 @@ def stream( Args: conversation (Conversation): The conversation object containing message history. - temperature (float, optional): Sampling temperature for response generation. Defaults to 0.7. - max_tokens (int, optional): Maximum number of tokens in the generated response. Defaults to 256. - top_p (Optional[float], optional): Nucleus sampling parameter. If specified, `top_k` should not be set. - top_k (Optional[int], optional): Top-k sampling parameter. If specified, `top_p` should not be set. - return_citations (Optional[bool], optional): Whether to return citations in the response. Defaults to False. - presence_penalty (Optional[float], optional): Penalty for introducing new topics. Defaults to None. - frequency_penalty (Optional[float], optional): Penalty for repeating existing tokens. Defaults to None. + temperature (float): Sampling temperature for response generation. Defaults to 0.7. + max_tokens (int): Maximum number of tokens in the generated response. Defaults to 256. + top_p (Optional[float]): Nucleus sampling parameter. If specified, `top_k` should not be set. + top_k (Optional[int]): Top-k sampling parameter. If specified, `top_p` should not be set. + return_citations (bool): Whether to return citations in the response. Defaults to False. + presence_penalty (Optional[float]): Penalty for introducing new topics. Defaults to None. + frequency_penalty (Optional[float]): Penalty for repeating existing tokens. Defaults to None. Yields: str: Chunks of response content as the data is streamed. @@ -302,6 +303,7 @@ def stream( response.raise_for_status() message_content = "" + usage_data = {} with DurationManager() as completion_timer: for chunk in response.iter_lines(): @@ -315,7 +317,7 @@ def stream( ) message_content += delta_content yield delta_content - if chunk_data["usage"]: + if chunk_data.get("usage"): usage_data = chunk_data["usage"] usage = self._prepare_usage_data( @@ -328,11 +330,11 @@ def stream( async def astream( self, conversation: Conversation, - temperature=0.7, - max_tokens=256, + temperature: float = 0.7, + max_tokens: int = 256, top_p: Optional[float] = None, top_k: Optional[int] = None, - return_citations: Optional[bool] = False, + return_citations: bool = False, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, ) -> AsyncIterator[str]: @@ -341,13 +343,13 @@ async def astream( Args: conversation (Conversation): The conversation object containing message history. - temperature (float, optional): Sampling temperature for response generation. Defaults to 0.7. - max_tokens (int, optional): Maximum number of tokens in the generated response. Defaults to 256. - top_p (Optional[float], optional): Nucleus sampling parameter. If specified, `top_k` should not be set. 
- top_k (Optional[int], optional): Top-k sampling parameter. If specified, `top_p` should not be set. - return_citations (Optional[bool], optional): Whether to return citations in the response. Defaults to False. - presence_penalty (Optional[float], optional): Penalty for introducing new topics. Defaults to None. - frequency_penalty (Optional[float], optional): Penalty for repeating existing tokens. Defaults to None. + temperature (float): Sampling temperature for response generation. Defaults to 0.7. + max_tokens (int): Maximum number of tokens in the generated response. Defaults to 256. + top_p (Optional[float]): Nucleus sampling parameter. If specified, `top_k` should not be set. + top_k (Optional[int]): Top-k sampling parameter. If specified, `top_p` should not be set. + return_citations (bool): Whether to return citations in the response. Defaults to False. + presence_penalty (Optional[float]): Penalty for introducing new topics. Defaults to None. + frequency_penalty (Optional[float]): Penalty for repeating existing tokens. Defaults to None. Yields: str: Chunks of response content as the data is streamed asynchronously. @@ -399,11 +401,11 @@ async def astream( def batch( self, conversations: List[Conversation], - temperature=0.7, - max_tokens=256, + temperature: float = 0.7, + max_tokens: int = 256, top_p: Optional[float] = None, top_k: Optional[int] = None, - return_citations: Optional[bool] = False, + return_citations: bool = False, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, ) -> List[Conversation]: @@ -412,13 +414,13 @@ def batch( Args: conversations (List[Conversation]): List of conversation objects. - temperature (float, optional): Sampling temperature for response generation. Defaults to 0.7. - max_tokens (int, optional): Maximum number of tokens in the generated response. Defaults to 256. - top_p (Optional[float], optional): Nucleus sampling parameter. If specified, `top_k` should not be set. - top_k (Optional[int], optional): Top-k sampling parameter. If specified, `top_p` should not be set. - return_citations (Optional[bool], optional): Whether to return citations in the response. Defaults to False. - presence_penalty (Optional[float], optional): Penalty for introducing new topics. Defaults to None. - frequency_penalty (Optional[float], optional): Penalty for repeating existing tokens. Defaults to None. + temperature (float): Sampling temperature for response generation. Defaults to 0.7. + max_tokens (int): Maximum number of tokens in the generated response. Defaults to 256. + top_p (Optional[float]): Nucleus sampling parameter. If specified, `top_k` should not be set. + top_k (Optional[int]): Top-k sampling parameter. If specified, `top_p` should not be set. + return_citations (bool): Whether to return citations in the response. Defaults to False. + presence_penalty (Optional[float]): Penalty for introducing new topics. Defaults to None. + frequency_penalty (Optional[float]): Penalty for repeating existing tokens. Defaults to None. Returns: List[Conversation]: List of updated conversation objects after processing. 
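# Illustrative sketch (not part of the upstream change): the docstrings above
# state that `top_p` and `top_k` are mutually exclusive, and the implementation
# guards with `if top_p and top_k:`. A standalone version of that contract,
# using a hypothetical helper name, might look like this:
from typing import Optional

def check_sampling_params(top_p: Optional[float] = None, top_k: Optional[int] = None) -> None:
    # Mirror the guard shown in `predict`: reject calls that set both knobs.
    if top_p and top_k:
        raise ValueError("Specify either top_p or top_k, not both.")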
@@ -440,11 +442,11 @@ def batch( async def abatch( self, conversations: List[Conversation], - temperature=0.7, - max_tokens=256, + temperature: float = 0.7, + max_tokens: int = 256, top_p: Optional[float] = None, top_k: Optional[int] = None, - return_citations: Optional[bool] = False, + return_citations: bool = False, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, max_concurrent: int = 5, # Maximum concurrent tasks @@ -454,21 +456,21 @@ async def abatch( Args: conversations (List[Conversation]): List of conversation objects. - temperature (float, optional): Sampling temperature for response generation. Defaults to 0.7. - max_tokens (int, optional): Maximum number of tokens in the generated response. Defaults to 256. - top_p (Optional[float], optional): Nucleus sampling parameter. If specified, `top_k` should not be set. - top_k (Optional[int], optional): Top-k sampling parameter. If specified, `top_p` should not be set. - return_citations (Optional[bool], optional): Whether to return citations in the response. Defaults to False. - presence_penalty (Optional[float], optional): Penalty for introducing new topics. Defaults to None. - frequency_penalty (Optional[float], optional): Penalty for repeating existing tokens. Defaults to None. - max_concurrent (int, optional): Maximum number of concurrent tasks. Defaults to 5. + temperature (float): Sampling temperature for response generation. Defaults to 0.7. + max_tokens (int): Maximum number of tokens in the generated response. Defaults to 256. + top_p (Optional[float]): Nucleus sampling parameter. If specified, `top_k` should not be set. + top_k (Optional[int]): Top-k sampling parameter. If specified, `top_p` should not be set. + return_citations (bool): Whether to return citations in the response. Defaults to False. + presence_penalty (Optional[float]): Penalty for introducing new topics. Defaults to None. + frequency_penalty (Optional[float]): Penalty for repeating existing tokens. Defaults to None. + max_concurrent (int): Maximum number of concurrent tasks. Defaults to 5. Returns: List[Conversation]: List of updated conversation objects after processing asynchronously. """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv) -> Conversation: + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conversation=conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/llms/WhisperLargeModel.py b/pkgs/swarmauri_standard/swarmauri_standard/llms/WhisperLargeModel.py index 1ff40e055..5f5e41904 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/llms/WhisperLargeModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/llms/WhisperLargeModel.py @@ -1,6 +1,6 @@ import asyncio import warnings -from typing import Dict, List, Literal +from typing import Any, Dict, List, Literal import httpx from pydantic import PrivateAttr, SecretStr @@ -52,12 +52,12 @@ class WhisperLargeModel(LLMBase): _client: httpx.Client = PrivateAttr() _header: Dict[str, str] = PrivateAttr(default=None) - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]): """ Initialize the WhisperLargeModel instance. Args: - **data: Keyword arguments containing model configuration. + **data (Dict[str, Any]): Keyword arguments containing model configuration. Must include 'api_key' for HuggingFace API authentication. 
Raises: diff --git a/pkgs/swarmauri_standard/swarmauri_standard/parsers/HTMLTagStripParser.py b/pkgs/swarmauri_standard/swarmauri_standard/parsers/HTMLTagStripParser.py index e19f0ab77..f2f133e5a 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/parsers/HTMLTagStripParser.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/parsers/HTMLTagStripParser.py @@ -1,6 +1,6 @@ import html import re -from typing import Literal +from typing import List, Literal from swarmauri_standard.documents.Document import Document from swarmauri_base.parsers.ParserBase import ParserBase from swarmauri_base.ComponentBase import ComponentBase @@ -15,7 +15,7 @@ class HTMLTagStripParser(ParserBase): type: Literal["HTMLTagStripParser"] = "HTMLTagStripParser" - def parse(self, data: str): + def parse(self, data: str) -> List[Document]: """ Strips HTML tags from input data and unescapes HTML content. @@ -23,7 +23,7 @@ def parse(self, data: str): data (str): The HTML content to be parsed. Returns: - List[IDocument]: A list containing a single IDocument instance of the stripped text. + List[Document]: A list containing a single IDocument instance of the stripped text. """ # Ensure that input is a string diff --git a/pkgs/swarmauri_standard/swarmauri_standard/stt/GroqSTT.py b/pkgs/swarmauri_standard/swarmauri_standard/stt/GroqSTT.py index 9ac3bd531..2ce0f247a 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/stt/GroqSTT.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/stt/GroqSTT.py @@ -1,5 +1,5 @@ import asyncio -from typing import Dict, List, Literal +from typing import Any, Dict, List, Literal import aiofiles import httpx @@ -33,12 +33,12 @@ class GroqSTT(STTBase): _async_client: httpx.AsyncClient = PrivateAttr(default=None) _BASE_URL: str = PrivateAttr(default="https://api.groq.com/openai/v1/audio/") - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]): """ Initialize the GroqSTT class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. """ super().__init__(**data) self._client = httpx.Client( diff --git a/pkgs/swarmauri_standard/swarmauri_standard/stt/OpenaiSTT.py b/pkgs/swarmauri_standard/swarmauri_standard/stt/OpenaiSTT.py index 9c939f0ca..e4463ea41 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/stt/OpenaiSTT.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/stt/OpenaiSTT.py @@ -1,5 +1,5 @@ import asyncio -from typing import Dict, List, Literal +from typing import Any, Dict, List, Literal import aiofiles import httpx @@ -35,12 +35,12 @@ class OpenaiSTT(STTBase): _async_client: httpx.AsyncClient = PrivateAttr(default=None) _BASE_URL: str = PrivateAttr(default="https://api.openai.com/v1/audio/") - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]): """ Initialize the OpenaiSTT class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. 
""" super().__init__(**data) self._client = httpx.Client( diff --git a/pkgs/swarmauri_standard/swarmauri_standard/stt/WhisperLargeSTT.py b/pkgs/swarmauri_standard/swarmauri_standard/stt/WhisperLargeSTT.py index 53c6f226b..61ad3f73f 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/stt/WhisperLargeSTT.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/stt/WhisperLargeSTT.py @@ -1,5 +1,5 @@ import asyncio -from typing import Dict, List, Literal +from typing import Any, Dict, List, Literal import httpx from pydantic import PrivateAttr, SecretStr @@ -42,12 +42,12 @@ class WhisperLargeSTT(STTBase): _client: httpx.Client = PrivateAttr() _header: Dict[str, str] = PrivateAttr(default=None) - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]): """ Initialize the WhisperLargeSTT instance. Args: - **data: Keyword arguments containing model configuration. + **data (Dict[str, Any]): Keyword arguments containing model configuration. Must include 'api_key' for HuggingFace API authentication. Raises: diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/AnthropicToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/AnthropicToolModel.py index a73555f74..e005a9250 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/AnthropicToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/AnthropicToolModel.py @@ -6,16 +6,20 @@ import httpx from pydantic import PrivateAttr from swarmauri_base.ComponentBase import ComponentBase +from swarmauri_base.DynamicBase import SubclassUnion from swarmauri_base.messages.MessageBase import MessageBase from swarmauri_base.schema_converters.SchemaConverterBase import SchemaConverterBase from swarmauri_base.tool_llms.ToolLLMBase import ToolLLMBase +from swarmauri_base.tools.ToolBase import ToolBase from swarmauri_core.conversations.IConversation import IConversation +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.messages.FunctionMessage import FunctionMessage from swarmauri_standard.schema_converters.AnthropicSchemaConverter import ( AnthropicSchemaConverter, ) +from swarmauri_standard.toolkits.Toolkit import Toolkit from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -44,7 +48,7 @@ class AnthropicToolModel(ToolLLMBase): name: str = "" type: Literal["AnthropicToolModel"] = "AnthropicToolModel" - def __init__(self, **data): + def __init__(self, **data: dict[str, Any]): super().__init__(**data) self._headers = { "Content-Type": "application/json", @@ -69,7 +73,9 @@ def get_schema_converter(self) -> Type[SchemaConverterBase]: """ return AnthropicSchemaConverter - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools( + self, tools: Dict[str, SubclassUnion[ToolBase]] + ) -> List[Dict[str, Any]]: """ Converts a toolkit's tools to the Anthropic-compatible schema format. @@ -105,7 +111,9 @@ def _format_messages( ] return formatted_messages - def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase]: + def _process_tool_calls( + self, tool_calls: List[Any], toolkit: Toolkit, messages: List[Type[MessageBase]] + ) -> List[Type[MessageBase]]: """ Processes tool calls from Anthropic API response and adds the results to messages. 
@@ -155,13 +163,13 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase def predict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], multiturn: bool = True, - temperature=0.7, - max_tokens=1024, - ) -> IConversation: + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> Conversation: """ Predicts the response based on the given conversation and optional toolkit. @@ -248,12 +256,12 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], multiturn: bool = True, - temperature=0.7, - max_tokens=1024, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> IConversation: """ Asynchronous version of the `predict` method to handle concurrent processing of requests. @@ -340,11 +348,11 @@ async def apredict( def stream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Iterator[str]: """ Streams the response for a conversation in real-time, yielding text as it is received. @@ -446,11 +454,11 @@ def stream( async def astream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Asynchronously streams the response for a conversation, yielding text in real-time. @@ -545,11 +553,11 @@ async def astream( def batch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversations: List[Conversation], + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, ) -> List[IConversation]: """ Processes a batch of conversations in a synchronous manner. 
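# A minimal sketch of the concurrency pattern the `abatch` methods in this diff
# share: an asyncio.Semaphore caps in-flight `apredict` calls at `max_concurrent`,
# and `gather` preserves input order. `llm` is assumed to be any of the tool
# models edited here.
import asyncio

async def bounded_abatch(llm, conversations, toolkit, tool_choice, max_concurrent=5):
    semaphore = asyncio.Semaphore(max_concurrent)

    async def process_conversation(conv):
        async with semaphore:  # at most max_concurrent requests in flight
            return await llm.apredict(conv, toolkit=toolkit, tool_choice=tool_choice)

    return await asyncio.gather(*(process_conversation(c) for c in conversations))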
@@ -578,11 +586,11 @@ def batch(
 
     async def abatch(
         self,
-        conversations: List[IConversation],
-        toolkit=None,
-        tool_choice=None,
-        temperature=0.7,
-        max_tokens=1024,
+        conversations: List[Conversation],
+        toolkit: Toolkit,
+        tool_choice: dict[str, Any],
+        temperature: float = 0.7,
+        max_tokens: int = 1024,
         max_concurrent=5,
     ) -> List[IConversation]:
         """
diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/CohereToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/CohereToolModel.py
index 3f73175b3..dd72e81f8 100644
--- a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/CohereToolModel.py
+++ b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/CohereToolModel.py
@@ -7,16 +7,21 @@
 import httpx
 from pydantic import PrivateAttr
 from swarmauri_base.ComponentBase import ComponentBase
+from swarmauri_base.DynamicBase import SubclassUnion
 from swarmauri_base.messages.MessageBase import MessageBase
 from swarmauri_base.schema_converters.SchemaConverterBase import SchemaConverterBase
 from swarmauri_base.tool_llms.ToolLLMBase import ToolLLMBase
+from swarmauri_base.tools.ToolBase import ToolBase
+from swarmauri_core.conversations.IConversation import IConversation
 
+from swarmauri_standard.conversations.Conversation import Conversation
 from swarmauri_standard.messages.AgentMessage import AgentMessage, UsageData
 from swarmauri_standard.messages.FunctionMessage import FunctionMessage
 from swarmauri_standard.messages.HumanMessage import HumanMessage, contentItem
 from swarmauri_standard.schema_converters.CohereSchemaConverter import (
     CohereSchemaConverter,
 )
+from swarmauri_standard.toolkits.Toolkit import Toolkit
 from swarmauri_standard.utils.duration_manager import DurationManager
 from swarmauri_standard.utils.retry_decorator import retry_on_status_codes
 
@@ -24,7 +29,8 @@
 @ComponentBase.register_type(ToolLLMBase, "CohereToolModel")
 class CohereToolModel(ToolLLMBase):
     """
-    A language model implementation for interacting with Cohere's API, specifically designed for tool-augmented conversations.
+    A language model implementation for interacting with Cohere's API,
+    specifically designed for tool-augmented conversations.
 
     This class provides both synchronous and asynchronous methods for generating responses,
     handling tool calls, and managing conversations with the Cohere API. It supports streaming
@@ -47,12 +53,12 @@ class CohereToolModel(ToolLLMBase):
     name: str = ""
     type: Literal["CohereToolModel"] = "CohereToolModel"
 
-    def __init__(self, **data):
+    def __init__(self, **data: Dict[str, Any]) -> None:
         """
         Initialize the CohereToolModel with the provided configuration.
 
         Args:
-            **data: Keyword arguments for configuring the model, including api_key
+            **data (Dict[str, Any]): Keyword arguments for configuring the model, including api_key
         """
         super().__init__(**data)
         self._headers = {
@@ -78,7 +84,9 @@ def get_schema_converter(self) -> Type[SchemaConverterBase]:
         """
         return CohereSchemaConverter
 
-    def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]:
+    def _schema_convert_tools(
+        self, tools: Dict[str, SubclassUnion[ToolBase]]
+    ) -> List[Dict[str, Any]]:
         """
         Convert tool definitions to Cohere's expected schema format.
 
@@ -165,9 +173,9 @@ def _prepare_usage_data(
         Prepare usage statistics from API response and timing data.
Args: - usage_data: Dictionary containing token usage information from the API - prompt_time: Time taken to send the prompt - completion_time: Time taken to receive the completion + usage_data (Dict[str, Any]): Dictionary containing token usage information from the API + prompt_time (float): Time taken to send the prompt + completion_time (float): Time taken to receive the completion Returns: UsageData: Object containing formatted usage statistics @@ -188,15 +196,17 @@ def _prepare_usage_data( ) return usage - def _ensure_conversation_has_message(self, conversation): + def _ensure_conversation_has_message( + self, conversation: Conversation + ) -> Conversation: """ Ensure that a conversation has at least one message by adding a default message if empty. Args: - conversation: The conversation to check + conversation (Conversation): The conversation to check Returns: - The conversation, potentially with an added default message + Conversation: The conversation, potentially with an added default message """ if not conversation.history: conversation.add_message( @@ -204,16 +214,18 @@ def _ensure_conversation_has_message(self, conversation): ) return conversation - def _process_tool_calls(self, response_data, toolkit): + def _process_tool_calls( + self, response_data: Dict[str, Any], toolkit: Toolkit + ) -> tuple[List[Dict[str, Any]], List[FunctionMessage]]: """ Process tool calls from the model's response and execute them using the provided toolkit. Args: - response_data: The response data containing tool calls - toolkit: The toolkit containing the tools to execute + response_data (Dict[str, Any]): The response data containing tool calls + toolkit (Toolkit): The toolkit containing the tools to execute Returns: - List[Dict[str, Any]]: Results of the tool executions + tuple[List[Dict[str, Any]], List[FunctionMessage]]: Results of the tool executions and tool messages """ tool_results = [] tool_calls = response_data.get("tool_calls", []) @@ -284,23 +296,24 @@ def _prepare_chat_payload( @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation, - toolkit=None, - temperature=0.3, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, multiturn: bool = True, - ): + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> IConversation: """ Generate a response for a conversation synchronously. Args: - conversation: The conversation to generate a response for - toolkit: Optional toolkit containing available tools - temperature (float, optional): Sampling temperature - max_tokens (int, optional): Maximum number of tokens to generate + conversation (Conversation): The conversation to generate a response for + toolkit (Toolkit): Toolkit containing available tools + multiturn (bool, optional): Whether to use multi-turn conversation. Defaults to True + temperature (float, optional): Sampling temperature. Defaults to 0.7 + max_tokens (int, optional): Maximum number of tokens to generate. 
Defaults to 1024 Returns: - The updated conversation with the model's response + IConversation: The updated conversation with the model's response """ conversation = self._ensure_conversation_has_message(conversation) formatted_messages = self._format_messages(conversation.history) @@ -355,16 +368,20 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) def stream( - self, conversation, toolkit=None, temperature=0.3, max_tokens=1024 + self, + conversation: Conversation, + toolkit: Toolkit, + temperature: float = 0.3, + max_tokens: int = 1024, ) -> Iterator[str]: """ Stream a response for a conversation synchronously. Args: - conversation: The conversation to generate a response for - toolkit: Optional toolkit containing available tools - temperature (float, optional): Sampling temperature - max_tokens (int, optional): Maximum number of tokens to generate + conversation (Conversation): The conversation to generate a response for + toolkit (Toolkit): Toolkit containing available tools + temperature (float, optional): Sampling temperature. Defaults to 0.3 + max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 1024 Returns: Iterator[str]: An iterator yielding response chunks @@ -428,23 +445,24 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation, - toolkit=None, - temperature=0.3, - max_tokens=1024, - multiturn=True, - ): + conversation: Conversation, + toolkit: Toolkit, + multiturn: bool = True, + temperature: float = 0.3, + max_tokens: int = 1024, + ) -> IConversation: """ Generate a response for a conversation asynchronously. Args: - conversation: The conversation to generate a response for - toolkit: Optional toolkit containing available tools - temperature (float, optional): Sampling temperature - max_tokens (int, optional): Maximum number of tokens to generate + conversation (Conversation): The conversation to generate a response for + toolkit (Toolkit): Toolkit containing available tools + multiturn (bool, optional): Whether to use multi-turn conversation. Defaults to True + temperature (float, optional): Sampling temperature. Defaults to 0.3 + max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 1024 Returns: - The updated conversation with the model's response + IConversation: The updated conversation with the model's response """ conversation = self._ensure_conversation_has_message(conversation) formatted_messages = self._format_messages(conversation.history) @@ -498,16 +516,20 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) async def astream( - self, conversation, toolkit=None, temperature=0.3, max_tokens=1024 + self, + conversation: Conversation, + toolkit: Toolkit, + temperature: float = 0.3, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Stream a response for a conversation asynchronously. Args: - conversation: The conversation to generate a response for - toolkit: Optional toolkit containing available tools - temperature (float, optional): Sampling temperature - max_tokens (int, optional): Maximum number of tokens to generate + conversation (Conversation): The conversation to generate a response for + toolkit (Toolkit): Toolkit containing available tools + temperature (float, optional): Sampling temperature. Defaults to 0.3 + max_tokens (int, optional): Maximum number of tokens to generate. 
Defaults to 1024 Returns: AsyncIterator[str]: An async iterator yielding response chunks @@ -575,8 +597,12 @@ async def astream( conversation.add_message(AgentMessage(content=full_content), usage=usage) def batch( - self, conversations: List, toolkit=None, temperature=0.3, max_tokens=1024 - ) -> List: + self, + conversations: List[Conversation], + toolkit: Toolkit, + temperature: float = 0.3, + max_tokens: int = 1024, + ) -> List[IConversation]: """ Process multiple conversations in batch mode synchronously. @@ -585,15 +611,15 @@ def batch( parameters. Args: - conversations (List): A list of conversation objects to process - toolkit (optional): The toolkit containing available tools for the model + conversations (List[Conversation]): A list of conversation objects to process + toolkit (Toolkit): The toolkit containing available tools for the model temperature (float, optional): The sampling temperature for response generation. Defaults to 0.3 max_tokens (int, optional): The maximum number of tokens to generate for each response. Defaults to 1024 Returns: - List: A list of processed conversations with their respective responses + List[IConversation]: A list of processed conversations with their respective responses """ return [ self.predict( @@ -604,12 +630,12 @@ def batch( async def abatch( self, - conversations: List, - toolkit=None, - temperature=0.3, - max_tokens=1024, - max_concurrent=5, - ) -> List: + conversations: List[Conversation], + toolkit: Toolkit, + temperature: float = 0.7, + max_tokens: int = 1024, + max_concurrent: int = 5, + ) -> List[IConversation]: """ Process multiple conversations in batch mode asynchronously. @@ -618,22 +644,21 @@ async def abatch( overwhelming the API service while still maintaining efficient processing. Args: - conversations (List): A list of conversation objects to process - toolkit (optional): The toolkit containing available tools for the model + conversations (List[Conversation]): A list of conversation objects to process + toolkit (Toolkit): The toolkit containing available tools for the model temperature (float, optional): The sampling temperature for response generation. - Defaults to 0.3 + Defaults to 0.7 max_tokens (int, optional): The maximum number of tokens to generate for each response. Defaults to 1024 max_concurrent (int, optional): The maximum number of conversations to process simultaneously. Defaults to 5 Returns: - List: A list of processed conversations with their respective responses + List[IConversation]: A list of processed conversations with their respective responses Note: The max_concurrent parameter helps control API usage and prevent rate limiting while still allowing for parallel processing of multiple conversations. 
- """ semaphore = asyncio.Semaphore(max_concurrent) diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/DeepInfraToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/DeepInfraToolModel.py index 0c0d5f36c..712889f15 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/DeepInfraToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/DeepInfraToolModel.py @@ -1,6 +1,16 @@ import asyncio import json -from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Type +from typing import ( + Any, + AsyncIterator, + Dict, + Iterator, + List, + Literal, + Optional, + Type, + Union, +) import httpx from swarmauri_base.ComponentBase import ComponentBase @@ -14,6 +24,7 @@ from swarmauri_standard.schema_converters.OpenAISchemaConverter import ( OpenAISchemaConverter, ) +from swarmauri_standard.toolkits.Toolkit import Toolkit from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -41,7 +52,7 @@ class DeepInfraToolModel(ToolLLMBase): type: Literal["DeepInfraToolModel"] = "DeepInfraToolModel" BASE_URL: str = "https://api.deepinfra.com/v1/openai/chat/completions" - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: """ Initialize the DeepInfraToolModel with the provided data. @@ -67,12 +78,12 @@ def get_schema_converter(self) -> Type[SchemaConverterBase]: """ return OpenAISchemaConverter - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools(self, tools: Dict[str, Any]) -> List[Dict[str, Any]]: """ Converts a toolkit's tools to the DeepInfra-compatible schema format. Args: - tools (Dict): A dictionary of tools to be converted. + tools (Dict[str, Any]): A dictionary of tools to be converted. Returns: List[Dict[str, Any]]: A list of tool schemas in OpenAI format. @@ -80,14 +91,12 @@ def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: converter = self.get_schema_converter()() return [converter.convert(tools[tool]) for tool in tools] - def _format_messages( - self, messages: List[Type[MessageBase]] - ) -> List[Dict[str, str]]: + def _format_messages(self, messages: List[MessageBase]) -> List[Dict[str, str]]: """ Formats a list of messages to a schema that matches the DeepInfra API's expectations. Args: - messages (List[Type[MessageBase]]): The conversation history. + messages (List[MessageBase]): The conversation history. Returns: List[Dict[str, str]]: A formatted list of message dictionaries. @@ -99,17 +108,22 @@ def _format_messages( if m.role != "tool" ] - def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase]: + def _process_tool_calls( + self, + tool_calls: List[Dict[str, Any]], + toolkit: Toolkit, + messages: List[Dict[str, Any]], + ) -> List[Dict[str, Any]]: """ Processes a list of tool calls and appends the results to the messages list. Args: - tool_calls (list): Tool calls from the LLM response. - toolkit: Toolkit containing tools to be called. - messages (list): Message list to append tool responses to. + tool_calls (List[Dict[str, Any]]): Tool calls from the LLM response. + toolkit (Toolkit): Toolkit containing tools to be called. + messages (List[Dict[str, Any]]): Message list to append tool responses to. Returns: - List[MessageBase]: Updated list of messages with tool responses added. + List[Dict[str, Any]]: Updated list of messages with tool responses added. 
""" if tool_calls: for tool_call in tool_calls: @@ -132,19 +146,19 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase def predict( self, conversation: IConversation, - toolkit=None, - tool_choice=None, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, multiturn: bool = True, - temperature=0.7, - max_tokens=1024, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> IConversation: """ Makes a synchronous prediction using the DeepInfra model. Parameters: conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Toolkit]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. multiturn (bool): Whether to follow up a tool call with additional LLM call. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -216,19 +230,19 @@ def predict( async def apredict( self, conversation: IConversation, - toolkit=None, - tool_choice=None, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, multiturn: bool = True, - temperature=0.7, - max_tokens=1024, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> IConversation: """ Makes an asynchronous prediction using the DeepInfra model. Parameters: conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Toolkit]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. multiturn (bool): Whether to follow up a tool call with additional LLM call. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -302,18 +316,18 @@ async def apredict( def stream( self, conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Iterator[str]: """ Streams response from DeepInfra model in real-time. Parameters: conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Toolkit]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -391,18 +405,18 @@ def stream( async def astream( self, conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Asynchronously streams response from DeepInfra model. Parameters: conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Toolkit]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. 
@@ -481,18 +495,18 @@ async def astream( def batch( self, conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> List[IConversation]: """ Processes a batch of conversations sequentially. Args: conversations (List[IConversation]): List of conversations to process. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Toolkit]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -514,19 +528,19 @@ def batch( async def abatch( self, conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - max_concurrent=5, + toolkit: Optional[Toolkit] = None, + tool_choice: Optional[Union[str, Dict[str, Any]]] = None, + temperature: float = 0.7, + max_tokens: int = 1024, + max_concurrent: int = 5, ) -> List[IConversation]: """ Processes a batch of conversations concurrently with limited concurrency. Args: conversations (List[IConversation]): List of conversations to process. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Optional[Toolkit]): Optional toolkit for tool conversion. + tool_choice (Optional[Union[str, Dict[str, Any]]]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. max_concurrent (int): Maximum number of concurrent requests. @@ -536,7 +550,7 @@ async def abatch( """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv): + async def process_conversation(conv: IConversation) -> IConversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/GeminiToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/GeminiToolModel.py index ac15cd00c..cac6f75eb 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/GeminiToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/GeminiToolModel.py @@ -11,11 +11,13 @@ from swarmauri_base.tool_llms.ToolLLMBase import ToolLLMBase from swarmauri_core.conversations.IConversation import IConversation +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.messages.FunctionMessage import FunctionMessage from swarmauri_standard.schema_converters.GeminiSchemaConverter import ( GeminiSchemaConverter, ) +from swarmauri_standard.toolkits.Toolkit import Toolkit from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -61,12 +63,13 @@ class GeminiToolModel(ToolLLMBase): ] ) - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any) -> None: """ Initializes the GeminiToolModel instance with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + *args (Any): Variable length argument list. + **kwargs (Any): Arbitrary keyword arguments containing initialization data. 
""" super().__init__(*args, **kwargs) self.allowed_models = self.allowed_models or self.get_allowed_models() @@ -82,12 +85,14 @@ def get_schema_converter(self) -> Type[SchemaConverterBase]: """ return GeminiSchemaConverter - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools( + self, tools: Dict[str, Any] + ) -> Dict[str, List[Dict[str, Any]]]: """ Converts toolkit tools into a format compatible with the Gemini schema. Args: - tools (dict): A dictionary of tools to convert. + tools (Dict[str, Any]): A dictionary of tools to convert. Returns: Dict[str, List[Dict[str, Any]]]: Dictionary containing converted tool definitions. @@ -130,17 +135,22 @@ def _format_messages( return sanitized_messages - def _process_tool_calls(self, tool_calls, toolkit, messages) -> tuple: + def _process_tool_calls( + self, + tool_calls: List[Dict[str, Any]], + toolkit: Toolkit, + messages: List[Dict[str, Any]], + ) -> tuple[List[Dict[str, Any]], List[FunctionMessage]]: """ Executes tool calls and creates appropriate response messages. Args: - tool_calls (List[Dict]): List of tool calls to process. - toolkit: Toolkit instance for handling tools. - messages (List): List of messages in the conversation. + tool_calls (List[Dict[str, Any]]): List of tool calls to process. + toolkit (Toolkit): Toolkit instance for handling tools. + messages (List[Dict[str, Any]]): List of messages in the conversation. Returns: - tuple: A tuple containing (updated messages, tool messages for the conversation) + tuple[List[Dict[str, Any]], List[FunctionMessage]]: A tuple containing (updated messages, tool messages for the conversation) """ if not toolkit or not tool_calls: return messages, [] @@ -209,9 +219,9 @@ def _get_system_context(self, messages: List[Type[MessageBase]]) -> str: @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, # Not used by Gemini but included for compatibility + conversation: Conversation, + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, multiturn: bool = True, temperature: float = 0.7, max_tokens: int = 1024, @@ -220,9 +230,9 @@ def predict( Generates model responses for a conversation synchronously. Args: - conversation (IConversation): The conversation instance. - toolkit: Optional toolkit for handling tools. - tool_choice: Tool selection strategy (not used in Gemini but included for API compatibility) + conversation (Conversation): The conversation instance. + toolkit (Toolkit, optional): Optional toolkit for handling tools. + tool_choice (Dict[str, Any], optional): Tool selection strategy (not used in Gemini but included for API compatibility) multiturn (bool): Whether to follow up a tool call with another LLM request. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit for generation. @@ -321,26 +331,26 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, # Not used by Gemini but included for compatibility + conversation: Conversation, + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, multiturn: bool = True, temperature: float = 0.7, max_tokens: int = 1024, - ) -> IConversation: + ) -> Conversation: """ Asynchronously generates model responses for a conversation. Args: - conversation (IConversation): The conversation instance. - toolkit: Optional toolkit for handling tools. 
- tool_choice: Tool selection strategy (not used in Gemini but included for API compatibility) + conversation (Conversation): The conversation instance. + toolkit (Toolkit, optional): Optional toolkit for handling tools. + tool_choice (Dict[str, Any], optional): Tool selection strategy (not used in Gemini but included for API compatibility) multiturn (bool): Whether to follow up a tool call with another LLM request. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit for generation. Returns: - IConversation: Updated conversation with model response. + Conversation: Updated conversation with model response. """ generation_config = { "temperature": temperature, @@ -433,9 +443,9 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) def stream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, temperature: float = 0.7, max_tokens: int = 1024, ) -> Iterator[str]: @@ -443,9 +453,9 @@ def stream( Streams response generation in real-time. Args: - conversation (IConversation): The conversation instance. - toolkit: Optional toolkit for handling tools. - tool_choice: Tool selection strategy (not used in Gemini but included for API compatibility) + conversation (Conversation): The conversation instance. + toolkit (Toolkit, optional): Optional toolkit for handling tools. + tool_choice (Dict[str, Any], optional): Tool selection strategy (not used in Gemini but included for API compatibility) temperature (float): Sampling temperature. max_tokens (int): Maximum token limit for generation. @@ -564,9 +574,9 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def astream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, temperature: float = 0.7, max_tokens: int = 1024, ) -> AsyncIterator[str]: @@ -574,9 +584,9 @@ async def astream( Asynchronously streams response generation in real-time. Args: - conversation (IConversation): The conversation instance. - toolkit: Optional toolkit for handling tools. - tool_choice: Tool selection strategy (not used in Gemini but included for API compatibility) + conversation (Conversation): The conversation instance. + toolkit (Toolkit, optional): Optional toolkit for handling tools. + tool_choice (Dict[str, Any], optional): Tool selection strategy (not used in Gemini but included for API compatibility) temperature (float): Sampling temperature. max_tokens (int): Maximum token limit for generation. @@ -693,24 +703,24 @@ async def astream( def batch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, + conversations: List[Conversation], + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, temperature: float = 0.7, max_tokens: int = 1024, - ) -> List[IConversation]: + ) -> List[Conversation]: """ Processes multiple conversations synchronously. Args: - conversations (List[IConversation]): List of conversation instances. - toolkit: Optional toolkit for handling tools. - tool_choice: Tool selection strategy (not used in Gemini but included for API compatibility) + conversations (List[Conversation]): List of conversation instances. + toolkit (Toolkit, optional): Optional toolkit for handling tools. 
+ tool_choice (Dict[str, Any], optional): Tool selection strategy (not used in Gemini but included for API compatibility) temperature (float): Sampling temperature. max_tokens (int): Maximum token limit for generation. Returns: - List[IConversation]: List of updated conversations with model responses. + List[Conversation]: List of updated conversations with model responses. """ results = [] for conv in conversations: @@ -726,30 +736,30 @@ def batch( async def abatch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, # Not used by Gemini but included for compatibility + conversations: List[Conversation], + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, temperature: float = 0.7, max_tokens: int = 1024, max_concurrent: int = 5, - ) -> List[IConversation]: + ) -> List[Conversation]: """ Asynchronously processes multiple conversations with concurrency control. Args: - conversations (List[IConversation]): List of conversation instances. - toolkit: Optional toolkit for handling tools. - tool_choice: Tool selection strategy (not used in Gemini but included for API compatibility) + conversations (List[Conversation]): List of conversation instances. + toolkit (Toolkit, optional): Optional toolkit for handling tools. + tool_choice (Dict[str, Any], optional): Tool selection strategy (not used in Gemini but included for API compatibility) temperature (float): Sampling temperature. max_tokens (int): Maximum token limit for generation. max_concurrent (int): Maximum number of concurrent asynchronous tasks. Returns: - List[IConversation]: List of updated conversations with model responses. + List[Conversation]: List of updated conversations with model responses. """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv) -> IConversation: + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/GroqToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/GroqToolModel.py index f19d016f8..b83efe4a9 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/GroqToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/GroqToolModel.py @@ -8,13 +8,16 @@ from swarmauri_base.messages.MessageBase import MessageBase from swarmauri_base.schema_converters.SchemaConverterBase import SchemaConverterBase from swarmauri_base.tool_llms.ToolLLMBase import ToolLLMBase +from swarmauri_base.tools.ToolBase import ToolBase from swarmauri_core.conversations.IConversation import IConversation +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.messages.FunctionMessage import FunctionMessage from swarmauri_standard.schema_converters.GroqSchemaConverter import ( GroqSchemaConverter, ) +from swarmauri_standard.toolkits.Toolkit import Toolkit from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -43,7 +46,7 @@ class GroqToolModel(ToolLLMBase): _async_client: httpx.AsyncClient = PrivateAttr(default=None) BASE_URL: str = "https://api.groq.com/openai/v1/chat/completions" - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]) -> None: """ Initialize the GroqToolModel class with the provided data. 
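# Construction sketch for the tool models in this diff: pydantic coerces the
# plain string into a SecretStr, and __init__ derives the Authorization header
# via api_key.get_secret_value(). The environment variable name is an assumption
# for illustration; remaining fields fall back to class defaults.
import os

from swarmauri_standard.tool_llms.GroqToolModel import GroqToolModel

llm = GroqToolModel(api_key=os.environ["GROQ_API_KEY"])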
@@ -76,12 +79,12 @@ def get_schema_converter(self) -> Type[SchemaConverterBase]: """ return GroqSchemaConverter - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools(self, tools: Dict[str, ToolBase]) -> List[Dict[str, Any]]: """ Converts toolkit items to API-compatible schema format. Parameters: - tools: Dictionary of tools to be converted. + tools (Dict[str, ToolBase]): Dictionary of tools to be converted. Returns: List[Dict[str, Any]]: Formatted list of tool dictionaries. @@ -96,7 +99,7 @@ def _format_messages( Formats messages for API compatibility. Parameters: - messages (List[MessageBase]): List of message instances to format. + messages (List[Type[MessageBase]]): List of message instances to format. Returns: List[Dict[str, str]]: List of formatted message dictionaries. @@ -109,19 +112,24 @@ def _format_messages( ] return formatted_messages - def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase]: + def _process_tool_calls( + self, + tool_calls: List[Dict[str, Any]], + toolkit: Toolkit, + messages: List[Dict[str, Any]], + ) -> List[Dict[str, Any]]: """ Processes a list of tool calls and appends the results to the messages list. Args: - tool_calls (list): A list of dictionaries representing tool calls. Each dictionary should contain + tool_calls (List[Dict[str, Any]]): A list of dictionaries representing tool calls. Each dictionary should contain a "function" key with a nested dictionary that includes the "name" and "arguments" of the function to be called, and an "id" key for the tool call identifier. - toolkit (object): An object that provides access to tools via the `get_tool_by_name` method. - messages (list): A list of message dictionaries to which the results of the tool calls will be appended. + toolkit (Toolkit): An object that provides access to tools via the `get_tool_by_name` method. + messages (List[Dict[str, Any]]): A list of message dictionaries to which the results of the tool calls will be appended. Returns: - List[MessageBase]: The updated list of messages with the results of the tool calls appended. + List[Dict[str, Any]]: The updated list of messages with the results of the tool calls appended. """ if tool_calls: for tool_call in tool_calls: @@ -144,20 +152,20 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, multiturn: bool = True, - temperature=0.7, - max_tokens=1024, - ) -> IConversation: + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> Conversation: """ Makes a synchronous prediction using the Groq model. Parameters: - conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + conversation (Conversation): Conversation instance with message history. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (Dict[str, Any]): Tool selection strategy. multiturn (bool): Whether to follow up a tool call with another LLM request. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. 
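# Sketch of the `_schema_convert_tools` pattern these hunks annotate, following
# the implementation shown in the DeepInfra section above: a single converter
# instance renders every tool in the toolkit dict into the provider's format.
def convert_tools(tools, converter_cls):
    converter = converter_cls()  # e.g. GroqSchemaConverter
    return [converter.convert(tools[name]) for name in tools]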
@@ -224,20 +232,20 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, multiturn: bool = True, - temperature=0.7, - max_tokens=1024, - ) -> IConversation: + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> Conversation: """ Makes an asynchronous prediction using the Groq model. Parameters: - conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + conversation (Conversation): Conversation instance with message history. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (Dict[str, Any]): Tool selection strategy. multiturn (bool): Whether to follow up a tool call with another LLM request. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -310,19 +318,19 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) def stream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Iterator[str]: """ Streams response from Groq model in real-time. Parameters: - conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + conversation (Conversation): Conversation instance with message history. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (Dict[str, Any]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -407,19 +415,19 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def astream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Asynchronously streams response from Groq model. Parameters: conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (Dict[str, Any]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -507,24 +515,24 @@ async def astream( def batch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversations: List[Conversation], + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> List[IConversation]: """ Processes a batch of conversations sequentially. Args: - conversations (List[IConversation]): List of conversations to process. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + conversations (List[Conversation]): List of conversations to process. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (Dict[str, Any]): Tool selection strategy. temperature (float): Sampling temperature. 
max_tokens (int): Maximum token limit. Returns: - List[IConversation]: List of updated conversations with responses. + List[Conversation]: List of updated conversations with responses. """ results = [] for conv in conversations: @@ -540,30 +548,30 @@ def batch( async def abatch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - max_concurrent=5, - ) -> List[IConversation]: + conversations: List[Conversation], + toolkit: Toolkit = None, + tool_choice: Dict[str, Any] = None, + temperature: float = 0.7, + max_tokens: int = 1024, + max_concurrent: int = 5, + ) -> List[Conversation]: """ Processes a batch of conversations concurrently with limited concurrency. Args: - conversations (List[IConversation]): List of conversations to process. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + conversations (List[Conversation]): List of conversations to process. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (Dict[str, Any]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. max_concurrent (int): Maximum number of concurrent requests. Returns: - List[IConversation]: List of updated conversations with responses. + List[Conversation]: List of updated conversations with responses. """ semaphore = asyncio.Semaphore(max_concurrent) - async def process_conversation(conv): + async def process_conversation(conv: Conversation) -> Conversation: async with semaphore: return await self.apredict( conv, diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/MistralToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/MistralToolModel.py index 9a80e7152..4b1569ddc 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/MistralToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/MistralToolModel.py @@ -6,16 +6,20 @@ import httpx from pydantic import PrivateAttr from swarmauri_base.ComponentBase import ComponentBase +from swarmauri_base.DynamicBase import SubclassUnion from swarmauri_base.messages.MessageBase import MessageBase from swarmauri_base.schema_converters.SchemaConverterBase import SchemaConverterBase from swarmauri_base.tool_llms.ToolLLMBase import ToolLLMBase +from swarmauri_base.tools.ToolBase import ToolBase from swarmauri_core.conversations.IConversation import IConversation +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.messages.FunctionMessage import FunctionMessage from swarmauri_standard.schema_converters.MistralSchemaConverter import ( MistralSchemaConverter, ) +from swarmauri_standard.toolkits.Toolkit import Toolkit from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -43,12 +47,12 @@ class MistralToolModel(ToolLLMBase): _client: httpx.Client = PrivateAttr(default=None) _async_client: httpx.AsyncClient = PrivateAttr(default=None) - def __init__(self, **data) -> None: + def __init__(self, **data: dict[str, Any]) -> None: """ Initializes the MistralToolModel instance, setting up headers for API requests. Parameters: - **data: Arbitrary keyword arguments for initialization. + **data (dict[str, Any]): Arbitrary keyword arguments for initialization. 
""" super().__init__(**data) self._headers = {"Authorization": f"Bearer {self.api_key.get_secret_value()}"} @@ -73,12 +77,14 @@ def get_schema_converter(self) -> Type[SchemaConverterBase]: """ return MistralSchemaConverter - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools( + self, tools: Dict[str, SubclassUnion[ToolBase]] + ) -> List[Dict[str, Any]]: """ Convert a dictionary of tools to the schema format required by Mistral API. Args: - tools (dict): A dictionary of tool objects. + tools (Dict[str, SubclassUnion[ToolBase]]): A dictionary of tool objects. Returns: List[Dict[str, Any]]: A list of converted tool schemas. @@ -106,13 +112,15 @@ def _format_messages( ] return formatted_messages - def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[Dict]: + def _process_tool_calls( + self, tool_calls: List[Any], toolkit: Toolkit, messages: List[Type[MessageBase]] + ) -> List[Dict]: """ Processes a list of tool calls and appends the results to the messages list. Args: tool_calls (list): Tool calls from the LLM response. - toolkit: Toolkit containing tools to be called. + toolkit (Toolkit): Toolkit containing tools to be called. messages (list): Message list to append tool responses to. Returns: @@ -164,21 +172,21 @@ def get_allowed_models(self) -> List[str]: @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], multiturn: bool = True, - temperature=0.7, - max_tokens=1024, + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, - ) -> IConversation: + ) -> Conversation: """ Make a synchronous prediction using the Mistral API. Args: - conversation (IConversation): The conversation object. - toolkit: The toolkit for tool assistance. - tool_choice: The tool choice strategy (default is "auto"). + conversation (Conversation): The conversation object. + toolkit (Toolkit): The toolkit for tool assistance. + tool_choice (dict): The tool choice strategy (default is "auto"). multiturn (bool): Whether to follow up a tool call with another LLM request. temperature (float): The temperature for response variability. max_tokens (int): The maximum number of tokens for the response. @@ -250,21 +258,21 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], multiturn: bool = True, - temperature=0.7, - max_tokens=1024, + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, - ) -> IConversation: + ) -> Conversation: """ Make an asynchronous prediction using the Mistral API. Args: - conversation (IConversation): The conversation object. - toolkit: The toolkit for tool assistance. - tool_choice: The tool choice strategy. + conversation (Conversation): The conversation object. + toolkit (Toolkit): The toolkit for tool assistance. + tool_choice (dict): The tool choice strategy. multiturn (bool): Whether to follow up a tool call with another LLM request. temperature (float): The temperature for response variability. max_tokens (int): The maximum number of tokens for the response. 
@@ -341,11 +349,11 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) def stream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, ) -> Iterator[str]: """ @@ -355,9 +363,9 @@ def stream( and returns a generator that yields response content as it is received. Args: - conversation (IConversation): The conversation object containing the message history. - toolkit: The toolkit for tool assistance, providing external tools to be invoked. - tool_choice: The tool choice strategy, such as "auto" or "manual". + conversation (Conversation): The conversation object containing the message history. + toolkit (Toolkit): The toolkit for tool assistance, providing external tools to be invoked. + tool_choice (dict): The tool choice strategy, such as "auto" or "manual". temperature (float): The sampling temperature for response variability. max_tokens (int): The maximum number of tokens to generate in the response. safe_prompt (bool): Whether to use a safer prompt, reducing potential harmful content. @@ -448,11 +456,11 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def astream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, ) -> AsyncIterator[str]: """ @@ -462,9 +470,9 @@ async def astream( and returns an asynchronous generator that yields response content as it is received. Args: - conversation (IConversation): The conversation object containing the message history. - toolkit: The toolkit for tool assistance, providing external tools to be invoked. - tool_choice: The tool choice strategy, such as "auto" or "manual". + conversation (Conversation): The conversation object containing the message history. + toolkit (Toolkit): The toolkit for tool assistance, providing external tools to be invoked. + tool_choice (dict): The tool choice strategy, such as "auto" or "manual". temperature (float): The sampling temperature for response variability. max_tokens (int): The maximum number of tokens to generate in the response. safe_prompt (bool): Whether to use a safer prompt, reducing potential harmful content. @@ -557,26 +565,26 @@ async def astream( def batch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversations: List[Conversation], + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, - ) -> List[IConversation]: + ) -> List[Conversation]: """ Synchronously processes multiple conversations and generates responses for each. Args: - conversations (List[IConversation]): List of conversations to process. - toolkit: The toolkit for tool assistance. - tool_choice: The tool choice strategy. + conversations (List[Conversation]): List of conversations to process. + toolkit (Toolkit): The toolkit for tool assistance. + tool_choice (dict): The tool choice strategy. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum tokens for the response. safe_prompt (bool): If True, enables safe prompting. 
Returns: - List[IConversation]: List of updated conversations with generated responses. + List[Conversation]: List of updated conversations with generated responses. """ results = [] for conv in conversations: @@ -593,28 +601,28 @@ def batch( async def abatch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversations: List[Conversation], + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, safe_prompt: bool = False, max_concurrent: int = 5, - ) -> List[IConversation]: + ) -> List[Conversation]: """ Asynchronously processes multiple conversations with controlled concurrency. Args: - conversations (List[IConversation]): List of conversations to process. - toolkit: The toolkit for tool assistance. - tool_choice: The tool choice strategy. + conversations (List[Conversation]): List of conversations to process. + toolkit (Toolkit): The toolkit for tool assistance. + tool_choice (dict): The tool choice strategy. temperature (float): Sampling temperature for response generation. max_tokens (int): Maximum tokens for the response. safe_prompt (bool): If True, enables safe prompting. max_concurrent (int): Maximum number of concurrent tasks. Returns: - List[IConversation]: List of updated conversations with generated responses. + List[Conversation]: List of updated conversations with generated responses. """ semaphore = asyncio.Semaphore(max_concurrent) diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/OpenAIToolModel.py b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/OpenAIToolModel.py index 9e701f259..31d44c4b0 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/OpenAIToolModel.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/OpenAIToolModel.py @@ -6,16 +6,20 @@ import httpx from pydantic import PrivateAttr from swarmauri_base.ComponentBase import ComponentBase +from swarmauri_base.DynamicBase import SubclassUnion from swarmauri_base.messages.MessageBase import MessageBase from swarmauri_base.schema_converters.SchemaConverterBase import SchemaConverterBase from swarmauri_base.tool_llms.ToolLLMBase import ToolLLMBase +from swarmauri_base.tools.ToolBase import ToolBase from swarmauri_core.conversations.IConversation import IConversation +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.messages.FunctionMessage import FunctionMessage from swarmauri_standard.schema_converters.OpenAISchemaConverter import ( OpenAISchemaConverter, ) +from swarmauri_standard.toolkits.Toolkit import Toolkit from swarmauri_standard.utils.retry_decorator import retry_on_status_codes @@ -42,12 +46,12 @@ class OpenAIToolModel(ToolLLMBase): BASE_URL: str = "https://api.openai.com/v1/chat/completions" _headers: Dict[str, str] = PrivateAttr(default=None) - def __init__(self, **data): + def __init__(self, **data: dict[str, Any]): """ Initialize the OpenAIToolModel class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (dict[str, Any]): Arbitrary keyword arguments containing initialization data.
""" super().__init__(**data) self._headers = { @@ -67,12 +71,14 @@ def get_schema_converter(self) -> Type[SchemaConverterBase]: """ return OpenAISchemaConverter - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools( + self, tools: Dict[str, SubclassUnion[ToolBase]] + ) -> List[Dict[str, Any]]: """ Convert a dictionary of tools to the schema format required by OpenAI API. Args: - tools (dict): A dictionary of tool objects. + tools (Dict[str, SubclassUnion[ToolBase]]): A dictionary of tool objects. Returns: List[Dict[str, Any]]: A list of converted tool schemas. @@ -99,7 +105,9 @@ def _format_messages( if message.role != "tool" ] - def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[Dict]: + def _process_tool_calls( + self, tool_calls: List[Any], toolkit: Toolkit, messages: List[Type[MessageBase]] + ) -> List[Dict]: """ Processes a list of tool calls and appends the results to the messages list. @@ -107,7 +115,7 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[Dict]: tool_calls (list): A list of dictionaries representing tool calls. Each dictionary should contain a "function" key with a nested dictionary that includes the "name" and "arguments" of the function to be called, and an "id" key for the tool call identifier. - toolkit (object): An object that provides access to tools via the `get_tool_by_name` method. + toolkit (Toolkit): Toolkit that provides access to tools via the `get_tool_by_name` method. messages (list): A list of message dictionaries to which the results of the tool calls will be appended. Returns: @@ -134,13 +142,13 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[Dict]: @retry_on_status_codes((429, 529), max_retries=1) def predict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], multiturn: bool = True, - temperature=0.7, - max_tokens=1024, - ) -> IConversation: + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> Conversation: """ Makes a synchronous prediction using the OpenAI model. @@ -215,20 +223,20 @@ def predict( @retry_on_status_codes((429, 529), max_retries=1) async def apredict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], multiturn: bool = True, - temperature=0.7, - max_tokens=1024, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> IConversation: """ Makes an asynchronous prediction using the OpenAI model. Parameters: conversation (IConversation): Conversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (dict[str, Any]): Tool selection strategy. multiturn (bool): Whether to follow up a tool call with another LLM request. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -296,11 +304,11 @@ async def apredict( @retry_on_status_codes((429, 529), max_retries=1) def stream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Iterator[str]: """ Streams response from OpenAI model in real-time. 
@@ -393,11 +401,11 @@ def stream( @retry_on_status_codes((429, 529), max_retries=1) async def astream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Asynchronously streams response from OpenAI model. @@ -490,11 +498,11 @@ async def astream( def batch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversations: List[Conversation], + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, ) -> List[IConversation]: """ Synchronously processes multiple conversations and generates responses for each. @@ -523,12 +531,12 @@ def batch( async def abatch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - max_concurrent=5, + conversations: List[Conversation], + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, + max_concurrent: int = 5, ) -> List[IConversation]: """ Asynchronously processes multiple conversations with controlled concurrency. diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/ToolLLM.py b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/ToolLLM.py index ee6d31194..000b49dea 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/ToolLLM.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/tool_llms/ToolLLM.py @@ -4,21 +4,25 @@ import httpx from swarmauri_base.ComponentBase import ComponentBase +from swarmauri_base.DynamicBase import SubclassUnion from swarmauri_base.messages.MessageBase import MessageBase from swarmauri_base.schema_converters.SchemaConverterBase import SchemaConverterBase from swarmauri_base.tool_llms.ToolLLMBase import ToolLLMBase +from swarmauri_base.tools.ToolBase import ToolBase from swarmauri_core.conversations.IConversation import IConversation +from swarmauri_standard.conversations.Conversation import Conversation from swarmauri_standard.messages.AgentMessage import AgentMessage from swarmauri_standard.messages.FunctionMessage import FunctionMessage from swarmauri_standard.schema_converters.OpenAISchemaConverter import ( OpenAISchemaConverter, ) +from swarmauri_standard.toolkits.Toolkit import Toolkit @ComponentBase.register_type() class ToolLLM(ToolLLMBase): - def __init__(self, **data): + def __init__(self, **data: dict[str, Any]) -> None: """ Initialize the OpenAIToolModel class with the provided data. @@ -35,7 +39,9 @@ def __init__(self, **data): def get_schema_converter(self) -> Type["SchemaConverterBase"]: return OpenAISchemaConverter() - def _schema_convert_tools(self, tools) -> List[Dict[str, Any]]: + def _schema_convert_tools( + self, tools: Dict[str, SubclassUnion[ToolBase]] + ) -> List[Dict[str, Any]]: converter = self.get_schema_converter() return [converter.convert(tools[tool]) for tool in tools] @@ -49,7 +55,9 @@ def _format_messages( if m.role != "tool" ] - def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase]: + def _process_tool_calls( + self, tool_calls: List[Any], toolkit: Toolkit, messages: List[Type[MessageBase]] + ) -> List[Type[MessageBase]]: """ Processes a list of tool calls and appends the results to the messages list.
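The `_process_tool_calls` contract is the same across these models: for each tool call in the response, look the tool up in the toolkit, invoke it with the parsed arguments, and append a tool-role message carrying the result. A minimal standalone sketch of that loop, using plain OpenAI-style dicts and a stub toolkit instead of `MessageBase` objects and the real `Toolkit`:

```python
# Simplified stand-in for _process_tool_calls; the swarmauri version appends
# FunctionMessage objects rather than raw dicts.
import json
from typing import Any, Callable, Dict, List


def process_tool_calls(
    tool_calls: List[Dict[str, Any]],
    toolkit: Dict[str, Callable[..., Any]],  # stub for Toolkit.get_tool_by_name
    messages: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    for call in tool_calls or []:
        name = call["function"]["name"]
        args = json.loads(call["function"]["arguments"])
        result = toolkit[name](**args)  # real code: toolkit.get_tool_by_name(name)(**args)
        messages.append(
            {
                "role": "tool",
                "tool_call_id": call["id"],
                "name": name,
                "content": json.dumps(result),
            }
        )
    return messages


calls = [{"id": "1", "function": {"name": "add", "arguments": '{"a": 2, "b": 3}'}}]
print(process_tool_calls(calls, {"add": lambda a, b: a + b}, []))
```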
@@ -57,7 +65,7 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase tool_calls (list): A list of dictionaries representing tool calls. Each dictionary should contain a "function" key with a nested dictionary that includes the "name" and "arguments" of the function to be called, and an "id" key for the tool call identifier. - toolkit (object): An object that provides access to tools via the `get_tool_by_name` method. + toolkit (Toolkit): An object that provides access to tools via the `get_tool_by_name` method. messages (list): A list of message dictionaries to which the results of the tool calls will be appended. Returns: @@ -83,20 +91,20 @@ def _process_tool_calls(self, tool_calls, toolkit, messages) -> List[MessageBase def predict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], multiturn: bool = True, - temperature=0.7, - max_tokens=1024, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> IConversation: """ Makes a synchronous prediction using the Groq model. Parameters: - conversation (IConversation): IConversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + conversation (Conversation): Conversation instance with message history. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (dict[str, Any]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -154,25 +162,25 @@ def predict( async def apredict( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], multiturn: bool = True, - temperature=0.7, - max_tokens=1024, + temperature: float = 0.7, + max_tokens: int = 1024, ) -> IConversation: """ Makes an asynchronous prediction using the OpenAI model. Parameters: - conversation (IConversation): IConversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + conversation (Conversation): Conversation instance with message history. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (dict[str, Any]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. Returns: - IConversation: Updated conversation with agent responses and tool calls. + Conversation: Updated conversation with agent responses and tool calls. """ formatted_messages = self._format_messages(conversation.history) payload = { @@ -227,18 +235,18 @@ async def apredict( def stream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, ) -> Iterator[str]: """ Streams response from OpenAI model in real-time. Parameters: - conversation (IConversation): IConversation instance with message history. - toolkit: Optional toolkit for tool conversion. + conversation (Conversation): Conversation instance with message history. + toolkit (Toolkit): Optional toolkit for tool conversion. tool_choice: Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit.
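Across `predict` and `apredict`, `multiturn=True` means a tool-bearing response triggers one follow-up request so the model can phrase a final answer from the tool results. A compressed sketch of that control flow, with the HTTP call stubbed out (the real methods post to the provider's chat completions endpoint via httpx):

```python
# Control-flow sketch of the multiturn tool loop; post() is a stub, not the
# actual request helper used by these models.
from typing import Any, Dict, List


def post(payload: Dict[str, Any]) -> Dict[str, Any]:
    """Stub for the provider request; returns a canned tool-free reply."""
    return {"choices": [{"message": {"content": "5", "tool_calls": None}}]}


def predict_sketch(messages: List[Dict[str, Any]], multiturn: bool = True) -> str:
    response = post({"messages": messages, "tools": []})  # converted tool schemas go here
    message = response["choices"][0]["message"]
    if message.get("tool_calls") and multiturn:
        # Append tool results (see the _process_tool_calls sketch above),
        # then ask the model once more to compose the final answer.
        response = post({"messages": messages})
        message = response["choices"][0]["message"]
    return message["content"]


print(predict_sketch([{"role": "user", "content": "What is 2 + 3?"}]))
```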
@@ -296,19 +304,19 @@ def stream( async def astream( self, - conversation: IConversation, - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, + conversation: Conversation, + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, ) -> AsyncIterator[str]: """ Asynchronously streams response from Groq model. Parameters: - conversation (IConversation): IConversation instance with message history. - toolkit: Optional toolkit for tool conversion. - tool_choice: Tool selection strategy. + conversation (Conversation): Conversation instance with message history. + toolkit (Toolkit): Optional toolkit for tool conversion. + tool_choice (dict[str, Any]): Tool selection strategy. temperature (float): Sampling temperature. max_tokens (int): Maximum token limit. @@ -366,22 +374,21 @@ async def astream( def batch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - ) -> List[IConversation]: + conversations: List[Conversation], + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, + ) -> List[Conversation]: """ Processes a batch of conversations and generates responses for each sequentially. Args: - conversations (List[IConversation]): List of conversations to process. + conversations (List[Conversation]): List of conversations to process. temperature (float): Sampling temperature for response diversity. + tool_choice (dict[str, Any]): Tool selection strategy. + toolkit (Toolkit): Optional toolkit for tool conversion. max_tokens (int): Maximum tokens for each response. - top_p (float): Cumulative probability for nucleus sampling. - enable_json (bool): Whether to format the response as JSON. - stop (Optional[List[str]]): List of stop sequences for response termination. Returns: List[IConversation]: List of updated conversations with model responses. @@ -399,27 +406,26 @@ def batch( async def abatch( self, - conversations: List[IConversation], - toolkit=None, - tool_choice=None, - temperature=0.7, - max_tokens=1024, - max_concurrent=5, - ) -> List[IConversation]: + conversations: List[Conversation], + toolkit: Toolkit, + tool_choice: dict[str, Any], + temperature: float = 0.7, + max_tokens: int = 1024, + max_concurrent: int = 5, + ) -> List[Conversation]: """ Async method for processing a batch of conversations concurrently. Args: - conversations (List[IConversation]): List of conversations to process. + conversations (List[Conversation]): List of conversations to process. temperature (float): Sampling temperature for response diversity. + tool_choice (dict[str, Any]): Tool selection strategy. + toolkit (Toolkit): Optional toolkit for tool conversion. max_tokens (int): Maximum tokens for each response. - top_p (float): Cumulative probability for nucleus sampling. - enable_json (bool): Whether to format the response as JSON. - stop (Optional[List[str]]): List of stop sequences for response termination. max_concurrent (int): Maximum number of concurrent requests. Returns: - List[IConversation]: List of updated conversations with model responses. + List[Conversation]: List of updated conversations with model responses.
""" semaphore = asyncio.Semaphore(max_concurrent) diff --git a/pkgs/swarmauri_standard/swarmauri_standard/tts/OpenaiTTS.py b/pkgs/swarmauri_standard/swarmauri_standard/tts/OpenaiTTS.py index ee569e2ce..ac9cf1307 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/tts/OpenaiTTS.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/tts/OpenaiTTS.py @@ -1,7 +1,7 @@ import asyncio import io import os -from typing import AsyncIterator, Dict, Iterator, List, Literal +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal import httpx from pydantic import PrivateAttr, SecretStr, model_validator @@ -39,12 +39,12 @@ class OpenaiTTS(TTSBase): _BASE_URL: str = PrivateAttr(default="https://api.openai.com/v1/audio/speech") _headers: Dict[str, str] = PrivateAttr(default=None) - def __init__(self, **data): + def __init__(self, **data: Dict[str, Any]): """ Initialize the OpenaiTTS class with the provided data. Args: - **data: Arbitrary keyword arguments containing initialization data. + **data (Dict[str, Any]): Arbitrary keyword arguments containing initialization data. """ super().__init__(**data) self._headers = { diff --git a/pkgs/swarmauri_standard/swarmauri_standard/vlms/FalVLM.py b/pkgs/swarmauri_standard/swarmauri_standard/vlms/FalVLM.py index a15b8449e..78c21ca59 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/vlms/FalVLM.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/vlms/FalVLM.py @@ -1,6 +1,6 @@ import asyncio import time -from typing import Dict, List, Literal +from typing import Any, Dict, List, Literal import httpx from pydantic import Field, PrivateAttr, SecretStr @@ -42,7 +42,7 @@ class FalVLM(VLMBase): max_retries: int = Field(default=60) retry_delay: float = Field(default=1.0) - def __init__(self, **data): + def __init__(self, **data: dict[str, Any]) -> None: """ Initialize the FalOCR with API key, HTTP clients, and model name validation. @@ -59,14 +59,14 @@ def __init__(self, **data): self.name = self.allowed_models[0] @retry_on_status_codes((429, 529), max_retries=1) - def _send_request(self, image_url: str, prompt: str, **kwargs) -> Dict: + def _send_request(self, image_url: str, prompt: str, **kwargs: dict[str, Any]) -> Dict: """ Send a synchronous request to the vision model API for image processing. Args: image_url (str): The URL of the image to process. prompt (str): The question or instruction to apply to the image. - **kwargs: Additional parameters for the API request. + **kwargs (dict[str, Any]): Additional parameters for the API request. Returns: Dict: The result of the image processing request. @@ -84,14 +84,14 @@ def _send_request(self, image_url: str, prompt: str, **kwargs) -> Dict: return response_data # For immediate responses @retry_on_status_codes((429, 529), max_retries=1) - async def _async_send_request(self, image_url: str, prompt: str, **kwargs) -> Dict: + async def _async_send_request(self, image_url: str, prompt: str, **kwargs: dict[str, Any]) -> Dict: """ Send an asynchronous request to the vision model API for image processing. Args: image_url (str): The URL of the image to process. prompt (str): The question or instruction to apply to the image. - **kwargs: Additional parameters for the API request. + **kwargs: dict[str, Any]: Additional parameters for the API request. Returns: Dict: The result of the image processing request. 
@@ -177,14 +177,14 @@ async def _async_wait_for_completion(self, request_id: str) -> Dict: f"Request {request_id} did not complete within the timeout period" ) - def predict_vision(self, image_url: str, prompt: str, **kwargs) -> str: + def predict_vision(self, image_url: str, prompt: str, **kwargs: dict[str, Any]) -> str: """ Process an image and answer a question based on the prompt. Args: image_url (str): The URL of the image to process. prompt (str): The question or instruction to apply to the image. - **kwargs: Additional parameters for the API request. + **kwargs (dict[str, Any]): Additional parameters for the API request. Returns: str: The answer or result of the image processing. @@ -192,14 +192,14 @@ def predict_vision(self, image_url: str, prompt: str, **kwargs) -> str: response_data = self._send_request(image_url, prompt, **kwargs) return response_data.get("output", "") - async def apredict_vision(self, image_url: str, prompt: str, **kwargs) -> str: + async def apredict_vision(self, image_url: str, prompt: str, **kwargs: dict[str, Any]) -> str: """ Asynchronously process an image and answer a question based on the prompt. Args: image_url (str): The URL of the image to process. prompt (str): The question or instruction to apply to the image. - **kwargs: Additional parameters for the API request. + **kwargs (dict[str, Any]): Additional parameters for the API request. Returns: str: The answer or result of the image processing. @@ -207,14 +207,14 @@ async def apredict_vision(self, image_url: str, prompt: str, **kwargs) -> str: response_data = await self._async_send_request(image_url, prompt, **kwargs) return response_data.get("output", "") - def batch(self, image_urls: List[str], prompts: List[str], **kwargs) -> List[str]: + def batch(self, image_urls: List[str], prompts: List[str], **kwargs: dict[str, Any]) -> List[str]: """ Process a batch of images and answer questions for each image synchronously. Args: image_urls (List[str]): A list of image URLs to process. prompts (List[str]): A list of prompts corresponding to each image. - **kwargs: Additional parameters for the API requests. + **kwargs (dict[str, Any]): Additional parameters for the API requests. Returns: List[str]: A list of answers or results for each image. @@ -225,7 +225,7 @@ def batch(self, image_urls: List[str], prompts: List[str], **kwargs) -> List[str ] async def abatch( - self, image_urls: List[str], prompts: List[str], **kwargs + self, image_urls: List[str], prompts: List[str], **kwargs: dict[str, Any] ) -> List[str]: """ Asynchronously process a batch of images and answer questions for each image. @@ -233,7 +233,7 @@ async def abatch( Args: image_urls (List[str]): A list of image URLs to process. prompts (List[str]): A list of prompts corresponding to each image. - **kwargs: Additional parameters for the API requests. + **kwargs (dict[str, Any]): Additional parameters for the API requests. Returns: List[str]: A list of answers or results for each image. diff --git a/pkgs/swarmauri_standard/swarmauri_standard/vlms/GroqVLM.py b/pkgs/swarmauri_standard/swarmauri_standard/vlms/GroqVLM.py index 6d460aeb8..afe4e8b41 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/vlms/GroqVLM.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/vlms/GroqVLM.py @@ -41,7 +41,7 @@ class GroqVLM(VLMBase): default="https://api.groq.com/openai/v1/chat/completions" ) - def __init__(self, **data): + def __init__(self, **data: dict[str, Any]): """ Initialize the GroqAIAudio class with the provided data.
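`batch` and `abatch` in FalVLM are thin fan-outs over `(image_url, prompt)` pairs: the sync version is a comprehension over `predict_vision`, the async version gathers `apredict_vision` coroutines. A standalone sketch of that pairing logic, with the predict functions stubbed out:

```python
# Sketch of the FalVLM batch/abatch fan-out; the predict functions below are
# stubs standing in for predict_vision/apredict_vision.
import asyncio
from typing import List


def predict_vision(image_url: str, prompt: str) -> str:
    return f"{prompt} -> {image_url}"  # stub result


async def apredict_vision(image_url: str, prompt: str) -> str:
    return predict_vision(image_url, prompt)  # stub result


def batch(image_urls: List[str], prompts: List[str]) -> List[str]:
    # zip pairs each image with its prompt; the real method forwards **kwargs too
    return [predict_vision(url, p) for url, p in zip(image_urls, prompts)]


async def abatch(image_urls: List[str], prompts: List[str]) -> List[str]:
    # gather preserves input order, matching the sync batch semantics
    return await asyncio.gather(
        *(apredict_vision(url, p) for url, p in zip(image_urls, prompts))
    )


print(batch(["img1.png", "img2.png"], ["describe", "caption"]))
print(asyncio.run(abatch(["img1.png"], ["describe"])))
```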
diff --git a/pkgs/swarmauri_standard/swarmauri_standard/vlms/HyperbolicVLM.py b/pkgs/swarmauri_standard/swarmauri_standard/vlms/HyperbolicVLM.py index 7e2d6bd6e..1159d0f23 100644 --- a/pkgs/swarmauri_standard/swarmauri_standard/vlms/HyperbolicVLM.py +++ b/pkgs/swarmauri_standard/swarmauri_standard/vlms/HyperbolicVLM.py @@ -40,7 +40,7 @@ class HyperbolicVLM(VLMBase): _client: httpx.Client = PrivateAttr(default=None) _BASE_URL: str = PrivateAttr(default="https://api.hyperbolic.xyz/v1/") - def __init__(self, **data): + def __init__(self, **data: dict[str, Any]) -> None: """ Initialize the HyperbolicVisionModel class with the provided data.