From 5e09e4711d1632eaab09005a7e9c878b3f4e0475 Mon Sep 17 00:00:00 2001 From: tribhuwan-kumar Date: Sun, 14 Sep 2025 18:00:06 +0530 Subject: [PATCH] fix: complete uncompleted work [refactor code structure of terminator cli & add gemini and openai support to terminator-cli chat] --- terminator-cli/Cargo.toml | 2 + terminator-cli/src/cli.rs | 162 ++ terminator-cli/src/command.rs | 119 ++ terminator-cli/src/main.rs | 1447 +---------------- terminator-cli/src/mcp_client.rs | 1222 -------------- terminator-cli/src/mpc_client/anthropic.rs | 255 +++ terminator-cli/src/mpc_client/gemini.rs | 192 +++ .../src/mpc_client/interactive_chat.rs | 325 ++++ terminator-cli/src/mpc_client/mod.rs | 6 + terminator-cli/src/mpc_client/natural_lang.rs | 13 + terminator-cli/src/mpc_client/openai.rs | 197 +++ terminator-cli/src/mpc_client/utils.rs | 59 + terminator-cli/src/telemetry/mod.rs | 3 + terminator-cli/src/telemetry/process.rs | 185 +++ terminator-cli/src/telemetry/receiver.rs | 37 + terminator-cli/src/telemetry/traces.rs | 174 ++ terminator-cli/src/telemetry_receiver.rs | 383 ----- terminator-cli/src/utils.rs | 127 ++ terminator-cli/src/version_control.rs | 557 +++++++ terminator-cli/src/workflow_exec/cron.rs | 35 + terminator-cli/src/workflow_exec/exec.rs | 491 ++++++ terminator-cli/src/workflow_exec/input.rs | 68 + terminator-cli/src/workflow_exec/mod.rs | 7 + terminator-cli/src/workflow_exec/parsing.rs | 65 + .../result.rs} | 0 .../src/workflow_exec/validation.rs | 103 ++ terminator-cli/src/workflow_exec/workflow.rs | 343 ++++ 27 files changed, 3549 insertions(+), 3028 deletions(-) create mode 100644 terminator-cli/src/cli.rs create mode 100644 terminator-cli/src/command.rs delete mode 100644 terminator-cli/src/mcp_client.rs create mode 100644 terminator-cli/src/mpc_client/anthropic.rs create mode 100644 terminator-cli/src/mpc_client/gemini.rs create mode 100644 terminator-cli/src/mpc_client/interactive_chat.rs create mode 100644 terminator-cli/src/mpc_client/mod.rs create mode 
100644 terminator-cli/src/mpc_client/natural_lang.rs create mode 100644 terminator-cli/src/mpc_client/openai.rs create mode 100644 terminator-cli/src/mpc_client/utils.rs create mode 100644 terminator-cli/src/telemetry/mod.rs create mode 100644 terminator-cli/src/telemetry/process.rs create mode 100644 terminator-cli/src/telemetry/receiver.rs create mode 100644 terminator-cli/src/telemetry/traces.rs delete mode 100644 terminator-cli/src/telemetry_receiver.rs create mode 100644 terminator-cli/src/utils.rs create mode 100644 terminator-cli/src/version_control.rs create mode 100644 terminator-cli/src/workflow_exec/cron.rs create mode 100644 terminator-cli/src/workflow_exec/exec.rs create mode 100644 terminator-cli/src/workflow_exec/input.rs create mode 100644 terminator-cli/src/workflow_exec/mod.rs create mode 100644 terminator-cli/src/workflow_exec/parsing.rs rename terminator-cli/src/{workflow_result.rs => workflow_exec/result.rs} (100%) create mode 100644 terminator-cli/src/workflow_exec/validation.rs create mode 100644 terminator-cli/src/workflow_exec/workflow.rs diff --git a/terminator-cli/Cargo.toml b/terminator-cli/Cargo.toml index 07c86b02..c6504394 100644 --- a/terminator-cli/Cargo.toml +++ b/terminator-cli/Cargo.toml @@ -39,6 +39,8 @@ dotenvy = "0.15.7" reqwest = { version = "0.12.22", features = ["json", "rustls-tls"] } anthropic-sdk = "0.1.5" +openai-api-rs = "6.0.8" +gemini-rs = "2.0.0" # Cron scheduling support tokio-cron-scheduler = "0.14" diff --git a/terminator-cli/src/cli.rs b/terminator-cli/src/cli.rs new file mode 100644 index 00000000..d1ca4e93 --- /dev/null +++ b/terminator-cli/src/cli.rs @@ -0,0 +1,162 @@ +use clap::{Parser, Subcommand, ValueEnum}; + +#[derive(Parser)] +#[command(name = "terminator")] +#[command(about = "šŸ¤– Terminator CLI - AI-native GUI automation")] +#[command( + long_about = "Terminator CLI provides tools for managing the Terminator project, including version management, releases, and development workflows." 
+)] +pub struct Cli { + #[command(subcommand)] + pub command: Commands, +} + +#[derive(ValueEnum, Clone, Copy, Debug, Default)] +#[clap(rename_all = "lower")] +pub enum BumpLevel { + #[default] + Patch, + Minor, + Major, +} + +#[derive(ValueEnum, Clone, Copy, Debug, Default)] +#[clap(rename_all = "lower")] +pub enum AIProvider { + #[default] + Anthropic, + OpenAI, + Gemini, +} + +impl std::fmt::Display for BumpLevel { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", format!("{self:?}").to_lowercase()) + } +} + +#[derive(Parser, Debug)] +pub struct ReleaseArgs { + /// The part of the version to bump: patch, minor, or major. + #[clap(value_enum, default_value_t = BumpLevel::Patch)] + pub level: BumpLevel, +} + +#[derive(Parser, Debug)] +pub struct McpChatArgs { + /// MCP server URL (e.g., http://localhost:3000) + #[clap(long, short = 'u', conflicts_with = "command")] + pub url: Option, + + /// Command to start MCP server via stdio (e.g., "npx -y terminator-mcp-agent") + #[clap(long, short = 'c', conflicts_with = "url")] + pub command: Option, + + /// Specify AIProvider + #[clap(long, short = 'a', default_value_t = AIProvider::Anthropic, value_enum)] + pub aiprovider: AIProvider, +} + +#[derive(Parser, Debug)] +pub struct McpExecArgs { + /// MCP server URL + #[clap(long, short = 'u', conflicts_with = "command")] + pub url: Option, + + /// Command to start MCP server via stdio + #[clap(long, short = 'c', conflicts_with = "url")] + pub command: Option, + + /// Tool name to execute + pub tool: String, + + /// Arguments for the tool (as JSON or simple string) + pub args: Option, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, ValueEnum)] +#[clap(rename_all = "lower")] +pub enum InputType { + Auto, + Gist, + Raw, + File, +} + +#[derive(Parser, Debug, Clone)] +pub struct McpRunArgs { + /// MCP server URL (e.g., http://localhost:3000) + #[clap(long, short = 'u', conflicts_with = "command")] + pub url: Option, + + /// 
Command to start MCP server via stdio (e.g., "npx -y terminator-mcp-agent") + #[clap(long, short = 'c', conflicts_with = "url")] + pub command: Option, + + /// Input source - can be a GitHub gist URL, raw gist URL, or local file path (JSON/YAML) + pub input: String, + + /// Input type (auto-detected by default) + #[clap(long, value_enum, default_value = "auto")] + pub input_type: InputType, + + /// Dry run - parse and validate the workflow without executing + #[clap(long)] + pub dry_run: bool, + + /// Verbose output + #[clap(long, short)] + pub verbose: bool, + + /// Stop on first error (default: true) + #[clap(long)] + pub no_stop_on_error: bool, + + /// Include detailed results (default: true) + #[clap(long)] + pub no_detailed_results: bool, + + /// Skip retry logic on errors (default: false, will retry on errors) + #[clap(long)] + pub no_retry: bool, +} + +#[derive(Subcommand)] +pub enum McpCommands { + /// Interactive chat with MCP server + Chat(McpChatArgs), + /// Interactive AI-powered chat with MCP server + AiChat(McpChatArgs), + /// Execute a single MCP tool + Exec(McpExecArgs), + /// Execute a workflow sequence from a local file or GitHub gist + Run(McpRunArgs), +} + +#[derive(Subcommand)] +pub enum VersionCommands { + /// Bump patch version (x.y.Z+1) + Patch, + /// Bump minor version (x.Y+1.0) + Minor, + /// Bump major version (X+1.0.0) + Major, + /// Sync all package versions without bumping + Sync, + /// Show current version status + Status, + /// Tag current version and push (triggers CI) + Tag, + /// Full release: bump version + tag + push + Release(ReleaseArgs), +} + +#[derive(Subcommand)] +pub enum Commands { + /// Version management commands + #[command(subcommand)] + Version(VersionCommands), + /// MCP client commands + #[command(subcommand)] + Mcp(McpCommands), +} diff --git a/terminator-cli/src/command.rs b/terminator-cli/src/command.rs new file mode 100644 index 00000000..c0da0dd1 --- /dev/null +++ b/terminator-cli/src/command.rs @@ -0,0 +1,119 
@@ +use anyhow::Result; +use std::process::{Command, Stdio}; +use crate::{ + workflow_exec::{exec::execute_command, workflow::{run_workflow, Transport}}, + mpc_client::{natural_lang::aichat, interactive_chat::interactive_chat}, + cli::{McpCommands, VersionCommands}, + version_control::{ensure_project_root, full_release, + sync_all_versions, bump_version, tag_and_push, show_status} +}; + +pub fn run_command(program: &str, args: &[&str]) -> Result<(), Box> { + let output = Command::new(program) + .args(args) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output()?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(format!( + "Command failed: {} {}\nError: {}", + program, + args.join(" "), + stderr + ) + .into()); + } + + Ok(()) +} + +pub fn parse_transport(url: Option, command: Option) -> Transport { + if let Some(url) = url { + Transport::Http(url) + } else if let Some(command) = command { + let parts = parse_command(&command); + Transport::Stdio(parts) + } else { + // Default to spawning local MCP agent via npx for convenience + let default_cmd = "npx -y terminator-mcp-agent@latest"; + println!("ā„¹ļø No --url or --command specified. 
Falling back to '{default_cmd}'"); + let parts = parse_command(default_cmd); + Transport::Stdio(parts) + } +} + + +pub fn parse_command(command: &str) -> Vec { + // Simple command parsing - splits by spaces but respects quotes + let mut parts = Vec::new(); + let mut current = String::new(); + let mut in_quotes = false; + + for c in command.chars() { + match c { + '"' => in_quotes = !in_quotes, + ' ' if !in_quotes => { + if !current.is_empty() { + parts.push(current.clone()); + current.clear(); + } + } + _ => current.push(c), + } + } + + if !current.is_empty() { + parts.push(current); + } + + parts +} + +pub fn handle_mcp_command(cmd: McpCommands) { + let transport = match cmd { + McpCommands::Chat(ref args) => parse_transport(args.url.clone(), args.command.clone()), + McpCommands::AiChat(ref args) => parse_transport(args.url.clone(), args.command.clone()), + McpCommands::Exec(ref args) => parse_transport(args.url.clone(), args.command.clone()), + McpCommands::Run(ref args) => parse_transport(args.url.clone(), args.command.clone()), + }; + + let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); + + let result = rt.block_on(async { + match cmd { + McpCommands::Chat(_) => { + interactive_chat(transport).await + } + McpCommands::AiChat(args) => { + aichat(transport, args.aiprovider).await + } + McpCommands::Exec(args) => { + execute_command(transport, args.tool, args.args).await + } + McpCommands::Run(args) => { + run_workflow(transport, args).await + } + } + }); + + if let Err(e) = result { + eprintln!("āŒ MCP command error: {e}"); + std::process::exit(1); + } +} + +pub fn handle_version_command(version_cmd: VersionCommands) { + ensure_project_root(); + match version_cmd { + VersionCommands::Patch => bump_version("patch"), + VersionCommands::Minor => bump_version("minor"), + VersionCommands::Major => bump_version("major"), + VersionCommands::Sync => sync_all_versions(), + VersionCommands::Status => show_status(), + VersionCommands::Tag => 
tag_and_push(), + VersionCommands::Release(args) => { full_release(&args.level.to_string()) } + } +} + diff --git a/terminator-cli/src/main.rs b/terminator-cli/src/main.rs index e3efb548..72536d17 100644 --- a/terminator-cli/src/main.rs +++ b/terminator-cli/src/main.rs @@ -6,1435 +6,36 @@ //! releases, and development workflows. //! //! Usage from workspace root: -//! cargo run --bin terminator -- patch # Bump patch version -//! cargo run --bin terminator -- minor # Bump minor version -//! cargo run --bin terminator -- major # Bump major version -//! cargo run --bin terminator -- sync # Sync all versions -//! cargo run --bin terminator -- status # Show current status -//! cargo run --bin terminator -- tag # Tag and push current version -//! cargo run --bin terminator -- release # Full release: bump patch + tag + push -//! cargo run --bin terminator -- release minor # Full release: bump minor + tag + push - -use anyhow::{Context, Result}; -use clap::{Parser, Subcommand, ValueEnum}; -use serde_json::Value; -use std::env; -use std::fs; -use std::path::Path; -use std::process::{Command, Stdio}; - -mod mcp_client; -mod telemetry_receiver; -mod workflow_result; - -use workflow_result::WorkflowResult; - -#[derive(Parser)] -#[command(name = "terminator")] -#[command(about = "šŸ¤– Terminator CLI - AI-native GUI automation")] -#[command( - long_about = "Terminator CLI provides tools for managing the Terminator project, including version management, releases, and development workflows." -)] -struct Cli { - #[command(subcommand)] - command: Commands, -} - -#[derive(ValueEnum, Clone, Copy, Debug, Default)] -#[clap(rename_all = "lower")] -enum BumpLevel { - #[default] - Patch, - Minor, - Major, -} - -impl std::fmt::Display for BumpLevel { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", format!("{self:?}").to_lowercase()) - } -} - -#[derive(Parser, Debug)] -struct ReleaseArgs { - /// The part of the version to bump: patch, minor, or major. 
- #[clap(value_enum, default_value_t = BumpLevel::Patch)] - level: BumpLevel, -} - -#[derive(Parser, Debug)] -struct McpChatArgs { - /// MCP server URL (e.g., http://localhost:3000) - #[clap(long, short = 'u', conflicts_with = "command")] - url: Option, - - /// Command to start MCP server via stdio (e.g., "npx -y terminator-mcp-agent") - #[clap(long, short = 'c', conflicts_with = "url")] - command: Option, -} - -#[derive(Parser, Debug)] -struct McpExecArgs { - /// MCP server URL - #[clap(long, short = 'u', conflicts_with = "command")] - url: Option, - - /// Command to start MCP server via stdio - #[clap(long, short = 'c', conflicts_with = "url")] - command: Option, - - /// Tool name to execute - tool: String, - - /// Arguments for the tool (as JSON or simple string) - args: Option, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, ValueEnum)] -#[clap(rename_all = "lower")] -enum InputType { - Auto, - Gist, - Raw, - File, -} - -#[derive(Parser, Debug, Clone)] -struct McpRunArgs { - /// MCP server URL (e.g., http://localhost:3000) - #[clap(long, short = 'u', conflicts_with = "command")] - url: Option, - - /// Command to start MCP server via stdio (e.g., "npx -y terminator-mcp-agent") - #[clap(long, short = 'c', conflicts_with = "url")] - command: Option, - - /// Input source - can be a GitHub gist URL, raw gist URL, or local file path (JSON/YAML) - input: String, - - /// Input type (auto-detected by default) - #[clap(long, value_enum, default_value = "auto")] - input_type: InputType, - - /// Dry run - parse and validate the workflow without executing - #[clap(long)] - dry_run: bool, - - /// Verbose output - #[clap(long, short)] - verbose: bool, - - /// Stop on first error (default: true) - #[clap(long)] - no_stop_on_error: bool, - - /// Include detailed results (default: true) - #[clap(long)] - no_detailed_results: bool, - - /// Skip retry logic on errors (default: false, will retry on errors) - #[clap(long)] - no_retry: bool, -} - 
-#[derive(Subcommand)] -enum McpCommands { - /// Interactive chat with MCP server - Chat(McpChatArgs), - /// Interactive AI-powered chat with MCP server - AiChat(McpChatArgs), - /// Execute a single MCP tool - Exec(McpExecArgs), - /// Execute a workflow sequence from a local file or GitHub gist - Run(McpRunArgs), -} - -#[derive(Subcommand)] -enum Commands { - /// Bump patch version (x.y.Z+1) - Patch, - /// Bump minor version (x.Y+1.0) - Minor, - /// Bump major version (X+1.0.0) - Major, - /// Sync all package versions without bumping - Sync, - /// Show current version status - Status, - /// Tag current version and push (triggers CI) - Tag, - /// Full release: bump version + tag + push - Release(ReleaseArgs), - /// MCP client commands - #[command(subcommand)] - Mcp(McpCommands), -} +//! cargo run --bin terminator -- version patch # Bump patch version +//! cargo run --bin terminator -- version minor # Bump minor version +//! cargo run --bin terminator -- version major # Bump major version +//! cargo run --bin terminator -- version sync # Sync all versions +//! cargo run --bin terminator -- version status # Show current status +//! cargo run --bin terminator -- version tag # Tag and push current version +//! cargo run --bin terminator -- version release # Full release: bump patch + tag + push +//! 
cargo run --bin terminator -- version release minor # Full release: bump minor + tag + push + + +use crate::cli::{Cli, Commands}; +use crate::command::{ + handle_mcp_command, handle_version_command +}; + +mod cli; +mod utils; +mod command; +mod telemetry; +mod mpc_client; +mod workflow_exec; +mod version_control; fn main() { + use clap::Parser; let cli = Cli::parse(); // Only ensure we're in the project root for development commands match cli.command { - Commands::Patch => { - ensure_project_root(); - bump_version("patch"); - } - Commands::Minor => { - ensure_project_root(); - bump_version("minor"); - } - Commands::Major => { - ensure_project_root(); - bump_version("major"); - } - Commands::Sync => { - ensure_project_root(); - sync_all_versions(); - } - Commands::Status => { - ensure_project_root(); - show_status(); - } - Commands::Tag => { - ensure_project_root(); - tag_and_push(); - } - Commands::Release(args) => { - ensure_project_root(); - full_release(&args.level.to_string()); - } + Commands::Version(version_cmd) => handle_version_command(version_cmd), Commands::Mcp(mcp_cmd) => handle_mcp_command(mcp_cmd), } } - -fn ensure_project_root() { - // Check if we're already in the project root - if Path::new("Cargo.toml").exists() && Path::new("terminator").exists() { - return; - } - - // If we're in terminator-cli, go up one level - if env::current_dir() - .map(|p| { - p.file_name() - .map(|n| n == "terminator-cli") - .unwrap_or(false) - }) - .unwrap_or(false) - && env::set_current_dir("..").is_err() - { - eprintln!("āŒ Failed to change to project root directory"); - std::process::exit(1); - } - - // Final check - if !Path::new("Cargo.toml").exists() || !Path::new("terminator").exists() { - eprintln!("āŒ Not in Terminator project root. 
Please run from workspace root."); - eprintln!("šŸ’” Usage: terminator "); - std::process::exit(1); - } -} - -fn get_workspace_version() -> Result> { - let cargo_toml = fs::read_to_string("Cargo.toml")?; - let mut in_workspace_package = false; - - for line in cargo_toml.lines() { - let trimmed_line = line.trim(); - if trimmed_line == "[workspace.package]" { - in_workspace_package = true; - continue; - } - - if in_workspace_package { - if trimmed_line.starts_with('[') { - // We've left the workspace.package section - break; - } - if trimmed_line.starts_with("version") { - if let Some(version_part) = trimmed_line.split('=').nth(1) { - if let Some(version) = version_part.trim().split('"').nth(1) { - return Ok(version.to_string()); - } - } - } - } - } - - Err("Version not found in [workspace.package] in Cargo.toml".into()) -} - -fn sync_cargo_versions() -> Result<(), Box> { - println!("šŸ“¦ Syncing Cargo.toml dependency versions..."); - let workspace_version = get_workspace_version()?; - - let cargo_toml = fs::read_to_string("Cargo.toml")?; - let mut lines: Vec = cargo_toml.lines().map(|s| s.to_string()).collect(); - let mut in_workspace_deps = false; - let mut deps_version_updated = false; - - let tmp = 0..lines.len(); - for i in tmp { - let line = &lines[i]; - let trimmed_line = line.trim(); - - if trimmed_line.starts_with('[') { - in_workspace_deps = trimmed_line == "[workspace.dependencies]"; - continue; - } - - if in_workspace_deps && trimmed_line.starts_with("terminator =") { - let line_clone = line.clone(); - if let Some(start) = line_clone.find("version = \"") { - let version_start = start + "version = \"".len(); - if let Some(end_quote_offset) = line_clone[version_start..].find('"') { - let range = version_start..(version_start + end_quote_offset); - if &line_clone[range.clone()] != workspace_version.as_str() { - lines[i].replace_range(range, &workspace_version); - println!( - "āœ… Updated 'terminator' dependency version to {workspace_version}." 
- ); - deps_version_updated = true; - } else { - println!("āœ… 'terminator' dependency version is already up to date."); - deps_version_updated = true; // Mark as done - } - } - } - break; // Assume only one terminator dependency to update - } - } - - if deps_version_updated { - fs::write("Cargo.toml", lines.join("\n") + "\n")?; - } else { - eprintln!( - "āš ļø Warning: Could not find 'terminator' in [workspace.dependencies] to sync version." - ); - } - Ok(()) -} - -fn set_workspace_version(new_version: &str) -> Result<(), Box> { - let cargo_toml = fs::read_to_string("Cargo.toml")?; - let mut lines: Vec = cargo_toml.lines().map(|s| s.to_string()).collect(); - let mut in_workspace_package = false; - let mut package_version_updated = false; - - let tmp = 0..lines.len(); - for i in tmp { - let line = &lines[i]; - let trimmed_line = line.trim(); - - if trimmed_line.starts_with('[') { - in_workspace_package = trimmed_line == "[workspace.package]"; - continue; - } - - if in_workspace_package && trimmed_line.starts_with("version =") { - let indentation = line.len() - line.trim_start().len(); - lines[i] = format!("{}version = \"{}\"", " ".repeat(indentation), new_version); - package_version_updated = true; - break; // Exit after finding and updating the version - } - } - - if !package_version_updated { - return Err("version key not found in [workspace.package] in Cargo.toml".into()); - } - - fs::write("Cargo.toml", lines.join("\n") + "\n")?; - Ok(()) -} - -fn parse_version(version: &str) -> Result<(u32, u32, u32), Box> { - let parts: Vec<&str> = version.split('.').collect(); - if parts.len() != 3 { - return Err("Invalid version format".into()); - } - - let major = parts[0].parse::()?; - let minor = parts[1].parse::()?; - let patch = parts[2].parse::()?; - - Ok((major, minor, patch)) -} - -fn bump_version(bump_type: &str) { - println!("šŸ”„ Bumping {bump_type} version..."); - - let current_version = match get_workspace_version() { - Ok(v) => v, - Err(e) => { - 
eprintln!("āŒ Failed to get current version: {e}"); - return; - } - }; - - let (major, minor, patch) = match parse_version(¤t_version) { - Ok(v) => v, - Err(e) => { - eprintln!("āŒ Failed to parse version {current_version}: {e}"); - return; - } - }; - - let new_version = match bump_type { - "patch" => format!("{}.{}.{}", major, minor, patch + 1), - "minor" => format!("{}.{}.0", major, minor + 1), - "major" => format!("{}.0.0", major + 1), - _ => { - eprintln!("āŒ Invalid bump type: {bump_type}"); - return; - } - }; - - println!("šŸ“ {current_version} → {new_version}"); - - if let Err(e) = set_workspace_version(&new_version) { - eprintln!("āŒ Failed to update workspace version: {e}"); - return; - } - - println!("āœ… Updated workspace version to {new_version}"); - sync_all_versions(); -} - -fn sync_all_versions() { - println!("šŸ”„ Syncing all package versions..."); - - // First, sync versions within Cargo.toml - if let Err(e) = sync_cargo_versions() { - eprintln!("āŒ Failed to sync versions in Cargo.toml: {e}"); - return; - } - - let workspace_version = match get_workspace_version() { - Ok(v) => v, - Err(e) => { - eprintln!("āŒ Failed to get workspace version: {e}"); - return; - } - }; - - println!("šŸ“¦ Workspace version: {workspace_version}"); - - // Sync Node.js bindings - sync_nodejs_bindings(&workspace_version); - - // Sync MCP agent - sync_mcp_agent(&workspace_version); - - // Sync Browser Extension - sync_browser_extension(&workspace_version); - - // Update Cargo.lock - println!("šŸ”’ Updating Cargo.lock..."); - if let Err(e) = run_command("cargo", &["check", "--quiet"]) { - eprintln!("āš ļø Warning: Failed to update Cargo.lock: {e}"); - } - - println!("āœ… All versions synchronized!"); -} - -fn sync_nodejs_bindings(version: &str) { - println!("šŸ“¦ Syncing Node.js bindings to version {version}..."); - - let nodejs_dir = Path::new("bindings/nodejs"); - if !nodejs_dir.exists() { - println!("āš ļø Node.js bindings directory not found, skipping"); - 
return; - } - - // Update main package.json directly - if let Err(e) = update_package_json("bindings/nodejs/package.json", version) { - eprintln!("āš ļø Warning: Failed to update Node.js package.json directly: {e}"); - } else { - println!("āœ… Updated Node.js package.json to {version}"); - } - - // ALSO update CPU/platform-specific packages under bindings/nodejs/npm - let npm_dir = nodejs_dir.join("npm"); - if npm_dir.exists() { - if let Ok(entries) = fs::read_dir(&npm_dir) { - for entry in entries.flatten() { - if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { - let package_json = entry.path().join("package.json"); - if package_json.exists() { - if let Err(e) = - update_package_json(&package_json.to_string_lossy(), version) - { - eprintln!( - "āš ļø Warning: Failed to update {}: {}", - package_json.display(), - e - ); - } else { - println!("šŸ“¦ Updated {}", entry.file_name().to_string_lossy()); - } - } - } - } - } - } - - // Run sync script if it exists (still useful for additional tasks like N-API metadata) - let original_dir = match env::current_dir() { - Ok(dir) => dir, - Err(e) => { - eprintln!("āŒ Could not get current directory: {e}"); - return; - } - }; - - if env::set_current_dir(nodejs_dir).is_ok() { - println!("šŸ”„ Running npm run sync-version..."); - if run_command("npm", &["run", "sync-version"]).is_ok() { - println!("āœ… Node.js sync script completed"); - } else { - eprintln!("āš ļø Warning: npm run sync-version failed"); - } - // Always change back to the original directory - if let Err(e) = env::set_current_dir(&original_dir) { - eprintln!("āŒ Failed to restore original directory: {e}"); - std::process::exit(1); // Exit if we can't get back, to avoid further errors - } - } else { - eprintln!("āš ļø Warning: Could not switch to Node.js directory"); - } -} - -fn sync_mcp_agent(version: &str) { - println!("šŸ“¦ Syncing MCP agent..."); - - let mcp_dir = Path::new("terminator-mcp-agent"); - if !mcp_dir.exists() { - return; - } - - // 
Update main package.json - if let Err(e) = update_package_json("terminator-mcp-agent/package.json", version) { - eprintln!("āš ļø Warning: Failed to update MCP agent package.json: {e}"); - return; - } - - // Update platform packages - let npm_dir = mcp_dir.join("npm"); - if npm_dir.exists() { - if let Ok(entries) = fs::read_dir(npm_dir) { - for entry in entries.flatten() { - if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { - let package_json = entry.path().join("package.json"); - if package_json.exists() { - if let Err(e) = - update_package_json(&package_json.to_string_lossy(), version) - { - eprintln!( - "āš ļø Warning: Failed to update {}: {}", - entry.path().display(), - e - ); - } else { - println!("šŸ“¦ Updated {}", entry.file_name().to_string_lossy()); - } - } - } - } - } - } - - // Update package-lock.json - let original_dir = match env::current_dir() { - Ok(dir) => dir, - Err(e) => { - eprintln!("āŒ Could not get current directory: {e}"); - return; - } - }; - - if env::set_current_dir(mcp_dir).is_ok() { - if run_command("npm", &["install", "--package-lock-only", "--silent"]).is_ok() { - println!("āœ… MCP package-lock.json updated."); - } else { - eprintln!("āš ļø Warning: Failed to update MCP agent package-lock.json"); - } - // Always change back to the original directory - if let Err(e) = env::set_current_dir(&original_dir) { - eprintln!("āŒ Failed to restore original directory: {e}"); - std::process::exit(1); - } - } - - println!("āœ… MCP agent synced"); -} - -fn sync_browser_extension(version: &str) { - println!("šŸ“¦ Syncing browser extension to version {version}..."); - - let ext_dir = Path::new("terminator/browser-extension"); - if !ext_dir.exists() { - println!("āš ļø Browser extension directory not found, skipping"); - return; - } - - let manifest_path = ext_dir.join("manifest.json"); - if manifest_path.exists() { - if let Err(e) = update_json_version(&manifest_path.to_string_lossy(), version) { - eprintln!( - "āš ļø Warning: 
Failed to update {}: {}", - manifest_path.display(), - e - ); - } else { - println!("āœ… Updated manifest.json to {version}"); - } - } - - let build_check_path = ext_dir.join("build_check.json"); - if build_check_path.exists() { - if let Err(e) = update_json_version(&build_check_path.to_string_lossy(), version) { - eprintln!( - "āš ļø Warning: Failed to update {}: {}", - build_check_path.display(), - e - ); - } else { - println!("āœ… Updated build_check.json to {version}"); - } - } -} - -fn update_package_json(path: &str, version: &str) -> Result<(), Box> { - let content = fs::read_to_string(path)?; - let mut pkg: serde_json::Value = serde_json::from_str(&content)?; - - // Update main version - pkg["version"] = serde_json::Value::String(version.to_string()); - - // Update optional dependencies that start with terminator-mcp- or terminator.js- - if let Some(deps) = pkg - .get_mut("optionalDependencies") - .and_then(|v| v.as_object_mut()) - { - for (key, value) in deps.iter_mut() { - if key.starts_with("terminator-mcp-") || key.starts_with("terminator.js-") { - *value = serde_json::Value::String(version.to_string()); - } - } - } - - // Write back with pretty formatting - let formatted = serde_json::to_string_pretty(&pkg)?; - fs::write(path, formatted + "\n")?; - - Ok(()) -} - -fn update_json_version(path: &str, version: &str) -> Result<(), Box> { - let content = fs::read_to_string(path)?; - let mut json_value: serde_json::Value = serde_json::from_str(&content)?; - - json_value["version"] = serde_json::Value::String(version.to_string()); - - let formatted = serde_json::to_string_pretty(&json_value)?; - fs::write(path, formatted + "\n")?; - - Ok(()) -} - -fn show_status() { - println!("šŸ“Š Terminator Project Status"); - println!("============================"); - - let workspace_version = get_workspace_version().unwrap_or_else(|_| "ERROR".to_string()); - println!("šŸ“¦ Workspace version: {workspace_version}"); - - // Show package versions - let nodejs_version = 
get_package_version("bindings/nodejs/package.json"); - let mcp_version = get_package_version("terminator-mcp-agent/package.json"); - let browser_extension_version = - get_package_version("terminator/browser-extension/manifest.json"); - - println!(); - println!("Package versions:"); - println!(" Node.js bindings: {nodejs_version}"); - println!(" MCP agent: {mcp_version}"); - println!(" Browser extension:{browser_extension_version}"); - - // Git status - println!(); - println!("Git status:"); - if let Ok(output) = Command::new("git").args(["status", "--porcelain"]).output() { - let status = String::from_utf8_lossy(&output.stdout); - if status.trim().is_empty() { - println!(" āœ… Working directory clean"); - } else { - println!(" āš ļø Uncommitted changes:"); - for line in status.lines().take(5) { - println!(" {line}"); - } - } - } -} - -fn get_package_version(path: &str) -> String { - match fs::read_to_string(path) { - Ok(content) => match serde_json::from_str::(&content) { - Ok(pkg) => pkg - .get("version") - .and_then(|v| v.as_str()) - .map(|s| s.to_string()) - .unwrap_or_else(|| "No version field".to_string()), - Err(_) => "Parse error".to_string(), - }, - Err(_) => "Not found".to_string(), - } -} - -fn tag_and_push() { - let version = match get_workspace_version() { - Ok(v) => v, - Err(e) => { - eprintln!("āŒ Failed to get current version: {e}"); - return; - } - }; - - println!("šŸ·ļø Tagging and pushing version {version}..."); - - // Check for uncommitted changes - if let Ok(output) = Command::new("git").args(["diff", "--name-only"]).output() { - let diff = String::from_utf8_lossy(&output.stdout); - if !diff.trim().is_empty() { - println!("āš ļø Uncommitted changes detected. 
Committing..."); - if let Err(e) = run_command("git", &["add", "."]) { - eprintln!("āŒ Failed to git add: {e}"); - return; - } - if let Err(e) = run_command( - "git", - &["commit", "-m", &format!("Bump version to {version}")], - ) { - eprintln!("āŒ Failed to git commit: {e}"); - return; - } - } - } - - // Create tag - let tag = format!("v{version}"); - if let Err(e) = run_command( - "git", - &[ - "tag", - "-a", - &tag, - "-m", - &format!("Release version {version}"), - ], - ) { - eprintln!("āŒ Failed to create tag: {e}"); - return; - } - - // Push changes and tag - if let Err(e) = run_command("git", &["push", "origin", "main"]) { - eprintln!("āŒ Failed to push changes: {e}"); - return; - } - - if let Err(e) = run_command("git", &["push", "origin", &tag]) { - eprintln!("āŒ Failed to push tag: {e}"); - return; - } - - println!("āœ… Successfully released version {version}!"); - println!("šŸ”— Check CI: https://github.com/mediar-ai/terminator/actions"); -} - -fn full_release(bump_type: &str) { - println!("šŸš€ Starting full release process with {bump_type} bump..."); - bump_version(bump_type); - tag_and_push(); -} - -fn run_command(program: &str, args: &[&str]) -> Result<(), Box> { - let output = Command::new(program) - .args(args) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .output()?; - - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - return Err(format!( - "Command failed: {} {}\nError: {}", - program, - args.join(" "), - stderr - ) - .into()); - } - - Ok(()) -} - -fn handle_mcp_command(cmd: McpCommands) { - let transport = match cmd { - McpCommands::Chat(ref args) => parse_transport(args.url.clone(), args.command.clone()), - McpCommands::AiChat(ref args) => parse_transport(args.url.clone(), args.command.clone()), - McpCommands::Exec(ref args) => parse_transport(args.url.clone(), args.command.clone()), - McpCommands::Run(ref args) => parse_transport(args.url.clone(), args.command.clone()), - }; - - let rt = 
tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - - let result = rt.block_on(async { - match cmd { - McpCommands::Chat(_) => mcp_client::interactive_chat(transport).await, - McpCommands::AiChat(_) => mcp_client::natural_language_chat(transport).await, - McpCommands::Exec(args) => { - mcp_client::execute_command(transport, args.tool, args.args).await - } - McpCommands::Run(args) => run_workflow(transport, args).await, - } - }); - - if let Err(e) = result { - eprintln!("āŒ MCP command error: {e}"); - std::process::exit(1); - } -} - -fn parse_transport(url: Option, command: Option) -> mcp_client::Transport { - if let Some(url) = url { - mcp_client::Transport::Http(url) - } else if let Some(command) = command { - let parts = parse_command(&command); - mcp_client::Transport::Stdio(parts) - } else { - // Default to spawning local MCP agent via npx for convenience - let default_cmd = "npx -y terminator-mcp-agent@latest"; - println!("ā„¹ļø No --url or --command specified. 
Falling back to '{default_cmd}'"); - let parts = parse_command(default_cmd); - mcp_client::Transport::Stdio(parts) - } -} - -fn parse_command(command: &str) -> Vec { - // Simple command parsing - splits by spaces but respects quotes - let mut parts = Vec::new(); - let mut current = String::new(); - let mut in_quotes = false; - - for c in command.chars() { - match c { - '"' => in_quotes = !in_quotes, - ' ' if !in_quotes => { - if !current.is_empty() { - parts.push(current.clone()); - current.clear(); - } - } - _ => current.push(c), - } - } - - if !current.is_empty() { - parts.push(current); - } - - parts -} - -async fn run_workflow(transport: mcp_client::Transport, args: McpRunArgs) -> anyhow::Result<()> { - use tracing::info; - - if args.verbose { - // Keep rmcp quieter even in verbose mode unless user explicitly overrides - std::env::set_var("RUST_LOG", "debug,rmcp=warn"); - } - - // Initialize simple logging (only if not already initialized) - { - use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; - let _ = tracing_subscriber::registry() - .with( - tracing_subscriber::EnvFilter::try_from_default_env() - // Suppress noisy rmcp info logs by default while keeping our own at info - .unwrap_or_else(|_| "info,rmcp=warn".into()), - ) - .with(tracing_subscriber::fmt::layer()) - .try_init(); // Use try_init instead of init to avoid panics on duplicate initialization - } - - info!("Starting workflow execution via terminator CLI"); - info!(input = %args.input, ?args.input_type); - - // Resolve actual input type (auto-detect if needed) - let resolved_type = determine_input_type(&args.input, args.input_type); - - // Fetch workflow content - let content = match resolved_type { - InputType::File => { - info!("Reading local file"); - read_local_file(&args.input).await? - } - InputType::Gist => { - info!("Fetching GitHub gist"); - let raw_url = convert_gist_to_raw_url(&args.input)?; - fetch_remote_content(&raw_url).await? 
- } - InputType::Raw => { - info!("Fetching raw URL"); - fetch_remote_content(&args.input).await? - } - InputType::Auto => unreachable!(), - }; - - // Parse workflow using the same robust logic as gist_executor - let mut workflow_val = parse_workflow_content(&content) - .with_context(|| format!("Failed to parse workflow from {}", args.input))?; - - // Handle cron scheduling if specified in workflow - if let Some(cron_expr) = extract_cron_from_workflow(&workflow_val) { - info!( - "šŸ• Starting cron scheduler with workflow expression: {}", - cron_expr - ); - return run_workflow_with_cron(transport, args, &cron_expr).await; - } - - // Validate workflow structure early to catch issues - validate_workflow(&workflow_val).with_context(|| "Workflow validation failed")?; - - // Get steps count for logging - let steps_count = workflow_val - .get("steps") - .and_then(|v| v.as_array()) - .map(|arr| arr.len()) - .unwrap_or(0); - - info!( - "Successfully parsed and validated workflow with {} steps", - steps_count - ); - - // Apply overrides - if let Some(obj) = workflow_val.as_object_mut() { - if args.no_stop_on_error { - obj.insert("stop_on_error".into(), serde_json::Value::Bool(false)); - } - if args.no_detailed_results { - obj.insert( - "include_detailed_results".into(), - serde_json::Value::Bool(false), - ); - } - } - - if args.dry_run { - println!("āœ… Workflow validation successful!"); - println!("šŸ“Š Workflow Summary:"); - println!(" • Steps: {steps_count}"); - - if let Some(variables) = workflow_val.get("variables").and_then(|v| v.as_object()) { - println!(" • Variables: {}", variables.len()); - } else { - println!(" • Variables: 0"); - } - - if let Some(selectors) = workflow_val.get("selectors").and_then(|v| v.as_object()) { - println!(" • Selectors: {}", selectors.len()); - } else { - println!(" • Selectors: 0"); - } - - let stop_on_error = workflow_val - .get("stop_on_error") - .and_then(|v| v.as_bool()) - .unwrap_or(true); - println!(" • Stop on error: 
{stop_on_error}"); - - return Ok(()); - } - - info!("Executing workflow with {steps_count} steps via MCP"); - - let workflow_str = serde_json::to_string(&workflow_val)?; - - let result_json = mcp_client::execute_command_with_progress_and_retry( - transport, - "execute_sequence".to_string(), - Some(workflow_str), - true, // Show progress for workflow steps - args.no_retry, - ) - .await?; - - // Parse and display the workflow result - let workflow_result = WorkflowResult::from_mcp_response(&result_json)?; - - // Display result in user-friendly format - workflow_result.display(); - - // If verbose mode, also show raw JSON - if args.verbose { - println!("šŸ“ Raw MCP Response:"); - println!("{}", serde_json::to_string_pretty(&result_json)?); - } - - // Exit with appropriate code based on success - if !workflow_result.success { - std::process::exit(1); - } - - Ok(()) -} - -/// Extract cron expression from workflow YAML -fn extract_cron_from_workflow(workflow: &Value) -> Option { - // Primary format: cron field at root level (simpler format) - if let Some(cron) = workflow.get("cron") { - if let Some(cron_str) = cron.as_str() { - return Some(cron_str.to_string()); - } - } - - // Alternative: GitHub Actions style: on.schedule.cron - if let Some(on) = workflow.get("on") { - if let Some(schedule) = on.get("schedule") { - // Handle both single cron and array of crons - if let Some(cron_array) = schedule.as_array() { - // If it's an array, take the first cron expression - if let Some(first_schedule) = cron_array.first() { - if let Some(cron) = first_schedule.get("cron") { - if let Some(cron_str) = cron.as_str() { - return Some(cron_str.to_string()); - } - } - } - } else if let Some(cron) = schedule.get("cron") { - // Handle single cron expression - if let Some(cron_str) = cron.as_str() { - return Some(cron_str.to_string()); - } - } - } - } - - None -} - -/// Execute workflow with cron scheduling -async fn run_workflow_with_cron( - transport: mcp_client::Transport, - args: 
McpRunArgs, - cron_expr: &str, -) -> anyhow::Result<()> { - use tokio_cron_scheduler::{Job, JobScheduler}; - use tracing::error; - - println!("šŸ• Setting up cron scheduler..."); - println!("šŸ“… Cron expression: {cron_expr}"); - println!("šŸ”„ Workflow will run continuously at scheduled intervals"); - println!("šŸ’” Press Ctrl+C to stop the scheduler"); - - // Try to parse the cron expression to validate it (tokio-cron-scheduler will handle this) - // We'll let tokio-cron-scheduler validate it when we create the job - - // For preview, we'll just show a generic message since calculating next times - // with tokio-cron-scheduler is more complex - println!("šŸ“‹ Workflow will run according to cron schedule: {cron_expr}"); - println!("šŸ’” Note: Exact execution times depend on system clock and scheduler timing"); - - // Create scheduler - let mut sched = JobScheduler::new().await?; - - // Clone transport for the job closure - let transport_clone = transport.clone(); - let args_clone = args.clone(); - - // Create the scheduled job - let job = Job::new_async(cron_expr, move |_uuid, _lock| { - let transport = transport_clone.clone(); - let args = args_clone.clone(); - - Box::pin(async move { - let start_time = std::time::Instant::now(); - println!( - "\nšŸš€ Starting scheduled workflow execution at {}", - chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC") - ); - - match run_workflow_once(transport, args).await { - Ok(_) => { - let duration = start_time.elapsed(); - println!( - "āœ… Scheduled workflow completed successfully in {:.2}s", - duration.as_secs_f64() - ); - } - Err(e) => { - let duration = start_time.elapsed(); - println!( - "āŒ Scheduled workflow failed after {:.2}s: {}", - duration.as_secs_f64(), - e - ); - } - } - }) - })?; - - // Add job to scheduler - sched.add(job).await?; - println!("āœ… Cron job scheduled successfully"); - - // Start the scheduler - sched.start().await?; - println!("ā–¶ļø Scheduler started - workflow will run at scheduled 
intervals"); - - // Set up graceful shutdown - let (shutdown_tx, mut shutdown_rx) = tokio::sync::mpsc::channel(1); - - // Spawn a task to handle Ctrl+C - tokio::spawn(async move { - match tokio::signal::ctrl_c().await { - Ok(()) => { - println!("\nšŸ›‘ Received shutdown signal"); - let _ = shutdown_tx.send(()).await; - } - Err(e) => { - error!("Failed to listen for shutdown signal: {}", e); - } - } - }); - - // Wait for shutdown signal - let _ = shutdown_rx.recv().await; - - println!("šŸ›‘ Shutting down scheduler..."); - sched.shutdown().await?; - println!("āœ… Scheduler stopped successfully"); - - Ok(()) -} - -/// Execute a single workflow run (used by cron scheduler) -async fn run_workflow_once( - transport: mcp_client::Transport, - args: McpRunArgs, -) -> anyhow::Result<()> { - // Resolve actual input type (auto-detect if needed) - let resolved_type = determine_input_type(&args.input, args.input_type); - - // Fetch workflow content - let content = match resolved_type { - InputType::File => read_local_file(&args.input).await?, - InputType::Gist => { - let raw_url = convert_gist_to_raw_url(&args.input)?; - fetch_remote_content(&raw_url).await? 
- } - InputType::Raw => fetch_remote_content(&args.input).await?, - InputType::Auto => unreachable!(), - }; - - // Parse workflow using the same robust logic as gist_executor - let mut workflow_val = parse_workflow_content(&content) - .with_context(|| format!("Failed to parse workflow from {}", args.input))?; - - // Validate workflow structure early to catch issues - validate_workflow(&workflow_val).with_context(|| "Workflow validation failed")?; - - // Apply overrides - if let Some(obj) = workflow_val.as_object_mut() { - if args.no_stop_on_error { - obj.insert("stop_on_error".into(), serde_json::Value::Bool(false)); - } - if args.no_detailed_results { - obj.insert( - "include_detailed_results".into(), - serde_json::Value::Bool(false), - ); - } - } - - // For cron jobs, use simple execution to avoid connection spam - let workflow_str = serde_json::to_string(&workflow_val)?; - let result_json = mcp_client::execute_command_with_progress_and_retry( - transport, - "execute_sequence".to_string(), - Some(workflow_str), - true, // Show progress for workflow steps - args.no_retry, - ) - .await?; - - // Parse the workflow result - let workflow_result = WorkflowResult::from_mcp_response(&result_json)?; - - // For cron jobs, log success/failure/skipped - use workflow_result::WorkflowState; - match workflow_result.state { - WorkflowState::Success => { - println!(" āœ… {}", workflow_result.message); - if let Some(Value::Array(arr)) = &workflow_result.data { - println!(" šŸ“Š Extracted {} items", arr.len()); - } - } - WorkflowState::Skipped => { - println!(" ā­ļø {}", workflow_result.message); - if let Some(Value::Object(data)) = &workflow_result.data { - if let Some(reason) = data.get("reason").and_then(|r| r.as_str()) { - println!(" šŸ“ Reason: {reason}"); - } - } - } - WorkflowState::Failure => { - println!(" āŒ {}", workflow_result.message); - if let Some(error) = &workflow_result.error { - println!(" āš ļø {error}"); - } - } - } - - Ok(()) -} - -fn 
determine_input_type(input: &str, specified_type: InputType) -> InputType { - match specified_type { - InputType::Auto => { - if input.starts_with("https://gist.github.com/") { - InputType::Gist - } else if input.starts_with("https://gist.githubusercontent.com/") - || input.starts_with("http://") - || input.starts_with("https://") - { - InputType::Raw - } else { - InputType::File - } - } - other => other, - } -} - -fn convert_gist_to_raw_url(gist_url: &str) -> anyhow::Result { - if !gist_url.starts_with("https://gist.github.com/") { - return Err(anyhow::anyhow!("Invalid GitHub gist URL format")); - } - - let raw_url = gist_url.replace( - "https://gist.github.com/", - "https://gist.githubusercontent.com/", - ); - - Ok(if raw_url.ends_with("/raw") { - raw_url - } else { - format!("{raw_url}/raw") - }) -} - -async fn read_local_file(path: &str) -> anyhow::Result { - use std::path::Path; - use tokio::fs; - - let p = Path::new(path); - if !p.exists() { - return Err(anyhow::anyhow!("File not found: {}", p.display())); - } - if !p.is_file() { - return Err(anyhow::anyhow!("Not a file: {}", p.display())); - } - - fs::read_to_string(p).await.map_err(|e| e.into()) -} - -async fn fetch_remote_content(url: &str) -> anyhow::Result { - let client = reqwest::Client::new(); - let res = client - .get(url) - .header("User-Agent", "terminator-cli-workflow/1.0") - .send() - .await?; - if !res.status().is_success() { - return Err(anyhow::anyhow!( - "HTTP request failed: {} for {}", - res.status(), - url - )); - } - Ok(res.text().await?) -} - -/// Parse workflow content using robust parsing strategies from gist_executor.rs -fn parse_workflow_content(content: &str) -> anyhow::Result { - // Strategy 1: Try direct JSON workflow - if let Ok(val) = serde_json::from_str::(content) { - // Check if it's a valid workflow (has steps field) - if val.get("steps").is_some() { - return Ok(val); - } - - // Check if it's a wrapper object - if let Some(extracted) = extract_workflow_from_wrapper(&val)? 
{ - return Ok(extracted); - } - } - - // Strategy 2: Try direct YAML workflow - if let Ok(val) = serde_yaml::from_str::(content) { - // Check if it's a valid workflow (has steps field) - if val.get("steps").is_some() { - return Ok(val); - } - - // Check if it's a wrapper object - if let Some(extracted) = extract_workflow_from_wrapper(&val)? { - return Ok(extracted); - } - } - - // Strategy 3: Try parsing as JSON wrapper first, then extract - if let Ok(val) = serde_json::from_str::(content) { - if let Some(extracted) = extract_workflow_from_wrapper(&val)? { - return Ok(extracted); - } - } - - // Strategy 4: Try parsing as YAML wrapper first, then extract - if let Ok(val) = serde_yaml::from_str::(content) { - if let Some(extracted) = extract_workflow_from_wrapper(&val)? { - return Ok(extracted); - } - } - - Err(anyhow::anyhow!( - "Unable to parse content as JSON or YAML workflow or wrapper object. Content must either be:\n\ - 1. A workflow with 'steps' field\n\ - 2. A wrapper object with tool_name='execute_sequence' and 'arguments' field\n\ - 3. 
Valid JSON or YAML format" - )) -} - -/// Extract workflow from wrapper object if it has tool_name: execute_sequence -fn extract_workflow_from_wrapper( - value: &serde_json::Value, -) -> anyhow::Result> { - if let Some(tool_name) = value.get("tool_name") { - if tool_name == "execute_sequence" { - if let Some(arguments) = value.get("arguments") { - return Ok(Some(arguments.clone())); - } else { - return Err(anyhow::anyhow!("Tool call missing 'arguments' field")); - } - } - } - Ok(None) -} - -/// Validate workflow structure to provide early error detection -fn validate_workflow(workflow: &serde_json::Value) -> anyhow::Result<()> { - // Check that it's an object - let obj = workflow - .as_object() - .ok_or_else(|| anyhow::anyhow!("Workflow must be a JSON object"))?; - - // Check that steps exists and is an array - let steps = obj - .get("steps") - .ok_or_else(|| anyhow::anyhow!("Workflow must contain a 'steps' field"))?; - - let steps_array = steps - .as_array() - .ok_or_else(|| anyhow::anyhow!("'steps' field must be an array"))?; - - if steps_array.is_empty() { - return Err(anyhow::anyhow!("Workflow must contain at least one step")); - } - - // Validate each step - for (i, step) in steps_array.iter().enumerate() { - let step_obj = step - .as_object() - .ok_or_else(|| anyhow::anyhow!("Step {} must be an object", i))?; - - let has_tool_name = step_obj.contains_key("tool_name"); - let has_group_name = step_obj.contains_key("group_name"); - - if !has_tool_name && !has_group_name { - return Err(anyhow::anyhow!( - "Step {} must have either 'tool_name' or 'group_name'", - i - )); - } - - if has_tool_name && has_group_name { - return Err(anyhow::anyhow!( - "Step {} cannot have both 'tool_name' and 'group_name'", - i - )); - } - } - - // Validate variables if present - if let Some(variables) = obj.get("variables") { - if let Some(vars_obj) = variables.as_object() { - for (name, def) in vars_obj { - if name.is_empty() { - return Err(anyhow::anyhow!("Variable name cannot be 
empty")); - } - - if let Some(def_obj) = def.as_object() { - // Ensure label exists and is non-empty - if let Some(label) = def_obj.get("label") { - if let Some(label_str) = label.as_str() { - if label_str.is_empty() { - return Err(anyhow::anyhow!( - "Variable '{}' must have a non-empty label", - name - )); - } - } - } else { - return Err(anyhow::anyhow!( - "Variable '{}' must have a 'label' field", - name - )); - } - - // --------------------- NEW VALIDATION --------------------- - // Enforce `required` property logic - let is_required = def_obj - .get("required") - .and_then(|v| v.as_bool()) - .unwrap_or(true); - - if is_required { - // Check for default value in definition - let has_default = def_obj.contains_key("default"); - - // Check if inputs provide a value for this variable - let input_has_value = obj - .get("inputs") - .and_then(|v| v.as_object()) - .map(|inputs_obj| inputs_obj.contains_key(name)) - .unwrap_or(false); - - if !has_default && !input_has_value { - return Err(anyhow::anyhow!( - "Required variable '{}' is missing and has no default value", - name - )); - } - } - // ---------------------------------------------------------------- - } - } - } - } - - Ok(()) -} diff --git a/terminator-cli/src/mcp_client.rs b/terminator-cli/src/mcp_client.rs deleted file mode 100644 index e0daa646..00000000 --- a/terminator-cli/src/mcp_client.rs +++ /dev/null @@ -1,1222 +0,0 @@ -use anyhow::Result; -use rmcp::{ - model::{CallToolRequestParam, ClientCapabilities, ClientInfo, Implementation}, - object, - transport::{StreamableHttpClientTransport, TokioChildProcess}, - ServiceExt, -}; -use std::io::{self, Write}; -use std::time::Duration; -use tokio::process::Command; -use tokio::time::sleep; -use tracing::info; - -use anthropic_sdk::{Client as AnthropicClient, ToolChoice}; -use serde_json::json; -use std::sync::{Arc, Mutex}; - -#[derive(Clone)] -pub enum Transport { - Http(String), - Stdio(Vec), -} - -/// Check if the path is a Windows batch file -fn 
is_batch_file(path: &str) -> bool { - path.ends_with(".bat") || path.ends_with(".cmd") -} - -/// Create command with proper handling for batch files on Windows -fn create_command(executable: &str, args: &[String]) -> Command { - let mut cmd = if cfg!(windows) && is_batch_file(executable) { - // For batch files on Windows, use cmd.exe /c - let mut cmd = Command::new("cmd"); - cmd.arg("/c"); - cmd.arg(executable); - cmd - } else { - Command::new(executable) - }; - - if !args.is_empty() { - cmd.args(args); - } - - cmd -} - -/// Find executable with cross-platform path resolution -fn find_executable(name: &str) -> Option { - use std::env; - use std::path::Path; - - // On Windows, try multiple extensions, prioritizing executable types - let candidates = if cfg!(windows) { - vec![ - format!("{}.exe", name), - format!("{}.cmd", name), - format!("{}.bat", name), - name.to_string(), - ] - } else { - vec![name.to_string()] - }; - - // Check each candidate in PATH - if let Ok(path_var) = env::var("PATH") { - let separator = if cfg!(windows) { ";" } else { ":" }; - - for path_dir in path_var.split(separator) { - let path_dir = Path::new(path_dir); - - for candidate in &candidates { - let full_path = path_dir.join(candidate); - if full_path.exists() && full_path.is_file() { - return Some(full_path.to_string_lossy().to_string()); - } - } - } - } - - // Fallback: try the name as-is (might work on some systems) - Some(name.to_string()) -} - -pub async fn interactive_chat(transport: Transport) -> Result<()> { - println!("šŸ¤– Terminator MCP Chat Client"); - println!("============================="); - - match transport { - Transport::Http(url) => { - println!("Connecting to: {url}"); - let transport = StreamableHttpClientTransport::from_uri(url.as_str()); - let client_info = ClientInfo { - protocol_version: Default::default(), - capabilities: ClientCapabilities::default(), - client_info: Implementation { - name: "terminator-cli".to_string(), - version: 
env!("CARGO_PKG_VERSION").to_string(), - }, - }; - let service = client_info.serve(transport).await?; - - // Get server info - let server_info = service.peer_info(); - if let Some(info) = server_info { - println!("āœ… Connected to server: {}", info.server_info.name); - println!(" Version: {}", info.server_info.version); - } - - // List available tools - let tools = service.list_all_tools().await?; - println!("\nšŸ“‹ Available tools ({}):", tools.len()); - for (i, tool) in tools.iter().enumerate() { - if i < 10 { - println!( - " šŸ”§ {} - {}", - tool.name, - tool.description.as_deref().unwrap_or("No description") - ); - } else if i == 10 { - println!(" ... and {} more tools", tools.len() - 10); - break; - } - } - - println!("\nšŸ’” Examples:"); - println!(" - get_desktop_info"); - println!(" - list_applications"); - println!(" - open_application notepad"); - println!(" - type_text 'Hello from Terminator!'"); - println!(" - take_screenshot"); - println!("\nType 'help' to see all tools, 'exit' to quit"); - println!("=====================================\n"); - - let stdin = io::stdin(); - let mut stdout = io::stdout(); - - loop { - print!("šŸ”§ Tool (or command): "); - stdout.flush()?; - - let mut input = String::new(); - stdin.read_line(&mut input)?; - let input = input.trim(); - - if input.is_empty() { - continue; - } - - if input == "exit" || input == "quit" { - println!("šŸ‘‹ Goodbye!"); - break; - } - - if input == "help" { - println!("\nšŸ“š All available tools:"); - for tool in &tools { - println!( - " {} - {}", - tool.name, - tool.description.as_deref().unwrap_or("No description") - ); - if let Some(props) = tool.input_schema.get("properties") { - println!(" Parameters: {}", serde_json::to_string(props)?); - } - } - println!(); - continue; - } - - // Parse tool call - let parts: Vec<&str> = input.splitn(2, ' ').collect(); - let tool_name = parts[0].to_string(); - - // Build arguments - let arguments = if parts.len() > 1 { - let args_part = parts[1]; - // Try 
to parse as JSON first - if let Ok(json) = serde_json::from_str::(args_part) { - json.as_object().cloned() - } else { - // Otherwise, try to build simple arguments - match tool_name.as_str() { - "open_application" => Some(object!({ "name": args_part.to_string() })), - "type_text" => Some(object!({ "text": args_part.to_string() })), - _ => None, - } - } - } else { - None - }; - - println!( - "\n⚔ Calling {} with args: {}", - tool_name, - arguments - .as_ref() - .map(|a| serde_json::to_string(a).unwrap_or_default()) - .unwrap_or_else(|| "{}".to_string()) - ); - - match service - .call_tool(CallToolRequestParam { - name: tool_name.into(), - arguments, - }) - .await - { - Ok(result) => { - println!("āœ… Result:"); - if !result.content.is_empty() { - for content in &result.content { - match &content.raw { - rmcp::model::RawContent::Text(text) => { - println!("{}", text.text); - } - rmcp::model::RawContent::Image(image) => { - println!("[Image: {}]", image.mime_type); - } - rmcp::model::RawContent::Resource(resource) => { - println!("[Resource: {:?}]", resource.resource); - } - rmcp::model::RawContent::Audio(audio) => { - println!("[Audio: {}]", audio.mime_type); - } - rmcp::model::RawContent::ResourceLink(resource) => { - println!("[ResourceLink: {resource:?}]"); - } - } - } - } - println!(); - } - Err(e) => { - println!("āŒ Error: {e}\n"); - } - } - } - - // Cancel the service connection - service.cancel().await?; - } - Transport::Stdio(command) => { - println!("Starting: {}", command.join(" ")); - let executable = find_executable(&command[0]).unwrap_or_else(|| command[0].clone()); - let command_args: Vec = if command.len() > 1 { - command[1..].to_vec() - } else { - vec![] - }; - let mut cmd = create_command(&executable, &command_args); - // Ensure server prints useful logs if not set by user - if std::env::var("LOG_LEVEL").is_err() && std::env::var("RUST_LOG").is_err() { - cmd.env("LOG_LEVEL", "info"); - } - let transport = TokioChildProcess::new(cmd)?; - let service 
= ().serve(transport).await?; - // Get server info - let server_info = service.peer_info(); - if let Some(info) = server_info { - println!("āœ… Connected to server: {}", info.server_info.name); - println!(" Version: {}", info.server_info.version); - } - - // List available tools - let tools = service.list_all_tools().await?; - println!("\nšŸ“‹ Available tools ({}):", tools.len()); - for (i, tool) in tools.iter().enumerate() { - if i < 10 { - println!( - " šŸ”§ {} - {}", - tool.name, - tool.description.as_deref().unwrap_or("No description") - ); - } else if i == 10 { - println!(" ... and {} more tools", tools.len() - 10); - break; - } - } - - println!("\nšŸ’” Examples:"); - println!(" - get_desktop_info"); - println!(" - list_applications"); - println!(" - open_application notepad"); - println!(" - type_text 'Hello from Terminator!'"); - println!(" - take_screenshot"); - println!("\nType 'help' to see all tools, 'exit' to quit"); - println!("=====================================\n"); - - let stdin = io::stdin(); - let mut stdout = io::stdout(); - - loop { - print!("šŸ”§ Tool (or command): "); - stdout.flush()?; - - let mut input = String::new(); - stdin.read_line(&mut input)?; - let input = input.trim(); - - if input.is_empty() { - continue; - } - - if input == "exit" || input == "quit" { - println!("šŸ‘‹ Goodbye!"); - break; - } - - if input == "help" { - println!("\nšŸ“š All available tools:"); - for tool in &tools { - println!( - " {} - {}", - tool.name, - tool.description.as_deref().unwrap_or("No description") - ); - if let Some(props) = tool.input_schema.get("properties") { - println!(" Parameters: {}", serde_json::to_string(props)?); - } - } - println!(); - continue; - } - - // Parse tool call - let parts: Vec<&str> = input.splitn(2, ' ').collect(); - let tool_name = parts[0].to_string(); - - // Build arguments - let arguments = if parts.len() > 1 { - let args_part = parts[1]; - // Try to parse as JSON first - if let Ok(json) = 
serde_json::from_str::(args_part) { - json.as_object().cloned() - } else { - // Otherwise, try to build simple arguments - match tool_name.as_str() { - "open_application" => Some(object!({ "name": args_part.to_string() })), - "type_text" => Some(object!({ "text": args_part.to_string() })), - _ => None, - } - } - } else { - None - }; - - println!( - "\n⚔ Calling {} with args: {}", - tool_name, - arguments - .as_ref() - .map(|a| serde_json::to_string(a).unwrap_or_default()) - .unwrap_or_else(|| "{}".to_string()) - ); - - match service - .call_tool(CallToolRequestParam { - name: tool_name.into(), - arguments, - }) - .await - { - Ok(result) => { - println!("āœ… Result:"); - if !result.content.is_empty() { - for content in &result.content { - match &content.raw { - rmcp::model::RawContent::Text(text) => { - println!("{}", text.text); - } - rmcp::model::RawContent::Image(image) => { - println!("[Image: {}]", image.mime_type); - } - rmcp::model::RawContent::Resource(resource) => { - println!("[Resource: {:?}]", resource.resource); - } - rmcp::model::RawContent::Audio(audio) => { - println!("[Audio: {}]", audio.mime_type); - } - rmcp::model::RawContent::ResourceLink(resource) => { - println!("[ResourceLink: {resource:?}]"); - } - } - } - } - println!(); - } - Err(e) => { - println!("āŒ Error: {e}\n"); - } - } - } - - // Cancel the service connection - service.cancel().await?; - } - } - Ok(()) -} - -pub async fn execute_command( - transport: Transport, - tool: String, - args: Option, -) -> Result<()> { - // Initialize logging for non-interactive mode - init_logging(); - - match transport { - Transport::Http(url) => { - info!("Connecting to server: {}", url); - let transport = StreamableHttpClientTransport::from_uri(url.as_str()); - let client_info = ClientInfo { - protocol_version: Default::default(), - capabilities: ClientCapabilities::default(), - client_info: Implementation { - name: "terminator-cli".to_string(), - version: env!("CARGO_PKG_VERSION").to_string(), - }, - 
}; - let service = client_info.serve(transport).await?; - - let arguments = if let Some(args_str) = args { - serde_json::from_str::(&args_str) - .ok() - .and_then(|v| v.as_object().cloned()) - } else { - None - }; - - println!( - "⚔ Calling {} with args: {}", - tool, - arguments - .as_ref() - .map(|a| serde_json::to_string(a).unwrap_or_default()) - .unwrap_or_else(|| "{}".to_string()) - ); - - let result = service - .call_tool(CallToolRequestParam { - name: tool.into(), - arguments, - }) - .await?; - - println!("āœ… Result:"); - if !result.content.is_empty() { - for content in &result.content { - match &content.raw { - rmcp::model::RawContent::Text(text) => { - println!("{}", text.text); - } - rmcp::model::RawContent::Image(image) => { - println!("[Image: {}]", image.mime_type); - } - rmcp::model::RawContent::Resource(resource) => { - println!("[Resource: {:?}]", resource.resource); - } - rmcp::model::RawContent::Audio(audio) => { - println!("[Audio: {}]", audio.mime_type); - } - rmcp::model::RawContent::ResourceLink(resource) => { - println!("[ResourceLink: {resource:?}]"); - } - } - } - } - - // Cancel the service connection - service.cancel().await?; - } - Transport::Stdio(command) => { - info!("Starting MCP server: {}", command.join(" ")); - let executable = find_executable(&command[0]).unwrap_or_else(|| command[0].clone()); - let command_args: Vec = if command.len() > 1 { - command[1..].to_vec() - } else { - vec![] - }; - let mut cmd = create_command(&executable, &command_args); - // Default server log level to info if not provided by the user - if std::env::var("LOG_LEVEL").is_err() && std::env::var("RUST_LOG").is_err() { - cmd.env("LOG_LEVEL", "info"); - } - let transport = TokioChildProcess::new(cmd)?; - let service = ().serve(transport).await?; - - let arguments = if let Some(args_str) = args { - serde_json::from_str::(&args_str) - .ok() - .and_then(|v| v.as_object().cloned()) - } else { - None - }; - - println!( - "⚔ Calling {} with args: {}", - tool, - 
arguments - .as_ref() - .map(|a| serde_json::to_string(a).unwrap_or_default()) - .unwrap_or_else(|| "{}".to_string()) - ); - - let result = service - .call_tool(CallToolRequestParam { - name: tool.into(), - arguments, - }) - .await?; - - println!("āœ… Result:"); - if !result.content.is_empty() { - for content in &result.content { - match &content.raw { - rmcp::model::RawContent::Text(text) => { - println!("{}", text.text); - } - rmcp::model::RawContent::Image(image) => { - println!("[Image: {}]", image.mime_type); - } - rmcp::model::RawContent::Resource(resource) => { - println!("[Resource: {:?}]", resource.resource); - } - rmcp::model::RawContent::Audio(audio) => { - println!("[Audio: {}]", audio.mime_type); - } - rmcp::model::RawContent::ResourceLink(resource) => { - println!("[ResourceLink: {resource:?}]"); - } - } - } - } - - // Cancel the service connection - service.cancel().await?; - } - } - Ok(()) -} - -fn init_logging() { - use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; - - let _ = tracing_subscriber::registry() - .with( - // Respect RUST_LOG if provided, else default to info - tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| "info".into()), - ) - .with(tracing_subscriber::fmt::layer()) - .try_init(); -} - -// Helper function to parse step start logs -#[allow(dead_code)] -fn parse_step_log(line: &str) -> Option<(String, String, String)> { - // Parse lines like: "Step 0 BEGIN tool='open_application' id='open_notepad' ..." 
- if let Some(step_idx) = line.find("Step ") { - let after_step = &line[step_idx + 5..]; - if let Some(space_idx) = after_step.find(' ') { - let step_num = &after_step[..space_idx]; - if let Some(tool_idx) = line.find("tool='") { - let after_tool = &line[tool_idx + 6..]; - if let Some(quote_idx) = after_tool.find('\'') { - let tool_name = &after_tool[..quote_idx]; - return Some(( - step_num.to_string(), - "?".to_string(), // We don't have total from logs - tool_name.to_string(), - )); - } - } else if let Some(group_idx) = line.find("group='") { - let after_group = &line[group_idx + 7..]; - if let Some(quote_idx) = after_group.find('\'') { - let group_name = &after_group[..quote_idx]; - return Some(( - step_num.to_string(), - "?".to_string(), - format!("[{group_name}]"), - )); - } - } - } - } - None -} - -// Helper function to parse step end logs -#[allow(dead_code)] -fn parse_step_end_log(line: &str) -> Option<(String, String)> { - // Parse lines like: "Step 0 END tool='open_application' id='open_notepad' status=success" - if let Some(step_idx) = line.find("Step ") { - let after_step = &line[step_idx + 5..]; - if let Some(space_idx) = after_step.find(' ') { - let step_num = &after_step[..space_idx]; - if let Some(status_idx) = line.find("status=") { - let after_status = &line[status_idx + 7..]; - let status = after_status.split_whitespace().next().unwrap_or("unknown"); - return Some((step_num.to_string(), status.to_string())); - } - } - } - None -} - -pub async fn natural_language_chat(transport: Transport) -> Result<()> { - println!("šŸ¤– Terminator Natural Language Chat Client"); - println!("=========================================="); - - // Load Anthropic API Key - dotenvy::dotenv().ok(); - let api_key = match std::env::var("ANTHROPIC_API_KEY") { - Ok(key) => key, - Err(_) => { - println!("āŒ ANTHROPIC_API_KEY environment variable not set."); - println!("Please set it in a .env file or export it:"); - println!(" export ANTHROPIC_API_KEY='your-api-key-here'"); 
- return Ok(()); - } - }; - - // Connect to MCP Server - let service = match transport { - Transport::Http(url) => { - println!("Connecting to MCP server: {url}"); - let transport = StreamableHttpClientTransport::from_uri(url.as_str()); - let client_info = ClientInfo { - protocol_version: Default::default(), - capabilities: ClientCapabilities::default(), - client_info: Implementation { - name: "terminator-cli-ai".to_string(), - version: env!("CARGO_PKG_VERSION").to_string(), - }, - }; - client_info.serve(transport).await? - } - Transport::Stdio(command) => { - println!("Starting MCP server: {}", command.join(" ")); - let executable = find_executable(&command[0]).unwrap_or_else(|| command[0].clone()); - let command_args: Vec = if command.len() > 1 { - command[1..].to_vec() - } else { - vec![] - }; - let mut cmd = create_command(&executable, &command_args); - // Default server log level to info if not provided by the user - if std::env::var("LOG_LEVEL").is_err() && std::env::var("RUST_LOG").is_err() { - cmd.env("LOG_LEVEL", "info"); - } - let transport = TokioChildProcess::new(cmd)?; - let client_info = ClientInfo { - protocol_version: Default::default(), - capabilities: ClientCapabilities::default(), - client_info: Implementation { - name: "terminator-cli-ai".to_string(), - version: env!("CARGO_PKG_VERSION").to_string(), - }, - }; - client_info.serve(transport).await? - } - }; - - if let Some(info) = service.peer_info() { - println!("āœ… Connected to MCP server: {}", info.server_info.name); - } - - // Get MCP tools and convert to Anthropic format - let mcp_tools = service.list_all_tools().await?; - let anthropic_tools: Vec = mcp_tools - .into_iter() - .map(|t| { - json!({ - "name": t.name, - "description": t.description.unwrap_or_default(), - "input_schema": t.input_schema - }) - }) - .collect(); - - println!("āœ… Found {} tools.", anthropic_tools.len()); - println!("\nšŸ’” Type your command in natural language. 
Examples:"); - println!(" - 'Open notepad and type hello world'"); - println!(" - 'Take a screenshot of the desktop'"); - println!(" - 'Show me all running applications'"); - println!("\nType 'exit' or 'quit' to end the session."); - println!("========================================================================================\n"); - - let mut messages = Vec::new(); - - loop { - print!("šŸ’¬ You: "); - std::io::stdout().flush()?; - - let mut input = String::new(); - std::io::stdin().read_line(&mut input)?; - let input = input.trim(); - - if input.eq_ignore_ascii_case("exit") || input.eq_ignore_ascii_case("quit") { - println!("šŸ‘‹ Goodbye!"); - break; - } - - if input.is_empty() { - continue; - } - - // Add user message - messages.push(json!({ - "role": "user", - "content": input - })); - - println!("šŸ¤” Thinking..."); - - // Process with Claude and handle tool calls in a loop - loop { - // Create request - let mut request_builder = AnthropicClient::new() - .auth(api_key.as_str()) - .version("2023-06-01") - .model("claude-3-opus-20240229") - .messages(&json!(messages)) - .max_tokens(1000) - .stream(false); // Disable streaming for simplicity - - // Add tools if available - if !anthropic_tools.is_empty() { - request_builder = request_builder.tools(&json!(anthropic_tools)); - request_builder = request_builder.tool_choice(ToolChoice::Auto); - } - - let request = request_builder.build()?; - - // Execute request and collect the response - let response_text = Arc::new(Mutex::new(String::new())); - let response_text_clone = response_text.clone(); - - let execute_result = request - .execute(move |response| { - let response_text = response_text_clone.clone(); - async move { - // Collect the full response - if let Ok(mut text) = response_text.lock() { - text.push_str(&response); - } - } - }) - .await; - - if let Err(error) = execute_result { - eprintln!("āŒ Error: {error}"); - break; // Break inner loop on error - } - - // Get the collected response - let full_response 
= response_text.lock().unwrap().clone(); - - // Try to parse as JSON (the SDK should return JSON when not in streaming mode) - if let Ok(json) = serde_json::from_str::(&full_response) { - // Extract content from the response - let mut assistant_content = Vec::new(); - let mut tool_calls = Vec::new(); - let mut text_parts = Vec::new(); - - if let Some(content_array) = json.get("content").and_then(|v| v.as_array()) { - for content in content_array { - if let Some(content_type) = content.get("type").and_then(|v| v.as_str()) { - match content_type { - "text" => { - if let Some(text) = content.get("text").and_then(|v| v.as_str()) - { - text_parts.push(text.to_string()); - assistant_content.push(json!({ - "type": "text", - "text": text - })); - } - } - "tool_use" => { - let tool_call = content.clone(); - tool_calls.push(tool_call.clone()); - assistant_content.push(tool_call); - } - _ => {} - } - } - } - } - - // Print the text response - if !text_parts.is_empty() { - println!("{}", text_parts.join("\n")); - } - - // Add assistant's response to messages - if !assistant_content.is_empty() { - messages.push(json!({ - "role": "assistant", - "content": assistant_content - })); - } - - // If no tool calls, we're done with this query - if tool_calls.is_empty() { - break; - } - - // Execute tool calls - println!("\nšŸ”§ Executing {} tool(s)...", tool_calls.len()); - let mut tool_results = Vec::new(); - - // Consume `tool_calls` to avoid holding an iterator borrow across the `await` boundary - for tool_call in tool_calls { - let tool_name = tool_call - .get("name") - .and_then(|v| v.as_str()) - .unwrap_or("") - .to_string(); - let tool_id = tool_call - .get("id") - .and_then(|v| v.as_str()) - .unwrap_or("") - .to_string(); - let tool_input = tool_call.get("input").cloned().unwrap_or(json!({})); - - println!(" - Calling `{tool_name}` with args: {tool_input}"); - - let result = service - .call_tool(CallToolRequestParam { - name: tool_name.into(), - arguments: 
tool_input.as_object().cloned(), - }) - .await; - - let result_content = match result { - Ok(res) => { - let text_results: Vec = res - .content - .iter() - .filter_map(|c| match &c.raw { - rmcp::model::RawContent::Text(text) => Some(text.text.clone()), - _ => None, - }) - .collect(); - if text_results.is_empty() { - "Tool executed successfully.".to_string() - } else { - text_results.join("\n") - } - } - Err(e) => format!("Error: {e}"), - }; - - let display_result = if result_content.len() > 100 { - format!("{}...", &result_content[..100]) - } else { - result_content.clone() - }; - println!(" āœ… Result: {display_result}"); - - tool_results.push(json!({ - "type": "tool_result", - "tool_use_id": tool_id, - "content": result_content - })); - } - - // Add tool results to messages - messages.push(json!({ - "role": "user", - "content": tool_results - })); - - println!("\nšŸ¤” Processing results..."); - // Continue the loop to get Claude's response about the tool results - } else { - // If not JSON, just print the response - println!("{full_response}"); - break; - } - } - } - - service.cancel().await?; - Ok(()) -} - -#[allow(dead_code)] -pub async fn execute_command_with_result( - transport: Transport, - tool: String, - args: Option, -) -> Result { - execute_command_with_progress(transport, tool, args, false).await -} - -pub async fn execute_command_with_progress( - transport: Transport, - tool: String, - args: Option, - show_progress: bool, -) -> Result { - execute_command_with_progress_and_retry(transport, tool, args, show_progress, false).await -} - -pub async fn execute_command_with_progress_and_retry( - transport: Transport, - tool: String, - args: Option, - show_progress: bool, - no_retry: bool, -) -> Result { - use colored::Colorize; - use tracing::debug; - - // Start telemetry receiver if showing progress for workflows - let telemetry_handle = if show_progress && tool == "execute_sequence" { - match crate::telemetry_receiver::start_telemetry_receiver().await { - 
Ok(handle) => { - debug!("Started telemetry receiver on port 4318"); - Some(handle) - } - Err(e) => { - debug!("Failed to start telemetry receiver: {}", e); - None - } - } - } else { - None - }; - - // Special handling for execute_sequence to capture full result - if tool == "execute_sequence" { - match transport { - Transport::Http(url) => { - debug!("Connecting to server: {}", url); - let transport = StreamableHttpClientTransport::from_uri(url.as_str()); - let client_info = ClientInfo { - protocol_version: Default::default(), - capabilities: ClientCapabilities::default(), - client_info: Implementation { - name: "terminator-cli".to_string(), - version: env!("CARGO_PKG_VERSION").to_string(), - }, - }; - - // Connection setup - no retry here as StreamableHttpClientTransport doesn't support cloning - // Retries will be handled at the tool call level - let service = client_info.serve(transport).await?; - - let arguments = if let Some(args_str) = args { - serde_json::from_str::(&args_str) - .ok() - .and_then(|v| v.as_object().cloned()) - } else { - None - }; - - // Parse workflow to get step count if showing progress - if show_progress { - if let Some(args_obj) = &arguments { - if let Some(steps) = args_obj.get("steps").and_then(|v| v.as_array()) { - let total_steps = steps.len(); - println!( - "\n{} {} {}", - "šŸŽÆ".cyan(), - "WORKFLOW START:".bold().cyan(), - format!("{total_steps} steps").dimmed() - ); - - // List the steps that will be executed - for (i, step) in steps.iter().enumerate() { - let tool_name = step - .get("tool_name") - .and_then(|v| v.as_str()) - .or_else(|| step.get("group_name").and_then(|v| v.as_str())) - .unwrap_or("unknown"); - let step_id = step.get("id").and_then(|v| v.as_str()).unwrap_or(""); - - println!( - " {} Step {}/{}: {} {}", - "šŸ“‹".dimmed(), - i + 1, - total_steps, - tool_name.yellow(), - if !step_id.is_empty() { - format!("[{step_id}]").dimmed().to_string() - } else { - String::new() - } - ); - } - println!("\n{} Executing 
workflow...\n", "⚔".cyan()); - } - } - } - - // Retry logic for tool execution - let mut retry_count = 0; - let max_retries = if no_retry { 0 } else { 3 }; - let mut _last_error = None; - - let result = loop { - match service - .call_tool(CallToolRequestParam { - name: tool.clone().into(), - arguments: arguments.clone(), - }) - .await - { - Ok(res) => break res, - Err(e) => { - let error_str = e.to_string(); - let is_retryable = error_str.contains("401") - || error_str.contains("Unauthorized") - || error_str.contains("500") - || error_str.contains("502") - || error_str.contains("503") - || error_str.contains("504") - || error_str.contains("timeout"); - - if is_retryable && retry_count < max_retries { - retry_count += 1; - let delay = Duration::from_secs(2u64.pow(retry_count)); - eprintln!("āš ļø Tool execution failed: {}. Retrying in {} seconds... (attempt {}/{})", - error_str, delay.as_secs(), retry_count, max_retries); - sleep(delay).await; - _last_error = Some(e); - } else { - return Err(e.into()); - } - } - } - }; - - // Parse the result content as JSON - if !result.content.is_empty() { - for content in &result.content { - if let rmcp::model::RawContent::Text(text) = &content.raw { - // Try to parse as JSON - if let Ok(json_result) = - serde_json::from_str::(&text.text) - { - service.cancel().await?; - - // Stop telemetry receiver if it was started - if let Some(handle) = telemetry_handle { - handle.abort(); - } - - return Ok(json_result); - } - } - } - } - - service.cancel().await?; - - // Stop telemetry receiver if it was started - if let Some(handle) = telemetry_handle { - handle.abort(); - } - - Ok(json!({"status": "unknown", "message": "No parseable result from workflow"})) - } - Transport::Stdio(command) => { - debug!("Starting MCP server: {}", command.join(" ")); - let executable = find_executable(&command[0]).unwrap_or_else(|| command[0].clone()); - let command_args: Vec = if command.len() > 1 { - command[1..].to_vec() - } else { - vec![] - }; - let 
mut cmd = create_command(&executable, &command_args); - - // Set up logging for the server to capture step progress - if std::env::var("LOG_LEVEL").is_err() && std::env::var("RUST_LOG").is_err() { - if show_progress { - // Enable info level logging to see step progress - cmd.env("RUST_LOG", "terminator_mcp_agent=info"); - } else { - cmd.env("LOG_LEVEL", "info"); - } - } - - // Enable telemetry if showing progress - if show_progress { - cmd.env("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4318"); - cmd.env("OTEL_SERVICE_NAME", "terminator-mcp"); - cmd.env("ENABLE_TELEMETRY", "true"); - } - - // For now, just use the standard transport without stderr parsing - // TODO: Add proper step streaming once MCP protocol supports it - let transport = TokioChildProcess::new(cmd)?; - let service = ().serve(transport).await?; - - let arguments = if let Some(args_str) = args { - // Parse workflow to show initial progress - if show_progress { - if let Ok(workflow) = serde_json::from_str::(&args_str) { - if let Some(steps) = workflow.get("steps").and_then(|v| v.as_array()) { - let total_steps = steps.len(); - println!( - "\n{} {} {}", - "šŸŽÆ".cyan(), - "WORKFLOW START:".bold().cyan(), - format!("{total_steps} steps").dimmed() - ); - - // List the steps that will be executed - for (i, step) in steps.iter().enumerate() { - let tool_name = step - .get("tool_name") - .and_then(|v| v.as_str()) - .or_else(|| step.get("group_name").and_then(|v| v.as_str())) - .unwrap_or("unknown"); - let step_id = - step.get("id").and_then(|v| v.as_str()).unwrap_or(""); - - println!( - " {} Step {}/{}: {} {}", - "šŸ“‹".dimmed(), - i + 1, - total_steps, - tool_name.yellow(), - if !step_id.is_empty() { - format!("[{step_id}]").dimmed().to_string() - } else { - String::new() - } - ); - } - println!("\n{} Executing workflow...\n", "⚔".cyan()); - } - } - } - - serde_json::from_str::(&args_str) - .ok() - .and_then(|v| v.as_object().cloned()) - } else { - None - }; - - // Retry logic for tool execution 
(stdio) - let mut retry_count = 0; - let max_retries = if no_retry { 0 } else { 3 }; - let mut _last_error = None; - - let result = loop { - match service - .call_tool(CallToolRequestParam { - name: tool.clone().into(), - arguments: arguments.clone(), - }) - .await - { - Ok(res) => break res, - Err(e) => { - let error_str = e.to_string(); - let is_retryable = error_str.contains("401") - || error_str.contains("Unauthorized") - || error_str.contains("500") - || error_str.contains("502") - || error_str.contains("503") - || error_str.contains("504") - || error_str.contains("timeout"); - - if is_retryable && retry_count < max_retries { - retry_count += 1; - let delay = Duration::from_secs(2u64.pow(retry_count)); - eprintln!("āš ļø Tool execution failed: {}. Retrying in {} seconds... (attempt {}/{})", - error_str, delay.as_secs(), retry_count, max_retries); - sleep(delay).await; - _last_error = Some(e); - } else { - return Err(e.into()); - } - } - } - }; - - // Parse the result content as JSON - if !result.content.is_empty() { - for content in &result.content { - if let rmcp::model::RawContent::Text(text) = &content.raw { - // Try to parse as JSON - if let Ok(json_result) = - serde_json::from_str::(&text.text) - { - service.cancel().await?; - - // Stop telemetry receiver if it was started - if let Some(handle) = telemetry_handle { - handle.abort(); - } - - return Ok(json_result); - } - } - } - } - - service.cancel().await?; - - // Stop telemetry receiver if it was started - if let Some(handle) = telemetry_handle { - handle.abort(); - } - - Ok(json!({"status": "unknown", "message": "No parseable result from workflow"})) - } - } - } else { - // For other tools, just execute normally - execute_command(transport, tool.clone(), args).await?; - Ok(json!({"status": "success", "message": format!("Tool {} executed", tool)})) - } -} diff --git a/terminator-cli/src/mpc_client/anthropic.rs b/terminator-cli/src/mpc_client/anthropic.rs new file mode 100644 index 00000000..f0e14683 
--- /dev/null +++ b/terminator-cli/src/mpc_client/anthropic.rs @@ -0,0 +1,255 @@ +use anyhow::Result; +use std::io::Write; +use serde_json::json; +use std::sync::{Arc, Mutex}; +use rmcp::model::CallToolRequestParam; +use anthropic_sdk::{ + ToolChoice, + Client as AnthropicClient, +}; +use super::utils::connect_to_mcp; +use crate::workflow_exec::workflow::Transport; + +pub async fn anthropic_chat(transport: Transport) -> Result<()> { + println!("šŸ¤– Terminator AI Chat Client"); + println!("=========================================="); + + // Load Anthropic API Key + dotenvy::dotenv().ok(); + let api_key = match std::env::var("ANTHROPIC_API_KEY") { + Ok(key) => key, + Err(_) => { + println!("āŒ ANTHROPIC_API_KEY environment variable not set."); + println!("Please set it in a .env file or export it:"); + println!(" export ANTHROPIC_API_KEY='your-api-key-here'"); + return Ok(()); + } + }; + + // Connect to MCP Server + let service = connect_to_mcp(transport).await?; + + if let Some(info) = service.peer_info() { + println!("āœ… Connected to MCP server: {}", info.server_info.name); + } + + // Get MCP tools and convert to Anthropic format + let mcp_tools = service.list_all_tools().await?; + let anthropic_tools: Vec = mcp_tools + .into_iter() + .map(|t| { + json!({ + "name": t.name, + "description": t.description.unwrap_or_default(), + "input_schema": t.input_schema + }) + }) + .collect(); + + println!("āœ… Found {} tools.", anthropic_tools.len()); + println!("\nšŸ’” Type your command in natural language. 
Examples:"); + println!(" - 'Open notepad and type hello world'"); + println!(" - 'Take a screenshot of the desktop'"); + println!(" - 'Show me all running applications'"); + println!("\nType 'exit' or 'quit' to end the session."); + println!("========================================================================================\n"); + + let mut messages = Vec::new(); + + loop { + print!("šŸ’¬ You: "); + std::io::stdout().flush()?; + + let mut input = String::new(); + std::io::stdin().read_line(&mut input)?; + let input = input.trim(); + + if input.eq_ignore_ascii_case("exit") || input.eq_ignore_ascii_case("quit") { + println!("šŸ‘‹ Goodbye!"); + break; + } + + if input.is_empty() { + continue; + } + + // Add user message + messages.push(json!({ + "role": "user", + "content": input + })); + + println!("šŸ¤” Thinking..."); + + // Process with Claude and handle tool calls in a loop + loop { + // Create request + let mut request_builder = AnthropicClient::new() + .auth(api_key.as_str()) + .version("2023-06-01") + .model("claude-3-opus-20240229") + .messages(&json!(messages)) + .max_tokens(1000) + .stream(false); // Disable streaming for simplicity + + // Add tools if available + if !anthropic_tools.is_empty() { + request_builder = request_builder.tools(&json!(anthropic_tools)); + request_builder = request_builder.tool_choice(ToolChoice::Auto); + } + + let request = request_builder.build()?; + + // Execute request and collect the response + let response_text = Arc::new(Mutex::new(String::new())); + let response_text_clone = response_text.clone(); + + let execute_result = request + .execute(move |response| { + let response_text = response_text_clone.clone(); + async move { + // Collect the full response + if let Ok(mut text) = response_text.lock() { + text.push_str(&response); + } + } + }) + .await; + + if let Err(error) = execute_result { + eprintln!("āŒ Error: {error}"); + break; // Break inner loop on error + } + + // Get the collected response + let full_response 
= response_text.lock().unwrap().clone(); + + // Try to parse as JSON (the SDK should return JSON when not in streaming mode) + if let Ok(json) = serde_json::from_str::(&full_response) { + // Extract content from the response + let mut assistant_content = Vec::new(); + let mut tool_calls = Vec::new(); + let mut text_parts = Vec::new(); + + if let Some(content_array) = json.get("content").and_then(|v| v.as_array()) { + for content in content_array { + if let Some(content_type) = content.get("type").and_then(|v| v.as_str()) { + match content_type { + "text" => { + if let Some(text) = content.get("text").and_then(|v| v.as_str()) + { + text_parts.push(text.to_string()); + assistant_content.push(json!({ + "type": "text", + "text": text + })); + } + } + "tool_use" => { + let tool_call = content.clone(); + tool_calls.push(tool_call.clone()); + assistant_content.push(tool_call); + } + _ => {} + } + } + } + } + + // Print the text response + if !text_parts.is_empty() { + println!("{}", text_parts.join("\n")); + } + + // Add assistant's response to messages + if !assistant_content.is_empty() { + messages.push(json!({ + "role": "assistant", + "content": assistant_content + })); + } + + // If no tool calls, we're done with this query + if tool_calls.is_empty() { + break; + } + + // Execute tool calls + println!("\nšŸ”§ Executing {} tool(s)...", tool_calls.len()); + let mut tool_results = Vec::new(); + + // Consume `tool_calls` to avoid holding an iterator borrow across the `await` boundary + for tool_call in tool_calls { + let tool_name = tool_call + .get("name") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + let tool_id = tool_call + .get("id") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + let tool_input = tool_call.get("input").cloned().unwrap_or(json!({})); + + println!(" - Calling `{tool_name}` with args: {tool_input}"); + + let result = service + .call_tool(CallToolRequestParam { + name: tool_name.into(), + arguments: 
tool_input.as_object().cloned(), + }) + .await; + + let result_content = match result { + Ok(res) => { + let text_results: Vec = res + .content + .iter() + .filter_map(|c| match &c.raw { + rmcp::model::RawContent::Text(text) => Some(text.text.clone()), + _ => None, + }) + .collect(); + if text_results.is_empty() { + "Tool executed successfully.".to_string() + } else { + text_results.join("\n") + } + } + Err(e) => format!("Error: {e}"), + }; + + let display_result = if result_content.len() > 100 { + format!("{}...", &result_content[..100]) + } else { + result_content.clone() + }; + println!(" āœ… Result: {display_result}"); + + tool_results.push(json!({ + "type": "tool_result", + "tool_use_id": tool_id, + "content": result_content + })); + } + + // Add tool results to messages + messages.push(json!({ + "role": "user", + "content": tool_results + })); + + println!("\nšŸ¤” Processing results..."); + // Continue the loop to get Claude's response about the tool results + } else { + // If not JSON, just print the response + println!("{full_response}"); + break; + } + } + } + + service.cancel().await?; + Ok(()) +} diff --git a/terminator-cli/src/mpc_client/gemini.rs b/terminator-cli/src/mpc_client/gemini.rs new file mode 100644 index 00000000..e6cd4e07 --- /dev/null +++ b/terminator-cli/src/mpc_client/gemini.rs @@ -0,0 +1,192 @@ +use anyhow::Result; +use serde_json::Value; +use serde_json::json; +use std::io::Write; + +use gemini_rs::{ + Client, + types::{Content, FunctionDeclaration, + FunctionCall, Part, Role, Tools + }, +}; +use rmcp::model::CallToolRequestParam; + +use super::utils::connect_to_mcp; +use crate::workflow_exec::workflow::Transport; + +pub async fn gemini_chat(transport: Transport) -> Result<()> { + println!("šŸ¤– Gemini AI Chat Client"); + println!("=========================================="); + + dotenvy::dotenv().ok(); + if std::env::var("GEMINI_API_KEY").is_err() { + println!("āŒ GEMINI_API_KEY or environment variable not set."); + println!("Please 
set one in a .env file or export it."); + println!(" export GEMINI_API_KEY='your-api-key-here'"); + return Ok(()); + } + + let service = connect_to_mcp(transport).await?; + + if let Some(info) = service.peer_info() { + println!("āœ… Connected to MCP server: {}", info.server_info.name); + } + + let mcp_tools = service.list_all_tools().await?; + let gemini_fn_declarations: Vec = mcp_tools + .into_iter() + .map(|t| FunctionDeclaration { + name: t.name.to_string(), + description: t.description.unwrap().to_string(), + parameters: Value::Object(t.input_schema.as_ref().clone()), + }) + .collect(); + + let gemini_tools = if gemini_fn_declarations.is_empty() { + None + } else { + Some(vec![Tools { + function_declarations: Some(gemini_fn_declarations), + google_search: None, + code_execution: None, + }]) + }; + + if let Some(tools) = &gemini_tools { + if let Some(decls) = &tools[0].function_declarations { + println!("āœ… Found {} tools.", decls.len()); + } + } else { + println!("āœ… No tools found or parsed."); + } + + println!("\nšŸ’” Type your command in natural language. 
Examples:"); + println!(" - 'Open notepad and type hello world'"); + println!(" - 'Take a screenshot of the desktop'"); + println!("\nType 'exit' or 'quit' to end the session."); + println!("========================================================================================\n"); + + let mut messages: Vec = Vec::new(); + // singleton client + let client = Client::instance(); + // model that supports tool calling + let model_name = "gemini-2.5-flash-preview-04-17"; + + loop { + print!("šŸ’¬ You: "); + std::io::stdout().flush()?; + let mut input = String::new(); + std::io::stdin().read_line(&mut input)?; + let input = input.trim(); + + if input.eq_ignore_ascii_case("exit") || input.eq_ignore_ascii_case("quit") { + break; + } + if input.is_empty() { + continue; + } + + messages.push(Content { + role: Role::User, + parts: vec![Part { + text: Some(input.to_string()), + ..Default::default() + }], + }); + + println!("šŸ¤” Thinking..."); + + loop { + let mut req = client.generate_content(model_name); + + if let Some(tools) = &gemini_tools { + req.tools(tools.clone()); + } + + req.body.contents = messages.clone(); + + let response = match req.await { + Ok(resp) => resp, + Err(e) => { + println!("āŒ Error calling Gemini API: {e}"); + messages.pop(); + break; + } + }; + + let candidate = match response.candidates.get(0) { + Some(c) => c, + None => { + println!("šŸ¤– No response from model."); + break; + } + }; + + messages.push(candidate.content.clone()); + + let mut function_calls_to_execute = Vec::new(); + let mut has_text_response = false; + + for part in &candidate.content.parts { + if let Some(text) = &part.text { + print!("{}", text); + has_text_response = true; + } + if let Some(fc) = &part.function_call { + function_calls_to_execute.push(fc.clone()); + } + } + if has_text_response { + println!(); + } + + if function_calls_to_execute.is_empty() { + break; + } + + println!( + "\nšŸ”§ Executing {} tool(s)...", + function_calls_to_execute.len() + ); + let mut 
tool_results: Vec = Vec::new(); + + for fc in function_calls_to_execute { + println!(" - Calling `{}` with args: {}", fc.name, fc.args); + + let result = service + .call_tool(CallToolRequestParam { + name: fc.name.clone().into(), + arguments: fc.args.as_object().cloned(), + }) + .await; + + let result_content = match result { + Ok(res) => json!({ "result": res }), + Err(e) => json!({ "error": format!("{e}") }), + }; + + println!(" āœ… Result: {}", result_content.to_string()); + + tool_results.push(Part { + function_call: Some(FunctionCall { + id: None, + name: fc.name, + args: result_content, + }), + ..Default::default() + }); + } + + messages.push(Content { + role: Role::User, + parts: tool_results, + }); + + println!("\nšŸ¤” Processing results..."); + } + } + + println!("šŸ‘‹ Goodbye!"); + service.cancel().await?; + Ok(()) +} diff --git a/terminator-cli/src/mpc_client/interactive_chat.rs b/terminator-cli/src/mpc_client/interactive_chat.rs new file mode 100644 index 00000000..b02b1ceb --- /dev/null +++ b/terminator-cli/src/mpc_client/interactive_chat.rs @@ -0,0 +1,325 @@ +use anyhow::Result; +use std::io::{self, Write}; +use rmcp::{ + object, ServiceExt, + model::{CallToolRequestParam, ClientCapabilities, ClientInfo, Implementation}, + transport::{StreamableHttpClientTransport, TokioChildProcess}, +}; +use crate::{utils, workflow_exec::workflow::Transport}; + +pub async fn interactive_chat(transport: Transport) -> Result<()> { + println!("šŸ¤– Terminator MCP Chat Client"); + println!("============================="); + + match transport { + Transport::Http(url) => { + println!("Connecting to: {url}"); + let transport = StreamableHttpClientTransport::from_uri(url.as_str()); + let client_info = ClientInfo { + protocol_version: Default::default(), + capabilities: ClientCapabilities::default(), + client_info: Implementation { + name: "terminator-cli".to_string(), + version: env!("CARGO_PKG_VERSION").to_string(), + }, + }; + let service = 
client_info.serve(transport).await?; + + // Get server info + let server_info = service.peer_info(); + if let Some(info) = server_info { + println!("āœ… Connected to server: {}", info.server_info.name); + println!(" Version: {}", info.server_info.version); + } + + // List available tools + let tools = service.list_all_tools().await?; + println!("\nšŸ“‹ Available tools ({}):", tools.len()); + for (i, tool) in tools.iter().enumerate() { + if i < 10 { + println!( + " šŸ”§ {} - {}", + tool.name, + tool.description.as_deref().unwrap_or("No description") + ); + } else if i == 10 { + println!(" ... and {} more tools", tools.len() - 10); + break; + } + } + + println!("\nšŸ’” Examples:"); + println!(" - get_desktop_info"); + println!(" - list_applications"); + println!(" - open_application notepad"); + println!(" - type_text 'Hello from Terminator!'"); + println!(" - take_screenshot"); + println!("\nType 'help' to see all tools, 'exit' to quit"); + println!("=====================================\n"); + + let stdin = io::stdin(); + let mut stdout = io::stdout(); + + loop { + print!("šŸ”§ Tool (or command): "); + stdout.flush()?; + + let mut input = String::new(); + stdin.read_line(&mut input)?; + let input = input.trim(); + + if input.is_empty() { + continue; + } + + if input == "exit" || input == "quit" { + println!("šŸ‘‹ Goodbye!"); + break; + } + + if input == "help" { + println!("\nšŸ“š All available tools:"); + for tool in &tools { + println!( + " {} - {}", + tool.name, + tool.description.as_deref().unwrap_or("No description") + ); + if let Some(props) = tool.input_schema.get("properties") { + println!(" Parameters: {}", serde_json::to_string(props)?); + } + } + println!(); + continue; + } + + // Parse tool call + let parts: Vec<&str> = input.splitn(2, ' ').collect(); + let tool_name = parts[0].to_string(); + + // Build arguments + let arguments = if parts.len() > 1 { + let args_part = parts[1]; + // Try to parse as JSON first + if let Ok(json) = 
serde_json::from_str::(args_part) { + json.as_object().cloned() + } else { + // Otherwise, try to build simple arguments + match tool_name.as_str() { + "open_application" => Some(object!({ "name": args_part.to_string() })), + "type_text" => Some(object!({ "text": args_part.to_string() })), + _ => None, + } + } + } else { + None + }; + + println!( + "\n⚔ Calling {} with args: {}", + tool_name, + arguments + .as_ref() + .map(|a| serde_json::to_string(a).unwrap_or_default()) + .unwrap_or_else(|| "{}".to_string()) + ); + + match service + .call_tool(CallToolRequestParam { + name: tool_name.into(), + arguments, + }) + .await + { + Ok(result) => { + println!("āœ… Result:"); + if !result.content.is_empty() { + for content in &result.content { + match &content.raw { + rmcp::model::RawContent::Text(text) => { + println!("{}", text.text); + } + rmcp::model::RawContent::Image(image) => { + println!("[Image: {}]", image.mime_type); + } + rmcp::model::RawContent::Resource(resource) => { + println!("[Resource: {:?}]", resource.resource); + } + rmcp::model::RawContent::Audio(audio) => { + println!("[Audio: {}]", audio.mime_type); + } + rmcp::model::RawContent::ResourceLink(resource) => { + println!("[ResourceLink: {resource:?}]"); + } + } + } + } + println!(); + } + Err(e) => { + println!("āŒ Error: {e}\n"); + } + } + } + + // Cancel the service connection + service.cancel().await?; + } + Transport::Stdio(command) => { + println!("Starting: {}", command.join(" ")); + let executable = utils::find_executable(&command[0]).unwrap_or_else(|| command[0].clone()); + let command_args: Vec = if command.len() > 1 { + command[1..].to_vec() + } else { + vec![] + }; + let mut cmd = utils::create_command(&executable, &command_args); + // Ensure server prints useful logs if not set by user + if std::env::var("LOG_LEVEL").is_err() && std::env::var("RUST_LOG").is_err() { + cmd.env("LOG_LEVEL", "info"); + } + let transport = TokioChildProcess::new(cmd)?; + let service = 
().serve(transport).await?; + // Get server info + let server_info = service.peer_info(); + if let Some(info) = server_info { + println!("āœ… Connected to server: {}", info.server_info.name); + println!(" Version: {}", info.server_info.version); + } + + // List available tools + let tools = service.list_all_tools().await?; + println!("\nšŸ“‹ Available tools ({}):", tools.len()); + for (i, tool) in tools.iter().enumerate() { + if i < 10 { + println!( + " šŸ”§ {} - {}", + tool.name, + tool.description.as_deref().unwrap_or("No description") + ); + } else if i == 10 { + println!(" ... and {} more tools", tools.len() - 10); + break; + } + } + + println!("\nšŸ’” Examples:"); + println!(" - get_desktop_info"); + println!(" - list_applications"); + println!(" - open_application notepad"); + println!(" - type_text 'Hello from Terminator!'"); + println!(" - take_screenshot"); + println!("\nType 'help' to see all tools, 'exit' to quit"); + println!("=====================================\n"); + + let stdin = io::stdin(); + let mut stdout = io::stdout(); + + loop { + print!("šŸ”§ Tool (or command): "); + stdout.flush()?; + + let mut input = String::new(); + stdin.read_line(&mut input)?; + let input = input.trim(); + + if input.is_empty() { + continue; + } + + if input == "exit" || input == "quit" { + println!("šŸ‘‹ Goodbye!"); + break; + } + + if input == "help" { + println!("\nšŸ“š All available tools:"); + for tool in &tools { + println!( + " {} - {}", + tool.name, + tool.description.as_deref().unwrap_or("No description") + ); + if let Some(props) = tool.input_schema.get("properties") { + println!(" Parameters: {}", serde_json::to_string(props)?); + } + } + println!(); + continue; + } + + // Parse tool call + let parts: Vec<&str> = input.splitn(2, ' ').collect(); + let tool_name = parts[0].to_string(); + + // Build arguments + let arguments = if parts.len() > 1 { + let args_part = parts[1]; + // Try to parse as JSON first + if let Ok(json) = serde_json::from_str::(args_part) 
{ + json.as_object().cloned() + } else { + // Otherwise, try to build simple arguments + match tool_name.as_str() { + "open_application" => Some(object!({ "name": args_part.to_string() })), + "type_text" => Some(object!({ "text": args_part.to_string() })), + _ => None, + } + } + } else { + None + }; + + println!( + "\n⚔ Calling {} with args: {}", + tool_name, + arguments + .as_ref() + .map(|a| serde_json::to_string(a).unwrap_or_default()) + .unwrap_or_else(|| "{}".to_string()) + ); + + match service + .call_tool(CallToolRequestParam { + name: tool_name.into(), + arguments, + }) + .await + { + Ok(result) => { + println!("āœ… Result:"); + if !result.content.is_empty() { + for content in &result.content { + match &content.raw { + rmcp::model::RawContent::Text(text) => { + println!("{}", text.text); + } + rmcp::model::RawContent::Image(image) => { + println!("[Image: {}]", image.mime_type); + } + rmcp::model::RawContent::Resource(resource) => { + println!("[Resource: {:?}]", resource.resource); + } + rmcp::model::RawContent::Audio(audio) => { + println!("[Audio: {}]", audio.mime_type); + } + rmcp::model::RawContent::ResourceLink(resource) => { + println!("[ResourceLink: {resource:?}]"); + } + } + } + } + println!(); + } + Err(e) => { + println!("āŒ Error: {e}\n"); + } + } + } + + // Cancel the service connection + service.cancel().await?; + } + } + Ok(()) +} diff --git a/terminator-cli/src/mpc_client/mod.rs b/terminator-cli/src/mpc_client/mod.rs new file mode 100644 index 00000000..7442e71d --- /dev/null +++ b/terminator-cli/src/mpc_client/mod.rs @@ -0,0 +1,6 @@ +pub mod utils; +pub mod gemini; +pub mod openai; +pub mod anthropic; +pub mod natural_lang; +pub mod interactive_chat; diff --git a/terminator-cli/src/mpc_client/natural_lang.rs b/terminator-cli/src/mpc_client/natural_lang.rs new file mode 100644 index 00000000..4da89095 --- /dev/null +++ b/terminator-cli/src/mpc_client/natural_lang.rs @@ -0,0 +1,13 @@ +use anyhow::Result; +use crate::{ + 
workflow_exec::workflow::Transport, + cli::AIProvider, +}; + +pub async fn aichat(transport: Transport, provider: AIProvider,) -> Result<()> { + match provider { + AIProvider::Anthropic => super::anthropic::anthropic_chat(transport).await, + AIProvider::OpenAI => super::openai::openai_chat(transport).await, + AIProvider::Gemini => super::gemini::gemini_chat(transport).await, + } +} diff --git a/terminator-cli/src/mpc_client/openai.rs b/terminator-cli/src/mpc_client/openai.rs new file mode 100644 index 00000000..54f12cbe --- /dev/null +++ b/terminator-cli/src/mpc_client/openai.rs @@ -0,0 +1,197 @@ +use anyhow::Result; +use serde_json::{from_str, Value as JsonValue}; +use std::collections::HashMap; +use std::io::Write; +use openai_api_rs::v1::{ + api::OpenAIClient, + chat_completion::{ + self, ChatCompletionRequest, Content, FinishReason, MessageRole, Tool, ToolChoiceType, + ToolType, + }, + common::GPT4_O, + types::{self, FunctionParameters, JSONSchemaDefine, JSONSchemaType}, +}; +use super::utils::connect_to_mcp; +use crate::workflow_exec::workflow::Transport; +use rmcp::model::CallToolRequestParam; + +pub async fn openai_chat(transport: Transport) -> Result<()> { + println!("šŸ¤– OpenAI GPT-4o Chat Client (Modern API)"); + println!("=========================================="); + + dotenvy::dotenv().ok(); + let api_key = match std::env::var("OPENAI_API_KEY") { + Ok(key) => key, + Err(_) => { + println!("āŒ OPENAI_API_KEY environment variable not set."); + return Ok(()); + } + }; + let mut client = OpenAIClient::builder().with_api_key(api_key).build().unwrap(); + + let service = connect_to_mcp(transport).await?; + if let Some(info) = service.peer_info() { + println!("āœ… Connected to MCP server: {}", info.server_info.name); + } + + let mcp_tools = service.list_all_tools().await?; + let openai_tools: Vec = mcp_tools + .into_iter() + .filter_map(|t| { + // into the strongly-typed structures openai's library requires. 
+ let schema = t.input_schema; + let properties_map = schema.get("properties").and_then(|p| p.as_object())?; + let required_vec = schema + .get("required") + .and_then(|r| r.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }); + + let mut properties = HashMap::new(); + for (key, value) in properties_map { + let description = value.get("description").and_then(|d| d.as_str()).map(String::from); + let property_schema = Box::new(JSONSchemaDefine { + schema_type: Some(JSONSchemaType::String), + description, + ..Default::default() + }); + properties.insert(key.clone(), property_schema); + } + + Some(Tool { + r#type: ToolType::Function, + function: types::Function { + name: t.name.to_string(), + description: Some(t.description.unwrap().to_string()), + parameters: FunctionParameters { + schema_type: JSONSchemaType::Object, + properties: Some(properties), + required: required_vec, + }, + }, + }) + }) + .collect(); + + if !openai_tools.is_empty() { + println!("āœ… Found {} tools.", openai_tools.len()); + } else { + println!("āœ… No tools found or parsed."); + } + println!("\nšŸ’” Type your command in natural language."); + println!("Type 'exit' or 'quit' to end the session.\n"); + + let mut messages: Vec = Vec::new(); + + loop { + print!("šŸ’¬ You: "); + std::io::stdout().flush()?; + let mut input = String::new(); + std::io::stdin().read_line(&mut input)?; + let input = input.trim(); + + if input.eq_ignore_ascii_case("exit") || input.eq_ignore_ascii_case("quit") { + break; + } + if input.is_empty() { + continue; + } + + messages.push(chat_completion::ChatCompletionMessage { + role: MessageRole::user, + content: Content::Text(input.to_string()), + name: None, + tool_calls: None, + tool_call_id: None, + }); + + println!("šŸ¤” Thinking..."); + + loop { + let mut req_builder = ChatCompletionRequest::new(GPT4_O.to_string(), messages.clone()); + + if !openai_tools.is_empty() { + req_builder = req_builder + 
.tools(openai_tools.clone()) + .tool_choice(ToolChoiceType::Auto); + } + let req = req_builder; + + let result = client.chat_completion(req).await?; + let choice = &result.choices[0]; + let response_message = choice.message.clone(); + + messages.push(chat_completion::ChatCompletionMessage { + name: response_message.name, + role: MessageRole::assistant, + content: response_message.content.map(Content::Text).unwrap_or(Content::Text("".to_string())), + tool_calls: response_message.tool_calls.map(|calls| { + calls + .into_iter() + .map(|call| chat_completion::ToolCall { + id: call.id, + r#type: call.r#type, + function: chat_completion::ToolCallFunction { + name: call.function.name, + arguments: call.function.arguments, + }, + }) + .collect() + }), + tool_call_id: None, + }); + + // check if the model stopped because it wants to call a tool + if let Some(FinishReason::tool_calls) = choice.finish_reason { + let tool_calls = choice.message.tool_calls.as_ref().unwrap(); + println!("\nšŸ”§ Executing {} tool(s)...", tool_calls.len()); + + for tool_call in tool_calls { + let func = &tool_call.function; + let args = &func.arguments; + println!(" - Calling `{:?}` with args: {:?}", func.name.clone(), args.clone()); + + let arguments_json: JsonValue = from_str(&args.clone().unwrap())?; + + let tool_result = service + .call_tool(CallToolRequestParam { + name: func.name.as_ref().unwrap().clone().into(), + arguments: arguments_json.as_object().cloned(), + }) + .await; + + let result_content = match tool_result { + Ok(res) => format!("{:?}", res), + Err(e) => format!("Error: {e}"), + }; + println!(" āœ… Result: {}", result_content); + + messages.push(chat_completion::ChatCompletionMessage { + role: MessageRole::tool, + tool_call_id: Some(tool_call.id.clone()), + content: Content::Text(result_content), + name: func.name.clone(), + tool_calls: None, + }); + } + println!("\nšŸ¤” Processing results..."); + // continue the inner loop to send the tool results back to the model + continue; 
+ } + + // If it was a normal text response, print it and break the inner loop + if let Some(text) = &choice.message.content { + println!("{}", text); + } + break; + } + } + + println!("\nšŸ‘‹ Goodbye!"); + service.cancel().await?; + Ok(()) +} + diff --git a/terminator-cli/src/mpc_client/utils.rs b/terminator-cli/src/mpc_client/utils.rs new file mode 100644 index 00000000..5c9ae478 --- /dev/null +++ b/terminator-cli/src/mpc_client/utils.rs @@ -0,0 +1,59 @@ +use anyhow::Result; +use rmcp::{ + RoleClient, + ServiceExt, + service::RunningService, + transport::{ + TokioChildProcess, + StreamableHttpClientTransport, + }, + model::{ + ClientInfo, + Implementation, + ClientCapabilities, + InitializeRequestParam + }, +}; +use crate::{ + workflow_exec::workflow::Transport, + utils::{find_executable, create_command}, + +}; + +pub async fn connect_to_mcp(transport: Transport) -> Result> { + match transport { + Transport::Http(url) => { + println!("Connecting to MCP server: {url}"); + let transport = StreamableHttpClientTransport::from_uri(url.as_str()); + let client_info = ClientInfo { + protocol_version: Default::default(), + capabilities: ClientCapabilities::default(), + client_info: Implementation { + name: "terminator-cli-ai".to_string(), + version: env!("CARGO_PKG_VERSION").to_string(), + }, + }; + Ok(client_info.serve(transport).await?) 
+ } + Transport::Stdio(command) => { + println!("Starting MCP server: {}", command.join(" ")); + let executable = find_executable(&command[0]).unwrap_or_else(|| command[0].clone()); + let command_args: Vec = if command.len() > 1 { + command[1..].to_vec() + } else { + vec![] + }; + let cmd = create_command(&executable, &command_args); + let transport = TokioChildProcess::new(cmd)?; + let client_info = ClientInfo { + protocol_version: Default::default(), + capabilities: ClientCapabilities::default(), + client_info: Implementation { + name: "terminator-cli-ai".to_string(), + version: env!("CARGO_PKG_VERSION").to_string(), + }, + }; + Ok(client_info.serve(transport).await?) + } + } +} diff --git a/terminator-cli/src/telemetry/mod.rs b/terminator-cli/src/telemetry/mod.rs new file mode 100644 index 00000000..4a306bcd --- /dev/null +++ b/terminator-cli/src/telemetry/mod.rs @@ -0,0 +1,3 @@ +pub mod traces; +pub mod process; +pub mod receiver; diff --git a/terminator-cli/src/telemetry/process.rs b/terminator-cli/src/telemetry/process.rs new file mode 100644 index 00000000..155f0511 --- /dev/null +++ b/terminator-cli/src/telemetry/process.rs @@ -0,0 +1,185 @@ +use anyhow::Result; +use colored::Colorize; +use std::sync::Arc; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; +use super::receiver::TelemetryReceiver; +use super::traces::StepsTracker; + +pub async fn process_span(span: &serde_json::Value, tracker: &Arc>) { + let name = span.get("name").and_then(|v| v.as_str()).unwrap_or(""); + + // Parse attributes + let mut attributes = std::collections::HashMap::new(); + if let Some(attrs) = span.get("attributes").and_then(|v| v.as_array()) { + for attr in attrs { + if let (Some(key), Some(value)) = + (attr.get("key").and_then(|v| v.as_str()), attr.get("value")) + { + let val_str = extract_attribute_value(value); + attributes.insert(key.to_string(), val_str); + } + } + } + + // Parse events (step starts/completes) + if let Some(events_array) = 
span.get("events").and_then(|v| v.as_array()) { + for event in events_array { + if let Some(event_name) = event.get("name").and_then(|v| v.as_str()) { + let mut event_attrs = std::collections::HashMap::new(); + if let Some(attrs) = event.get("attributes").and_then(|v| v.as_array()) { + for attr in attrs { + if let (Some(key), Some(value)) = + (attr.get("key").and_then(|v| v.as_str()), attr.get("value")) + { + let val_str = extract_attribute_value(value); + event_attrs.insert(key.to_string(), val_str); + } + } + } + + // Display step progress + match event_name { + "workflow.started" => { + if let Some(total) = event_attrs.get("workflow.total_steps") { + let mut tracker = tracker.lock().await; + tracker.total_steps = total.parse().ok(); + + println!( + "\n{} {} {}", + "šŸŽÆ".cyan(), + "WORKFLOW STARTED:".bold().cyan(), + format!("{total} steps").dimmed() + ); + } + } + "step.started" => { + if let Some(tool) = event_attrs.get("step.tool") { + let step_index = event_attrs + .get("step.index") + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + + let mut tracker = tracker.lock().await; + tracker.current_step = step_index + 1; + let total = tracker.total_steps.unwrap_or(0); + + println!( + " {} Step {}/{}: {} {}", + "ā–¶".blue(), + tracker.current_step, + total, + tool.yellow(), + "[running...]".dimmed() + ); + } + } + "step.completed" => { + if let Some(status) = event_attrs.get("step.status") { + let icon = if status == "success" { + "āœ“".green() + } else if status == "skipped" { + "ā­".yellow() + } else { + "āœ—".red() + }; + println!(" {icon} Status: {status}"); + } + } + "workflow.completed" => { + let had_errors = event_attrs + .get("workflow.had_errors") + .and_then(|s| s.parse::().ok()) + .unwrap_or(false); + + if had_errors { + println!("\n{} Workflow completed with errors", "⚠".yellow()); + } else { + println!("\n{} Workflow completed successfully", "āœ…".green()); + } + } + _ => {} + } + } + } + } + + // Handle span-level info + if 
name.starts_with("workflow.") { + if let Some(total) = attributes.get("workflow.total_steps") { + let mut tracker = tracker.lock().await; + tracker.total_steps = total.parse().ok(); + } + } else if name.starts_with("step.") { + // Step span started + if let Some(tool) = attributes.get("tool.name") { + let step_num = attributes + .get("step.number") + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + let step_total = attributes + .get("step.total") + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + + println!( + " {} Step {}/{}: {} {}", + "šŸ“".green(), + step_num, + step_total, + tool.yellow(), + "[executing...]".dimmed() + ); + } + } +} + +pub fn extract_attribute_value(value: &serde_json::Value) -> String { + if let Some(s) = value.get("stringValue").and_then(|v| v.as_str()) { + s.to_string() + } else if let Some(i) = value.get("intValue").and_then(|v| v.as_i64()) { + i.to_string() + } else if let Some(f) = value.get("doubleValue").and_then(|v| v.as_f64()) { + f.to_string() + } else if let Some(b) = value.get("boolValue").and_then(|v| v.as_bool()) { + b.to_string() + } else { + value.to_string() + } +} + + +// Extract value from protobuf attribute +pub fn extract_proto_attr_value( + value: &Option, +) -> String { + if let Some(val) = value { + if let Some(v) = &val.value { + match v { + opentelemetry_proto::tonic::common::v1::any_value::Value::StringValue(s) => { + s.clone() + } + opentelemetry_proto::tonic::common::v1::any_value::Value::IntValue(i) => { + i.to_string() + } + opentelemetry_proto::tonic::common::v1::any_value::Value::DoubleValue(f) => { + f.to_string() + } + opentelemetry_proto::tonic::common::v1::any_value::Value::BoolValue(b) => { + b.to_string() + } + _ => String::new(), + } + } else { + String::new() + } + } else { + String::new() + } +} + +// Start the telemetry receiver +pub async fn start_telemetry_receiver() -> Result> { + let receiver = TelemetryReceiver::new(4318); + receiver.start().await +} diff --git 
a/terminator-cli/src/telemetry/receiver.rs b/terminator-cli/src/telemetry/receiver.rs new file mode 100644 index 00000000..46c88f94 --- /dev/null +++ b/terminator-cli/src/telemetry/receiver.rs @@ -0,0 +1,37 @@ +use anyhow::Result; +use axum::{routing::post, Router}; +use std::sync::Arc; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; +use super::traces::StepsTracker; +use super::traces::handle_traces; + +pub struct TelemetryReceiver { + port: u16, +} + +impl TelemetryReceiver { + pub fn new(port: u16) -> Self { + Self { port } + } + + pub async fn start(self) -> Result> { + let steps_state = Arc::new(Mutex::new(StepsTracker::new())); + + let app = Router::new() + .route("/v1/traces", post(handle_traces)) + .with_state(steps_state); + + let addr = format!("127.0.0.1:{}", self.port); + + let handle = tokio::spawn(async move { + let listener = tokio::net::TcpListener::bind(&addr).await.unwrap(); + axum::serve(listener, app).await.unwrap(); + }); + + // Give it a moment to start + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + Ok(handle) + } +} diff --git a/terminator-cli/src/telemetry/traces.rs b/terminator-cli/src/telemetry/traces.rs new file mode 100644 index 00000000..87ee3d84 --- /dev/null +++ b/terminator-cli/src/telemetry/traces.rs @@ -0,0 +1,174 @@ +use bytes::Bytes; +use colored::Colorize; +use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; +use axum::{extract::State, http::StatusCode, response::Json}; +use prost::Message; +use serde_json::json; +use std::sync::Arc; +use tokio::sync::Mutex; +use super::process::{process_span, extract_proto_attr_value}; + +pub struct StepsTracker { + pub total_steps: Option, + pub current_step: usize, +} + +impl StepsTracker { + pub fn new() -> Self { + Self { + total_steps: None, + current_step: 0, + } + } +} + +pub async fn handle_traces( + State(steps): State>>, + body: Bytes, +) -> (StatusCode, Json) { + // Try to parse as protobuf first (most common) + if let 
Ok(request) = ExportTraceServiceRequest::decode(&body[..]) { + process_protobuf_traces(request, steps).await; + } else if let Ok(json_data) = serde_json::from_slice::(&body) { + // Fallback to JSON parsing + process_json_traces(json_data, steps).await; + } + + (StatusCode::OK, Json(json!({"partialSuccess": {}}))) +} + +pub async fn process_json_traces(data: serde_json::Value, steps: Arc>) { + if let Some(resource_spans) = data.get("resourceSpans").and_then(|v| v.as_array()) { + for resource_span in resource_spans { + if let Some(scope_spans) = resource_span.get("scopeSpans").and_then(|v| v.as_array()) { + for scope_span in scope_spans { + if let Some(spans_array) = scope_span.get("spans").and_then(|v| v.as_array()) { + for span in spans_array { + process_span(span, &steps).await; + } + } + } + } + } + } +} + +// Process protobuf traces +pub async fn process_protobuf_traces( + request: ExportTraceServiceRequest, + tracker: Arc>, +) { + for resource_span in request.resource_spans { + for scope_span in resource_span.scope_spans { + for span in scope_span.spans { + let span_name = span.name.clone(); + + // Process events in the span + for event in &span.events { + let event_name = event.name.clone(); + let mut event_attrs = std::collections::HashMap::new(); + + // Extract event attributes + for attr in &event.attributes { + let key = attr.key.clone(); + let value = extract_proto_attr_value(&attr.value); + event_attrs.insert(key, value); + } + + // Display step progress based on events + match event_name.as_str() { + "workflow.started" => { + if let Some(total) = event_attrs.get("workflow.total_steps") { + let mut t = tracker.lock().await; + t.total_steps = total.parse().ok(); + + println!( + "\n{} {} {}", + "šŸŽÆ".cyan(), + "WORKFLOW STARTED:".bold().cyan(), + format!("{total} steps").dimmed() + ); + } + } + "step.started" => { + if let Some(tool) = event_attrs.get("step.tool") { + let step_index = event_attrs + .get("step.index") + .and_then(|s| s.parse::().ok()) + 
.unwrap_or(0); + + let mut t = tracker.lock().await; + t.current_step = step_index + 1; + let total = t.total_steps.unwrap_or(0); + + println!( + " {} Step {}/{}: {} {}", + "ā–¶".blue(), + t.current_step, + total, + tool.yellow(), + "[running...]".dimmed() + ); + } + } + "step.completed" => { + if let Some(status) = event_attrs.get("step.status") { + let icon = if status == "success" { + "āœ“".green() + } else if status == "skipped" { + "ā­".yellow() + } else { + "āœ—".red() + }; + println!(" {icon} Status: {status}"); + } + } + "workflow.completed" => { + let had_errors = event_attrs + .get("workflow.had_errors") + .and_then(|s| s.parse::().ok()) + .unwrap_or(false); + + if had_errors { + println!("\n{} Workflow completed with errors", "⚠".yellow()); + } else { + println!("\n{} Workflow completed successfully", "āœ…".green()); + } + } + _ => {} + } + } + + // Also check span-level attributes for step info + if span_name.starts_with("step.") { + let mut span_attrs = std::collections::HashMap::new(); + for attr in &span.attributes { + let key = attr.key.clone(); + let value = extract_proto_attr_value(&attr.value); + span_attrs.insert(key, value); + } + + if let Some(tool) = span_attrs.get("tool.name") { + let step_num = span_attrs + .get("step.number") + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + let step_total = span_attrs + .get("step.total") + .and_then(|s| s.parse::().ok()) + .unwrap_or(0); + + println!( + " {} Step {}/{}: {} {}", + "šŸ“".green(), + step_num, + step_total, + tool.yellow(), + "[executing...]".dimmed() + ); + } + } + } + } + } +} diff --git a/terminator-cli/src/telemetry_receiver.rs b/terminator-cli/src/telemetry_receiver.rs deleted file mode 100644 index 57411f4b..00000000 --- a/terminator-cli/src/telemetry_receiver.rs +++ /dev/null @@ -1,383 +0,0 @@ -// Simple OTLP receiver for capturing workflow telemetry -use anyhow::Result; -use axum::{extract::State, http::StatusCode, response::Json, routing::post, Router}; -use bytes::Bytes; -use 
colored::Colorize; -use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; -use prost::Message; -use serde_json::json; -use std::sync::Arc; -use tokio::sync::Mutex; -use tokio::task::JoinHandle; - -pub struct TelemetryReceiver { - port: u16, -} - -impl TelemetryReceiver { - pub fn new(port: u16) -> Self { - Self { port } - } - - pub async fn start(self) -> Result> { - let steps_state = Arc::new(Mutex::new(StepsTracker::new())); - - let app = Router::new() - .route("/v1/traces", post(handle_traces)) - .with_state(steps_state); - - let addr = format!("127.0.0.1:{}", self.port); - - let handle = tokio::spawn(async move { - let listener = tokio::net::TcpListener::bind(&addr).await.unwrap(); - axum::serve(listener, app).await.unwrap(); - }); - - // Give it a moment to start - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - Ok(handle) - } -} - -struct StepsTracker { - total_steps: Option, - current_step: usize, -} - -impl StepsTracker { - fn new() -> Self { - Self { - total_steps: None, - current_step: 0, - } - } -} - -async fn handle_traces( - State(steps): State>>, - body: Bytes, -) -> (StatusCode, Json) { - // Try to parse as protobuf first (most common) - if let Ok(request) = ExportTraceServiceRequest::decode(&body[..]) { - process_protobuf_traces(request, steps).await; - } else if let Ok(json_data) = serde_json::from_slice::(&body) { - // Fallback to JSON parsing - process_json_traces(json_data, steps).await; - } - - (StatusCode::OK, Json(json!({"partialSuccess": {}}))) -} - -async fn process_json_traces(data: serde_json::Value, steps: Arc>) { - if let Some(resource_spans) = data.get("resourceSpans").and_then(|v| v.as_array()) { - for resource_span in resource_spans { - if let Some(scope_spans) = resource_span.get("scopeSpans").and_then(|v| v.as_array()) { - for scope_span in scope_spans { - if let Some(spans_array) = scope_span.get("spans").and_then(|v| v.as_array()) { - for span in spans_array { - process_span(span, 
&steps).await; - } - } - } - } - } - } -} - -async fn process_span(span: &serde_json::Value, tracker: &Arc>) { - let name = span.get("name").and_then(|v| v.as_str()).unwrap_or(""); - - // Parse attributes - let mut attributes = std::collections::HashMap::new(); - if let Some(attrs) = span.get("attributes").and_then(|v| v.as_array()) { - for attr in attrs { - if let (Some(key), Some(value)) = - (attr.get("key").and_then(|v| v.as_str()), attr.get("value")) - { - let val_str = extract_attribute_value(value); - attributes.insert(key.to_string(), val_str); - } - } - } - - // Parse events (step starts/completes) - if let Some(events_array) = span.get("events").and_then(|v| v.as_array()) { - for event in events_array { - if let Some(event_name) = event.get("name").and_then(|v| v.as_str()) { - let mut event_attrs = std::collections::HashMap::new(); - if let Some(attrs) = event.get("attributes").and_then(|v| v.as_array()) { - for attr in attrs { - if let (Some(key), Some(value)) = - (attr.get("key").and_then(|v| v.as_str()), attr.get("value")) - { - let val_str = extract_attribute_value(value); - event_attrs.insert(key.to_string(), val_str); - } - } - } - - // Display step progress - match event_name { - "workflow.started" => { - if let Some(total) = event_attrs.get("workflow.total_steps") { - let mut tracker = tracker.lock().await; - tracker.total_steps = total.parse().ok(); - - println!( - "\n{} {} {}", - "šŸŽÆ".cyan(), - "WORKFLOW STARTED:".bold().cyan(), - format!("{total} steps").dimmed() - ); - } - } - "step.started" => { - if let Some(tool) = event_attrs.get("step.tool") { - let step_index = event_attrs - .get("step.index") - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); - - let mut tracker = tracker.lock().await; - tracker.current_step = step_index + 1; - let total = tracker.total_steps.unwrap_or(0); - - println!( - " {} Step {}/{}: {} {}", - "ā–¶".blue(), - tracker.current_step, - total, - tool.yellow(), - "[running...]".dimmed() - ); - } - } - "step.completed" 
=> { - if let Some(status) = event_attrs.get("step.status") { - let icon = if status == "success" { - "āœ“".green() - } else if status == "skipped" { - "ā­".yellow() - } else { - "āœ—".red() - }; - println!(" {icon} Status: {status}"); - } - } - "workflow.completed" => { - let had_errors = event_attrs - .get("workflow.had_errors") - .and_then(|s| s.parse::().ok()) - .unwrap_or(false); - - if had_errors { - println!("\n{} Workflow completed with errors", "⚠".yellow()); - } else { - println!("\n{} Workflow completed successfully", "āœ…".green()); - } - } - _ => {} - } - } - } - } - - // Handle span-level info - if name.starts_with("workflow.") { - if let Some(total) = attributes.get("workflow.total_steps") { - let mut tracker = tracker.lock().await; - tracker.total_steps = total.parse().ok(); - } - } else if name.starts_with("step.") { - // Step span started - if let Some(tool) = attributes.get("tool.name") { - let step_num = attributes - .get("step.number") - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); - let step_total = attributes - .get("step.total") - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); - - println!( - " {} Step {}/{}: {} {}", - "šŸ“".green(), - step_num, - step_total, - tool.yellow(), - "[executing...]".dimmed() - ); - } - } -} - -fn extract_attribute_value(value: &serde_json::Value) -> String { - if let Some(s) = value.get("stringValue").and_then(|v| v.as_str()) { - s.to_string() - } else if let Some(i) = value.get("intValue").and_then(|v| v.as_i64()) { - i.to_string() - } else if let Some(f) = value.get("doubleValue").and_then(|v| v.as_f64()) { - f.to_string() - } else if let Some(b) = value.get("boolValue").and_then(|v| v.as_bool()) { - b.to_string() - } else { - value.to_string() - } -} - -// Process protobuf traces -async fn process_protobuf_traces( - request: ExportTraceServiceRequest, - tracker: Arc>, -) { - for resource_span in request.resource_spans { - for scope_span in resource_span.scope_spans { - for span in scope_span.spans { - 
let span_name = span.name.clone(); - - // Process events in the span - for event in &span.events { - let event_name = event.name.clone(); - let mut event_attrs = std::collections::HashMap::new(); - - // Extract event attributes - for attr in &event.attributes { - let key = attr.key.clone(); - let value = extract_proto_attr_value(&attr.value); - event_attrs.insert(key, value); - } - - // Display step progress based on events - match event_name.as_str() { - "workflow.started" => { - if let Some(total) = event_attrs.get("workflow.total_steps") { - let mut t = tracker.lock().await; - t.total_steps = total.parse().ok(); - - println!( - "\n{} {} {}", - "šŸŽÆ".cyan(), - "WORKFLOW STARTED:".bold().cyan(), - format!("{total} steps").dimmed() - ); - } - } - "step.started" => { - if let Some(tool) = event_attrs.get("step.tool") { - let step_index = event_attrs - .get("step.index") - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); - - let mut t = tracker.lock().await; - t.current_step = step_index + 1; - let total = t.total_steps.unwrap_or(0); - - println!( - " {} Step {}/{}: {} {}", - "ā–¶".blue(), - t.current_step, - total, - tool.yellow(), - "[running...]".dimmed() - ); - } - } - "step.completed" => { - if let Some(status) = event_attrs.get("step.status") { - let icon = if status == "success" { - "āœ“".green() - } else if status == "skipped" { - "ā­".yellow() - } else { - "āœ—".red() - }; - println!(" {icon} Status: {status}"); - } - } - "workflow.completed" => { - let had_errors = event_attrs - .get("workflow.had_errors") - .and_then(|s| s.parse::().ok()) - .unwrap_or(false); - - if had_errors { - println!("\n{} Workflow completed with errors", "⚠".yellow()); - } else { - println!("\n{} Workflow completed successfully", "āœ…".green()); - } - } - _ => {} - } - } - - // Also check span-level attributes for step info - if span_name.starts_with("step.") { - let mut span_attrs = std::collections::HashMap::new(); - for attr in &span.attributes { - let key = attr.key.clone(); - 
let value = extract_proto_attr_value(&attr.value); - span_attrs.insert(key, value); - } - - if let Some(tool) = span_attrs.get("tool.name") { - let step_num = span_attrs - .get("step.number") - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); - let step_total = span_attrs - .get("step.total") - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); - - println!( - " {} Step {}/{}: {} {}", - "šŸ“".green(), - step_num, - step_total, - tool.yellow(), - "[executing...]".dimmed() - ); - } - } - } - } - } -} - -// Extract value from protobuf attribute -fn extract_proto_attr_value( - value: &Option, -) -> String { - if let Some(val) = value { - if let Some(v) = &val.value { - match v { - opentelemetry_proto::tonic::common::v1::any_value::Value::StringValue(s) => { - s.clone() - } - opentelemetry_proto::tonic::common::v1::any_value::Value::IntValue(i) => { - i.to_string() - } - opentelemetry_proto::tonic::common::v1::any_value::Value::DoubleValue(f) => { - f.to_string() - } - opentelemetry_proto::tonic::common::v1::any_value::Value::BoolValue(b) => { - b.to_string() - } - _ => String::new(), - } - } else { - String::new() - } - } else { - String::new() - } -} - -// Start the telemetry receiver -pub async fn start_telemetry_receiver() -> Result> { - let receiver = TelemetryReceiver::new(4318); - receiver.start().await -} diff --git a/terminator-cli/src/utils.rs b/terminator-cli/src/utils.rs new file mode 100644 index 00000000..e2318fd6 --- /dev/null +++ b/terminator-cli/src/utils.rs @@ -0,0 +1,127 @@ +use tokio::process::Command; + +/// Check if the path is a Windows batch file +pub fn is_batch_file(path: &str) -> bool { + path.ends_with(".bat") || path.ends_with(".cmd") +} + +/// Create command with proper handling for batch files on Windows +pub fn create_command(executable: &str, args: &[String]) -> Command { + let mut cmd = if cfg!(windows) && is_batch_file(executable) { + // For batch files on Windows, use cmd.exe /c + let mut cmd = Command::new("cmd"); + cmd.arg("/c"); + 
cmd.arg(executable); + cmd + } else { + Command::new(executable) + }; + + if !args.is_empty() { + cmd.args(args); + } + + cmd +} + +/// Find executable with cross-platform path resolution +pub fn find_executable(name: &str) -> Option { + use std::env; + use std::path::Path; + + // On Windows, try multiple extensions, prioritizing executable types + let candidates = if cfg!(windows) { + vec![ + format!("{}.exe", name), + format!("{}.cmd", name), + format!("{}.bat", name), + name.to_string(), + ] + } else { + vec![name.to_string()] + }; + + // Check each candidate in PATH + if let Ok(path_var) = env::var("PATH") { + let separator = if cfg!(windows) { ";" } else { ":" }; + + for path_dir in path_var.split(separator) { + let path_dir = Path::new(path_dir); + + for candidate in &candidates { + let full_path = path_dir.join(candidate); + if full_path.exists() && full_path.is_file() { + return Some(full_path.to_string_lossy().to_string()); + } + } + } + } + + // Fallback: try the name as-is (might work on some systems) + Some(name.to_string()) +} + +pub fn init_logging() { + use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + + let _ = tracing_subscriber::registry() + .with( + // Respect RUST_LOG if provided, else default to info + tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| "info".into()), + ) + .with(tracing_subscriber::fmt::layer()) + .try_init(); +} + +// Helper function to parse step start logs +#[allow(dead_code)] +fn parse_step_log(line: &str) -> Option<(String, String, String)> { + // Parse lines like: "Step 0 BEGIN tool='open_application' id='open_notepad' ..." 
+ if let Some(step_idx) = line.find("Step ") { + let after_step = &line[step_idx + 5..]; + if let Some(space_idx) = after_step.find(' ') { + let step_num = &after_step[..space_idx]; + if let Some(tool_idx) = line.find("tool='") { + let after_tool = &line[tool_idx + 6..]; + if let Some(quote_idx) = after_tool.find('\'') { + let tool_name = &after_tool[..quote_idx]; + return Some(( + step_num.to_string(), + "?".to_string(), // We don't have total from logs + tool_name.to_string(), + )); + } + } else if let Some(group_idx) = line.find("group='") { + let after_group = &line[group_idx + 7..]; + if let Some(quote_idx) = after_group.find('\'') { + let group_name = &after_group[..quote_idx]; + return Some(( + step_num.to_string(), + "?".to_string(), + format!("[{group_name}]"), + )); + } + } + } + } + None +} + +// Helper function to parse step end logs +#[allow(dead_code)] +fn parse_step_end_log(line: &str) -> Option<(String, String)> { + // Parse lines like: "Step 0 END tool='open_application' id='open_notepad' status=success" + if let Some(step_idx) = line.find("Step ") { + let after_step = &line[step_idx + 5..]; + if let Some(space_idx) = after_step.find(' ') { + let step_num = &after_step[..space_idx]; + if let Some(status_idx) = line.find("status=") { + let after_status = &line[status_idx + 7..]; + let status = after_status.split_whitespace().next().unwrap_or("unknown"); + return Some((step_num.to_string(), status.to_string())); + } + } + } + None +} + diff --git a/terminator-cli/src/version_control.rs b/terminator-cli/src/version_control.rs new file mode 100644 index 00000000..c77c5430 --- /dev/null +++ b/terminator-cli/src/version_control.rs @@ -0,0 +1,557 @@ +use std::fs; +use std::env; +use std::path::Path; +use std::process::Command; +use crate::command::run_command; + +pub fn full_release(bump_type: &str) { + println!("šŸš€ Starting full release process with {bump_type} bump..."); + bump_version(bump_type); + tag_and_push(); +} + +pub fn ensure_project_root() { 
+ // Check if we're already in the project root + if Path::new("Cargo.toml").exists() && Path::new("terminator").exists() { + return; + } + + // If we're in terminator-cli, go up one level + if env::current_dir() + .map(|p| { + p.file_name() + .map(|n| n == "terminator-cli") + .unwrap_or(false) + }) + .unwrap_or(false) + && env::set_current_dir("..").is_err() + { + eprintln!("āŒ Failed to change to project root directory"); + std::process::exit(1); + } + + // Final check + if !Path::new("Cargo.toml").exists() || !Path::new("terminator").exists() { + eprintln!("āŒ Not in Terminator project root. Please run from workspace root."); + eprintln!("šŸ’” Usage: terminator "); + std::process::exit(1); + } +} + +pub fn parse_version(version: &str) -> Result<(u32, u32, u32), Box> { + let parts: Vec<&str> = version.split('.').collect(); + if parts.len() != 3 { + return Err("Invalid version format".into()); + } + + let major = parts[0].parse::()?; + let minor = parts[1].parse::()?; + let patch = parts[2].parse::()?; + + Ok((major, minor, patch)) +} + +pub fn tag_and_push() { + let version = match get_workspace_version() { + Ok(v) => v, + Err(e) => { + eprintln!("āŒ Failed to get current version: {e}"); + return; + } + }; + + println!("šŸ·ļø Tagging and pushing version {version}..."); + + // Check for uncommitted changes + if let Ok(output) = Command::new("git").args(["diff", "--name-only"]).output() { + let diff = String::from_utf8_lossy(&output.stdout); + if !diff.trim().is_empty() { + println!("āš ļø Uncommitted changes detected. 
Committing..."); + if let Err(e) = run_command("git", &["add", "."]) { + eprintln!("āŒ Failed to git add: {e}"); + return; + } + if let Err(e) = run_command( + "git", + &["commit", "-m", &format!("Bump version to {version}")], + ) { + eprintln!("āŒ Failed to git commit: {e}"); + return; + } + } + } + + // Create tag + let tag = format!("v{version}"); + if let Err(e) = run_command( + "git", + &[ + "tag", + "-a", + &tag, + "-m", + &format!("Release version {version}"), + ], + ) { + eprintln!("āŒ Failed to create tag: {e}"); + return; + } + + // Push changes and tag + if let Err(e) = run_command("git", &["push", "origin", "main"]) { + eprintln!("āŒ Failed to push changes: {e}"); + return; + } + + if let Err(e) = run_command("git", &["push", "origin", &tag]) { + eprintln!("āŒ Failed to push tag: {e}"); + return; + } + + println!("āœ… Successfully released version {version}!"); + println!("šŸ”— Check CI: https://github.com/mediar-ai/terminator/actions"); +} + +pub fn sync_browser_extension(version: &str) { + println!("šŸ“¦ Syncing browser extension to version {version}..."); + + let ext_dir = Path::new("terminator/browser-extension"); + if !ext_dir.exists() { + println!("āš ļø Browser extension directory not found, skipping"); + return; + } + + let manifest_path = ext_dir.join("manifest.json"); + if manifest_path.exists() { + if let Err(e) = update_json_version(&manifest_path.to_string_lossy(), version) { + eprintln!( + "āš ļø Warning: Failed to update {}: {}", + manifest_path.display(), + e + ); + } else { + println!("āœ… Updated manifest.json to {version}"); + } + } + + let build_check_path = ext_dir.join("build_check.json"); + if build_check_path.exists() { + if let Err(e) = update_json_version(&build_check_path.to_string_lossy(), version) { + eprintln!( + "āš ļø Warning: Failed to update {}: {}", + build_check_path.display(), + e + ); + } else { + println!("āœ… Updated build_check.json to {version}"); + } + } +} + +pub fn update_package_json(path: &str, 
version: &str) -> Result<(), Box> { + let content = fs::read_to_string(path)?; + let mut pkg: serde_json::Value = serde_json::from_str(&content)?; + + // Update main version + pkg["version"] = serde_json::Value::String(version.to_string()); + + // Update optional dependencies that start with terminator-mcp- or terminator.js- + if let Some(deps) = pkg + .get_mut("optionalDependencies") + .and_then(|v| v.as_object_mut()) + { + for (key, value) in deps.iter_mut() { + if key.starts_with("terminator-mcp-") || key.starts_with("terminator.js-") { + *value = serde_json::Value::String(version.to_string()); + } + } + } + + // Write back with pretty formatting + let formatted = serde_json::to_string_pretty(&pkg)?; + fs::write(path, formatted + "\n")?; + + Ok(()) +} + +pub fn update_json_version(path: &str, version: &str) -> Result<(), Box> { + let content = fs::read_to_string(path)?; + let mut json_value: serde_json::Value = serde_json::from_str(&content)?; + + json_value["version"] = serde_json::Value::String(version.to_string()); + + let formatted = serde_json::to_string_pretty(&json_value)?; + fs::write(path, formatted + "\n")?; + + Ok(()) +} + +pub fn show_status() { + println!("šŸ“Š Terminator Project Status"); + println!("============================"); + + let workspace_version = get_workspace_version().unwrap_or_else(|_| "ERROR".to_string()); + println!("šŸ“¦ Workspace version: {workspace_version}"); + + // Show package versions + let nodejs_version = get_package_version("bindings/nodejs/package.json"); + let mcp_version = get_package_version("terminator-mcp-agent/package.json"); + let browser_extension_version = + get_package_version("terminator/browser-extension/manifest.json"); + + println!(); + println!("Package versions:"); + println!(" Node.js bindings: {nodejs_version}"); + println!(" MCP agent: {mcp_version}"); + println!(" Browser extension:{browser_extension_version}"); + + // Git status + println!(); + println!("Git status:"); + if let Ok(output) = 
Command::new("git").args(["status", "--porcelain"]).output() { + let status = String::from_utf8_lossy(&output.stdout); + if status.trim().is_empty() { + println!(" āœ… Working directory clean"); + } else { + println!(" āš ļø Uncommitted changes:"); + for line in status.lines().take(5) { + println!(" {line}"); + } + } + } +} + +pub fn get_package_version(path: &str) -> String { + match fs::read_to_string(path) { + Ok(content) => match serde_json::from_str::(&content) { + Ok(pkg) => pkg + .get("version") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()) + .unwrap_or_else(|| "No version field".to_string()), + Err(_) => "Parse error".to_string(), + }, + Err(_) => "Not found".to_string(), + } +} + +pub fn sync_mcp_agent(version: &str) { + println!("šŸ“¦ Syncing MCP agent..."); + + let mcp_dir = Path::new("terminator-mcp-agent"); + if !mcp_dir.exists() { + return; + } + + // Update main package.json + if let Err(e) = update_package_json("terminator-mcp-agent/package.json", version) { + eprintln!("āš ļø Warning: Failed to update MCP agent package.json: {e}"); + return; + } + + // Update platform packages + let npm_dir = mcp_dir.join("npm"); + if npm_dir.exists() { + if let Ok(entries) = fs::read_dir(npm_dir) { + for entry in entries.flatten() { + if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { + let package_json = entry.path().join("package.json"); + if package_json.exists() { + if let Err(e) = + update_package_json(&package_json.to_string_lossy(), version) + { + eprintln!( + "āš ļø Warning: Failed to update {}: {}", + entry.path().display(), + e + ); + } else { + println!("šŸ“¦ Updated {}", entry.file_name().to_string_lossy()); + } + } + } + } + } + } + + // Update package-lock.json + let original_dir = match env::current_dir() { + Ok(dir) => dir, + Err(e) => { + eprintln!("āŒ Could not get current directory: {e}"); + return; + } + }; + + if env::set_current_dir(mcp_dir).is_ok() { + if run_command("npm", &["install", "--package-lock-only", 
"--silent"]).is_ok() { + println!("āœ… MCP package-lock.json updated."); + } else { + eprintln!("āš ļø Warning: Failed to update MCP agent package-lock.json"); + } + // Always change back to the original directory + if let Err(e) = env::set_current_dir(&original_dir) { + eprintln!("āŒ Failed to restore original directory: {e}"); + std::process::exit(1); + } + } + + println!("āœ… MCP agent synced"); +} + +pub fn sync_nodejs_bindings(version: &str) { + println!("šŸ“¦ Syncing Node.js bindings to version {version}..."); + + let nodejs_dir = Path::new("bindings/nodejs"); + if !nodejs_dir.exists() { + println!("āš ļø Node.js bindings directory not found, skipping"); + return; + } + + // Update main package.json directly + if let Err(e) = update_package_json("bindings/nodejs/package.json", version) { + eprintln!("āš ļø Warning: Failed to update Node.js package.json directly: {e}"); + } else { + println!("āœ… Updated Node.js package.json to {version}"); + } + + // ALSO update CPU/platform-specific packages under bindings/nodejs/npm + let npm_dir = nodejs_dir.join("npm"); + if npm_dir.exists() { + if let Ok(entries) = fs::read_dir(&npm_dir) { + for entry in entries.flatten() { + if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { + let package_json = entry.path().join("package.json"); + if package_json.exists() { + if let Err(e) = + update_package_json(&package_json.to_string_lossy(), version) + { + eprintln!( + "āš ļø Warning: Failed to update {}: {}", + package_json.display(), + e + ); + } else { + println!("šŸ“¦ Updated {}", entry.file_name().to_string_lossy()); + } + } + } + } + } + } + + // Run sync script if it exists (still useful for additional tasks like N-API metadata) + let original_dir = match env::current_dir() { + Ok(dir) => dir, + Err(e) => { + eprintln!("āŒ Could not get current directory: {e}"); + return; + } + }; + + if env::set_current_dir(nodejs_dir).is_ok() { + println!("šŸ”„ Running npm run sync-version..."); + if run_command("npm", 
&["run", "sync-version"]).is_ok() { + println!("āœ… Node.js sync script completed"); + } else { + eprintln!("āš ļø Warning: npm run sync-version failed"); + } + // Always change back to the original directory + if let Err(e) = env::set_current_dir(&original_dir) { + eprintln!("āŒ Failed to restore original directory: {e}"); + std::process::exit(1); // Exit if we can't get back, to avoid further errors + } + } else { + eprintln!("āš ļø Warning: Could not switch to Node.js directory"); + } +} + +pub fn sync_all_versions() { + println!("šŸ”„ Syncing all package versions..."); + + // First, sync versions within Cargo.toml + if let Err(e) = sync_cargo_versions() { + eprintln!("āŒ Failed to sync versions in Cargo.toml: {e}"); + return; + } + + let workspace_version = match get_workspace_version() { + Ok(v) => v, + Err(e) => { + eprintln!("āŒ Failed to get workspace version: {e}"); + return; + } + }; + + println!("šŸ“¦ Workspace version: {workspace_version}"); + + // Sync Node.js bindings + sync_nodejs_bindings(&workspace_version); + + // Sync MCP agent + sync_mcp_agent(&workspace_version); + + // Sync Browser Extension + sync_browser_extension(&workspace_version); + + // Update Cargo.lock + println!("šŸ”’ Updating Cargo.lock..."); + if let Err(e) = run_command("cargo", &["check", "--quiet"]) { + eprintln!("āš ļø Warning: Failed to update Cargo.lock: {e}"); + } + + println!("āœ… All versions synchronized!"); +} + +pub fn bump_version(bump_type: &str) { + println!("šŸ”„ Bumping {bump_type} version..."); + + let current_version = match get_workspace_version() { + Ok(v) => v, + Err(e) => { + eprintln!("āŒ Failed to get current version: {e}"); + return; + } + }; + + let (major, minor, patch) = match parse_version(¤t_version) { + Ok(v) => v, + Err(e) => { + eprintln!("āŒ Failed to parse version {current_version}: {e}"); + return; + } + }; + + let new_version = match bump_type { + "patch" => format!("{}.{}.{}", major, minor, patch + 1), + "minor" => format!("{}.{}.0", 
major, minor + 1), + "major" => format!("{}.0.0", major + 1), + _ => { + eprintln!("āŒ Invalid bump type: {bump_type}"); + return; + } + }; + + println!("šŸ“ {current_version} → {new_version}"); + + if let Err(e) = set_workspace_version(&new_version) { + eprintln!("āŒ Failed to update workspace version: {e}"); + return; + } + + println!("āœ… Updated workspace version to {new_version}"); + sync_all_versions(); +} + +pub fn set_workspace_version(new_version: &str) -> Result<(), Box> { + let cargo_toml = fs::read_to_string("Cargo.toml")?; + let mut lines: Vec = cargo_toml.lines().map(|s| s.to_string()).collect(); + let mut in_workspace_package = false; + let mut package_version_updated = false; + + let tmp = 0..lines.len(); + for i in tmp { + let line = &lines[i]; + let trimmed_line = line.trim(); + + if trimmed_line.starts_with('[') { + in_workspace_package = trimmed_line == "[workspace.package]"; + continue; + } + + if in_workspace_package && trimmed_line.starts_with("version =") { + let indentation = line.len() - line.trim_start().len(); + lines[i] = format!("{}version = \"{}\"", " ".repeat(indentation), new_version); + package_version_updated = true; + break; // Exit after finding and updating the version + } + } + + if !package_version_updated { + return Err("version key not found in [workspace.package] in Cargo.toml".into()); + } + + fs::write("Cargo.toml", lines.join("\n") + "\n")?; + Ok(()) +} + +pub fn get_workspace_version() -> Result> { + let cargo_toml = fs::read_to_string("Cargo.toml")?; + let mut in_workspace_package = false; + + for line in cargo_toml.lines() { + let trimmed_line = line.trim(); + if trimmed_line == "[workspace.package]" { + in_workspace_package = true; + continue; + } + + if in_workspace_package { + if trimmed_line.starts_with('[') { + // We've left the workspace.package section + break; + } + if trimmed_line.starts_with("version") { + if let Some(version_part) = trimmed_line.split('=').nth(1) { + if let Some(version) = 
version_part.trim().split('"').nth(1) { + return Ok(version.to_string()); + } + } + } + } + } + + Err("Version not found in [workspace.package] in Cargo.toml".into()) +} + +pub fn sync_cargo_versions() -> Result<(), Box> { + println!("šŸ“¦ Syncing Cargo.toml dependency versions..."); + let workspace_version = get_workspace_version()?; + + let cargo_toml = fs::read_to_string("Cargo.toml")?; + let mut lines: Vec = cargo_toml.lines().map(|s| s.to_string()).collect(); + let mut in_workspace_deps = false; + let mut deps_version_updated = false; + + let tmp = 0..lines.len(); + for i in tmp { + let line = &lines[i]; + let trimmed_line = line.trim(); + + if trimmed_line.starts_with('[') { + in_workspace_deps = trimmed_line == "[workspace.dependencies]"; + continue; + } + + if in_workspace_deps && trimmed_line.starts_with("terminator =") { + let line_clone = line.clone(); + if let Some(start) = line_clone.find("version = \"") { + let version_start = start + "version = \"".len(); + if let Some(end_quote_offset) = line_clone[version_start..].find('"') { + let range = version_start..(version_start + end_quote_offset); + if &line_clone[range.clone()] != workspace_version.as_str() { + lines[i].replace_range(range, &workspace_version); + println!( + "āœ… Updated 'terminator' dependency version to {workspace_version}." + ); + deps_version_updated = true; + } else { + println!("āœ… 'terminator' dependency version is already up to date."); + deps_version_updated = true; // Mark as done + } + } + } + break; // Assume only one terminator dependency to update + } + } + + if deps_version_updated { + fs::write("Cargo.toml", lines.join("\n") + "\n")?; + } else { + eprintln!( + "āš ļø Warning: Could not find 'terminator' in [workspace.dependencies] to sync version." 
+ ); + } + Ok(()) +} + diff --git a/terminator-cli/src/workflow_exec/cron.rs b/terminator-cli/src/workflow_exec/cron.rs new file mode 100644 index 00000000..0d81b82f --- /dev/null +++ b/terminator-cli/src/workflow_exec/cron.rs @@ -0,0 +1,35 @@ +use serde_json::Value; + +/// Extract cron expression from workflow YAML +pub fn extract_cron_from_workflow(workflow: &Value) -> Option { + // Primary format: cron field at root level (simpler format) + if let Some(cron) = workflow.get("cron") { + if let Some(cron_str) = cron.as_str() { + return Some(cron_str.to_string()); + } + } + + // Alternative: GitHub Actions style: on.schedule.cron + if let Some(on) = workflow.get("on") { + if let Some(schedule) = on.get("schedule") { + // Handle both single cron and array of crons + if let Some(cron_array) = schedule.as_array() { + // If it's an array, take the first cron expression + if let Some(first_schedule) = cron_array.first() { + if let Some(cron) = first_schedule.get("cron") { + if let Some(cron_str) = cron.as_str() { + return Some(cron_str.to_string()); + } + } + } + } else if let Some(cron) = schedule.get("cron") { + // Handle single cron expression + if let Some(cron_str) = cron.as_str() { + return Some(cron_str.to_string()); + } + } + } + } + + None +} diff --git a/terminator-cli/src/workflow_exec/exec.rs b/terminator-cli/src/workflow_exec/exec.rs new file mode 100644 index 00000000..3f579fe5 --- /dev/null +++ b/terminator-cli/src/workflow_exec/exec.rs @@ -0,0 +1,491 @@ +use anyhow::Result; +use serde_json::json; +use rmcp::{ + ServiceExt, + transport::{StreamableHttpClientTransport, TokioChildProcess}, + model::{CallToolRequestParam, + ClientCapabilities, ClientInfo, Implementation}, +}; +use tracing::info; +use tokio::time::sleep; +use std::time::Duration; +use super::workflow::Transport; +use crate::{utils, telemetry::process}; + + +#[allow(dead_code)] +pub async fn execute_command_with_result( + transport: Transport, + tool: String, + args: Option, +) -> Result { + 
execute_command_with_progress(transport, tool, args, false).await +} + +pub async fn execute_command_with_progress( + transport: Transport, + tool: String, + args: Option, + show_progress: bool, +) -> Result { + execute_command_with_progress_and_retry(transport, tool, args, show_progress, false).await +} + +pub async fn execute_command_with_progress_and_retry( + transport: Transport, + tool: String, + args: Option, + show_progress: bool, + no_retry: bool, +) -> Result { + use colored::Colorize; + use tracing::debug; + + // Start telemetry receiver if showing progress for workflows + let telemetry_handle = if show_progress && tool == "execute_sequence" { + match process::start_telemetry_receiver().await { + Ok(handle) => { + debug!("Started telemetry receiver on port 4318"); + Some(handle) + } + Err(e) => { + debug!("Failed to start telemetry receiver: {}", e); + None + } + } + } else { + None + }; + + // Special handling for execute_sequence to capture full result + if tool == "execute_sequence" { + match transport { + Transport::Http(url) => { + debug!("Connecting to server: {}", url); + let transport = StreamableHttpClientTransport::from_uri(url.as_str()); + let client_info = ClientInfo { + protocol_version: Default::default(), + capabilities: ClientCapabilities::default(), + client_info: Implementation { + name: "terminator-cli".to_string(), + version: env!("CARGO_PKG_VERSION").to_string(), + }, + }; + + // Connection setup - no retry here as StreamableHttpClientTransport doesn't support cloning + // Retries will be handled at the tool call level + let service = client_info.serve(transport).await?; + + let arguments = if let Some(args_str) = args { + serde_json::from_str::(&args_str) + .ok() + .and_then(|v| v.as_object().cloned()) + } else { + None + }; + + // Parse workflow to get step count if showing progress + if show_progress { + if let Some(args_obj) = &arguments { + if let Some(steps) = args_obj.get("steps").and_then(|v| v.as_array()) { + let total_steps 
= steps.len(); + println!( + "\n{} {} {}", + "šŸŽÆ".cyan(), + "WORKFLOW START:".bold().cyan(), + format!("{total_steps} steps").dimmed() + ); + + // List the steps that will be executed + for (i, step) in steps.iter().enumerate() { + let tool_name = step + .get("tool_name") + .and_then(|v| v.as_str()) + .or_else(|| step.get("group_name").and_then(|v| v.as_str())) + .unwrap_or("unknown"); + let step_id = step.get("id").and_then(|v| v.as_str()).unwrap_or(""); + + println!( + " {} Step {}/{}: {} {}", + "šŸ“‹".dimmed(), + i + 1, + total_steps, + tool_name.yellow(), + if !step_id.is_empty() { + format!("[{step_id}]").dimmed().to_string() + } else { + String::new() + } + ); + } + println!("\n{} Executing workflow...\n", "⚔".cyan()); + } + } + } + + // Retry logic for tool execution + let mut retry_count = 0; + let max_retries = if no_retry { 0 } else { 3 }; + let mut _last_error = None; + + let result = loop { + match service + .call_tool(CallToolRequestParam { + name: tool.clone().into(), + arguments: arguments.clone(), + }) + .await + { + Ok(res) => break res, + Err(e) => { + let error_str = e.to_string(); + let is_retryable = error_str.contains("401") + || error_str.contains("Unauthorized") + || error_str.contains("500") + || error_str.contains("502") + || error_str.contains("503") + || error_str.contains("504") + || error_str.contains("timeout"); + + if is_retryable && retry_count < max_retries { + retry_count += 1; + let delay = Duration::from_secs(2u64.pow(retry_count)); + eprintln!("āš ļø Tool execution failed: {}. Retrying in {} seconds... 
(attempt {}/{})", + error_str, delay.as_secs(), retry_count, max_retries); + sleep(delay).await; + _last_error = Some(e); + } else { + return Err(e.into()); + } + } + } + }; + + // Parse the result content as JSON + if !result.content.is_empty() { + for content in &result.content { + if let rmcp::model::RawContent::Text(text) = &content.raw { + // Try to parse as JSON + if let Ok(json_result) = + serde_json::from_str::(&text.text) + { + service.cancel().await?; + + // Stop telemetry receiver if it was started + if let Some(handle) = telemetry_handle { + handle.abort(); + } + + return Ok(json_result); + } + } + } + } + + service.cancel().await?; + + // Stop telemetry receiver if it was started + if let Some(handle) = telemetry_handle { + handle.abort(); + } + + Ok(json!({"status": "unknown", "message": "No parseable result from workflow"})) + } + Transport::Stdio(command) => { + debug!("Starting MCP server: {}", command.join(" ")); + let executable = utils::find_executable(&command[0]).unwrap_or_else(|| command[0].clone()); + let command_args: Vec = if command.len() > 1 { + command[1..].to_vec() + } else { + vec![] + }; + let mut cmd = utils::create_command(&executable, &command_args); + + // Set up logging for the server to capture step progress + if std::env::var("LOG_LEVEL").is_err() && std::env::var("RUST_LOG").is_err() { + if show_progress { + // Enable info level logging to see step progress + cmd.env("RUST_LOG", "terminator_mcp_agent=info"); + } else { + cmd.env("LOG_LEVEL", "info"); + } + } + + // Enable telemetry if showing progress + if show_progress { + cmd.env("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4318"); + cmd.env("OTEL_SERVICE_NAME", "terminator-mcp"); + cmd.env("ENABLE_TELEMETRY", "true"); + } + + // For now, just use the standard transport without stderr parsing + // TODO: Add proper step streaming once MCP protocol supports it + let transport = TokioChildProcess::new(cmd)?; + let service = ().serve(transport).await?; + + let arguments = 
if let Some(args_str) = args { + // Parse workflow to show initial progress + if show_progress { + if let Ok(workflow) = serde_json::from_str::(&args_str) { + if let Some(steps) = workflow.get("steps").and_then(|v| v.as_array()) { + let total_steps = steps.len(); + println!( + "\n{} {} {}", + "šŸŽÆ".cyan(), + "WORKFLOW START:".bold().cyan(), + format!("{total_steps} steps").dimmed() + ); + + // List the steps that will be executed + for (i, step) in steps.iter().enumerate() { + let tool_name = step + .get("tool_name") + .and_then(|v| v.as_str()) + .or_else(|| step.get("group_name").and_then(|v| v.as_str())) + .unwrap_or("unknown"); + let step_id = + step.get("id").and_then(|v| v.as_str()).unwrap_or(""); + + println!( + " {} Step {}/{}: {} {}", + "šŸ“‹".dimmed(), + i + 1, + total_steps, + tool_name.yellow(), + if !step_id.is_empty() { + format!("[{step_id}]").dimmed().to_string() + } else { + String::new() + } + ); + } + println!("\n{} Executing workflow...\n", "⚔".cyan()); + } + } + } + + serde_json::from_str::(&args_str) + .ok() + .and_then(|v| v.as_object().cloned()) + } else { + None + }; + + // Retry logic for tool execution (stdio) + let mut retry_count = 0; + let max_retries = if no_retry { 0 } else { 3 }; + let mut _last_error = None; + + let result = loop { + match service + .call_tool(CallToolRequestParam { + name: tool.clone().into(), + arguments: arguments.clone(), + }) + .await + { + Ok(res) => break res, + Err(e) => { + let error_str = e.to_string(); + let is_retryable = error_str.contains("401") + || error_str.contains("Unauthorized") + || error_str.contains("500") + || error_str.contains("502") + || error_str.contains("503") + || error_str.contains("504") + || error_str.contains("timeout"); + + if is_retryable && retry_count < max_retries { + retry_count += 1; + let delay = Duration::from_secs(2u64.pow(retry_count)); + eprintln!("āš ļø Tool execution failed: {}. Retrying in {} seconds... 
(attempt {}/{})", + error_str, delay.as_secs(), retry_count, max_retries); + sleep(delay).await; + _last_error = Some(e); + } else { + return Err(e.into()); + } + } + } + }; + + // Parse the result content as JSON + if !result.content.is_empty() { + for content in &result.content { + if let rmcp::model::RawContent::Text(text) = &content.raw { + // Try to parse as JSON + if let Ok(json_result) = + serde_json::from_str::(&text.text) + { + service.cancel().await?; + + // Stop telemetry receiver if it was started + if let Some(handle) = telemetry_handle { + handle.abort(); + } + + return Ok(json_result); + } + } + } + } + + service.cancel().await?; + + // Stop telemetry receiver if it was started + if let Some(handle) = telemetry_handle { + handle.abort(); + } + + Ok(json!({"status": "unknown", "message": "No parseable result from workflow"})) + } + } + } else { + // For other tools, just execute normally + execute_command(transport, tool.clone(), args).await?; + Ok(json!({"status": "success", "message": format!("Tool {} executed", tool)})) + } +} + +pub async fn execute_command( + transport: Transport, + tool: String, + args: Option, +) -> Result<()> { + // Initialize logging for non-interactive mode + utils::init_logging(); + + match transport { + Transport::Http(url) => { + info!("Connecting to server: {}", url); + let transport = StreamableHttpClientTransport::from_uri(url.as_str()); + let client_info = ClientInfo { + protocol_version: Default::default(), + capabilities: ClientCapabilities::default(), + client_info: Implementation { + name: "terminator-cli".to_string(), + version: env!("CARGO_PKG_VERSION").to_string(), + }, + }; + let service = client_info.serve(transport).await?; + + let arguments = if let Some(args_str) = args { + serde_json::from_str::(&args_str) + .ok() + .and_then(|v| v.as_object().cloned()) + } else { + None + }; + + println!( + "⚔ Calling {} with args: {}", + tool, + arguments + .as_ref() + .map(|a| 
serde_json::to_string(a).unwrap_or_default()) + .unwrap_or_else(|| "{}".to_string()) + ); + + let result = service + .call_tool(CallToolRequestParam { + name: tool.into(), + arguments, + }) + .await?; + + println!("āœ… Result:"); + if !result.content.is_empty() { + for content in &result.content { + match &content.raw { + rmcp::model::RawContent::Text(text) => { + println!("{}", text.text); + } + rmcp::model::RawContent::Image(image) => { + println!("[Image: {}]", image.mime_type); + } + rmcp::model::RawContent::Resource(resource) => { + println!("[Resource: {:?}]", resource.resource); + } + rmcp::model::RawContent::Audio(audio) => { + println!("[Audio: {}]", audio.mime_type); + } + rmcp::model::RawContent::ResourceLink(resource) => { + println!("[ResourceLink: {resource:?}]"); + } + } + } + } + + // Cancel the service connection + service.cancel().await?; + } + Transport::Stdio(command) => { + info!("Starting MCP server: {}", command.join(" ")); + let executable = utils::find_executable(&command[0]).unwrap_or_else(|| command[0].clone()); + let command_args: Vec = if command.len() > 1 { + command[1..].to_vec() + } else { + vec![] + }; + let mut cmd = utils::create_command(&executable, &command_args); + // Default server log level to info if not provided by the user + if std::env::var("LOG_LEVEL").is_err() && std::env::var("RUST_LOG").is_err() { + cmd.env("LOG_LEVEL", "info"); + } + let transport = TokioChildProcess::new(cmd)?; + let service = ().serve(transport).await?; + + let arguments = if let Some(args_str) = args { + serde_json::from_str::(&args_str) + .ok() + .and_then(|v| v.as_object().cloned()) + } else { + None + }; + + println!( + "⚔ Calling {} with args: {}", + tool, + arguments + .as_ref() + .map(|a| serde_json::to_string(a).unwrap_or_default()) + .unwrap_or_else(|| "{}".to_string()) + ); + + let result = service + .call_tool(CallToolRequestParam { + name: tool.into(), + arguments, + }) + .await?; + + println!("āœ… Result:"); + if 
!result.content.is_empty() { + for content in &result.content { + match &content.raw { + rmcp::model::RawContent::Text(text) => { + println!("{}", text.text); + } + rmcp::model::RawContent::Image(image) => { + println!("[Image: {}]", image.mime_type); + } + rmcp::model::RawContent::Resource(resource) => { + println!("[Resource: {:?}]", resource.resource); + } + rmcp::model::RawContent::Audio(audio) => { + println!("[Audio: {}]", audio.mime_type); + } + rmcp::model::RawContent::ResourceLink(resource) => { + println!("[ResourceLink: {resource:?}]"); + } + } + } + } + + // Cancel the service connection + service.cancel().await?; + } + } + Ok(()) +} + diff --git a/terminator-cli/src/workflow_exec/input.rs b/terminator-cli/src/workflow_exec/input.rs new file mode 100644 index 00000000..754565c6 --- /dev/null +++ b/terminator-cli/src/workflow_exec/input.rs @@ -0,0 +1,68 @@ +use crate::cli::InputType; + +pub fn determine_input_type(input: &str, specified_type: InputType) -> InputType { + match specified_type { + InputType::Auto => { + if input.starts_with("https://gist.github.com/") { + InputType::Gist + } else if input.starts_with("https://gist.githubusercontent.com/") + || input.starts_with("http://") + || input.starts_with("https://") + { + InputType::Raw + } else { + InputType::File + } + } + other => other, + } +} + +pub fn convert_gist_to_raw_url(gist_url: &str) -> anyhow::Result { + if !gist_url.starts_with("https://gist.github.com/") { + return Err(anyhow::anyhow!("Invalid GitHub gist URL format")); + } + + let raw_url = gist_url.replace( + "https://gist.github.com/", + "https://gist.githubusercontent.com/", + ); + + Ok(if raw_url.ends_with("/raw") { + raw_url + } else { + format!("{raw_url}/raw") + }) +} + +pub async fn read_local_file(path: &str) -> anyhow::Result { + use std::path::Path; + use tokio::fs; + + let p = Path::new(path); + if !p.exists() { + return Err(anyhow::anyhow!("File not found: {}", p.display())); + } + if !p.is_file() { + return 
Err(anyhow::anyhow!("Not a file: {}", p.display())); + } + + fs::read_to_string(p).await.map_err(|e| e.into()) +} + +pub async fn fetch_remote_content(url: &str) -> anyhow::Result { + let client = reqwest::Client::new(); + let res = client + .get(url) + .header("User-Agent", "terminator-cli-workflow/1.0") + .send() + .await?; + if !res.status().is_success() { + return Err(anyhow::anyhow!( + "HTTP request failed: {} for {}", + res.status(), + url + )); + } + Ok(res.text().await?) +} diff --git a/terminator-cli/src/workflow_exec/mod.rs b/terminator-cli/src/workflow_exec/mod.rs new file mode 100644 index 00000000..713ad329 --- /dev/null +++ b/terminator-cli/src/workflow_exec/mod.rs @@ -0,0 +1,7 @@ +pub mod exec; +pub mod cron; +pub mod input; +pub mod result; +pub mod parsing; +pub mod workflow; +pub mod validation; diff --git a/terminator-cli/src/workflow_exec/parsing.rs b/terminator-cli/src/workflow_exec/parsing.rs new file mode 100644 index 00000000..2b42ba00 --- /dev/null +++ b/terminator-cli/src/workflow_exec/parsing.rs @@ -0,0 +1,65 @@ +/// Parse workflow content using robust parsing strategies from gist_executor.rs +pub fn parse_workflow_content(content: &str) -> anyhow::Result { + // Strategy 1: Try direct JSON workflow + if let Ok(val) = serde_json::from_str::(content) { + // Check if it's a valid workflow (has steps field) + if val.get("steps").is_some() { + return Ok(val); + } + + // Check if it's a wrapper object + if let Some(extracted) = extract_workflow_from_wrapper(&val)? { + return Ok(extracted); + } + } + + // Strategy 2: Try direct YAML workflow + if let Ok(val) = serde_yaml::from_str::(content) { + // Check if it's a valid workflow (has steps field) + if val.get("steps").is_some() { + return Ok(val); + } + + // Check if it's a wrapper object + if let Some(extracted) = extract_workflow_from_wrapper(&val)? 
{ + return Ok(extracted); + } + } + + // Strategy 3: Try parsing as JSON wrapper first, then extract + if let Ok(val) = serde_json::from_str::(content) { + if let Some(extracted) = extract_workflow_from_wrapper(&val)? { + return Ok(extracted); + } + } + + // Strategy 4: Try parsing as YAML wrapper first, then extract + if let Ok(val) = serde_yaml::from_str::(content) { + if let Some(extracted) = extract_workflow_from_wrapper(&val)? { + return Ok(extracted); + } + } + + Err(anyhow::anyhow!( + "Unable to parse content as JSON or YAML workflow or wrapper object. Content must either be:\n\ + 1. A workflow with 'steps' field\n\ + 2. A wrapper object with tool_name='execute_sequence' and 'arguments' field\n\ + 3. Valid JSON or YAML format" + )) +} + +/// Extract workflow from wrapper object if it has tool_name: execute_sequence +pub fn extract_workflow_from_wrapper( + value: &serde_json::Value, +) -> anyhow::Result> { + if let Some(tool_name) = value.get("tool_name") { + if tool_name == "execute_sequence" { + if let Some(arguments) = value.get("arguments") { + return Ok(Some(arguments.clone())); + } else { + return Err(anyhow::anyhow!("Tool call missing 'arguments' field")); + } + } + } + Ok(None) +} diff --git a/terminator-cli/src/workflow_result.rs b/terminator-cli/src/workflow_exec/result.rs similarity index 100% rename from terminator-cli/src/workflow_result.rs rename to terminator-cli/src/workflow_exec/result.rs diff --git a/terminator-cli/src/workflow_exec/validation.rs b/terminator-cli/src/workflow_exec/validation.rs new file mode 100644 index 00000000..afb7ca46 --- /dev/null +++ b/terminator-cli/src/workflow_exec/validation.rs @@ -0,0 +1,103 @@ +/// Validate workflow structure to provide early error detection +pub fn validate_workflow(workflow: &serde_json::Value) -> anyhow::Result<()> { + // Check that it's an object + let obj = workflow + .as_object() + .ok_or_else(|| anyhow::anyhow!("Workflow must be a JSON object"))?; + + // Check that steps exists and is an 
array + let steps = obj + .get("steps") + .ok_or_else(|| anyhow::anyhow!("Workflow must contain a 'steps' field"))?; + + let steps_array = steps + .as_array() + .ok_or_else(|| anyhow::anyhow!("'steps' field must be an array"))?; + + if steps_array.is_empty() { + return Err(anyhow::anyhow!("Workflow must contain at least one step")); + } + + // Validate each step + for (i, step) in steps_array.iter().enumerate() { + let step_obj = step + .as_object() + .ok_or_else(|| anyhow::anyhow!("Step {} must be an object", i))?; + + let has_tool_name = step_obj.contains_key("tool_name"); + let has_group_name = step_obj.contains_key("group_name"); + + if !has_tool_name && !has_group_name { + return Err(anyhow::anyhow!( + "Step {} must have either 'tool_name' or 'group_name'", + i + )); + } + + if has_tool_name && has_group_name { + return Err(anyhow::anyhow!( + "Step {} cannot have both 'tool_name' and 'group_name'", + i + )); + } + } + + // Validate variables if present + if let Some(variables) = obj.get("variables") { + if let Some(vars_obj) = variables.as_object() { + for (name, def) in vars_obj { + if name.is_empty() { + return Err(anyhow::anyhow!("Variable name cannot be empty")); + } + + if let Some(def_obj) = def.as_object() { + // Ensure label exists and is non-empty + if let Some(label) = def_obj.get("label") { + if let Some(label_str) = label.as_str() { + if label_str.is_empty() { + return Err(anyhow::anyhow!( + "Variable '{}' must have a non-empty label", + name + )); + } + } + } else { + return Err(anyhow::anyhow!( + "Variable '{}' must have a 'label' field", + name + )); + } + + // --------------------- NEW VALIDATION --------------------- + // Enforce `required` property logic + let is_required = def_obj + .get("required") + .and_then(|v| v.as_bool()) + .unwrap_or(true); + + if is_required { + // Check for default value in definition + let has_default = def_obj.contains_key("default"); + + // Check if inputs provide a value for this variable + let input_has_value = 
obj + .get("inputs") + .and_then(|v| v.as_object()) + .map(|inputs_obj| inputs_obj.contains_key(name)) + .unwrap_or(false); + + if !has_default && !input_has_value { + return Err(anyhow::anyhow!( + "Required variable '{}' is missing and has no default value", + name + )); + } + } + // ---------------------------------------------------------------- + } + } + } + } + + Ok(()) +} diff --git a/terminator-cli/src/workflow_exec/workflow.rs b/terminator-cli/src/workflow_exec/workflow.rs new file mode 100644 index 00000000..c6ae3dc3 --- /dev/null +++ b/terminator-cli/src/workflow_exec/workflow.rs @@ -0,0 +1,343 @@ +use serde_json::Value; +use anyhow::Context; +use crate::cli::{InputType, McpRunArgs}; +use super::{ + result::{WorkflowResult, WorkflowState}, + cron::extract_cron_from_workflow, + validation::validate_workflow, + parsing::parse_workflow_content, + exec::execute_command_with_progress_and_retry, + input::{ + determine_input_type, + read_local_file, + convert_gist_to_raw_url, + fetch_remote_content + } +}; + +#[derive(Clone)] +pub enum Transport { + Http(String), + Stdio(Vec), +} + +pub async fn run_workflow(transport: Transport, args: McpRunArgs) -> anyhow::Result<()> { + use tracing::info; + + if args.verbose { + // Keep rmcp quieter even in verbose mode unless user explicitly overrides + std::env::set_var("RUST_LOG", "debug,rmcp=warn"); + } + + // Initialize simple logging (only if not already initialized) + { + use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + let _ = tracing_subscriber::registry() + .with( + tracing_subscriber::EnvFilter::try_from_default_env() + // Suppress noisy rmcp info logs by default while keeping our own at info + .unwrap_or_else(|_| "info,rmcp=warn".into()), + ) + .with(tracing_subscriber::fmt::layer()) + .try_init(); // Use try_init instead of init to avoid panics on duplicate initialization + } + + info!("Starting workflow execution via terminator CLI"); + info!(input = %args.input, ?args.input_type); + + 
// Resolve actual input type (auto-detect if needed) + let resolved_type = determine_input_type(&args.input, args.input_type); + + // Fetch workflow content + let content = match resolved_type { + InputType::File => { + info!("Reading local file"); + read_local_file(&args.input).await? + } + InputType::Gist => { + info!("Fetching GitHub gist"); + let raw_url = convert_gist_to_raw_url(&args.input)?; + fetch_remote_content(&raw_url).await? + } + InputType::Raw => { + info!("Fetching raw URL"); + fetch_remote_content(&args.input).await? + } + InputType::Auto => unreachable!(), + }; + + // Parse workflow using the same robust logic as gist_executor + let mut workflow_val = parse_workflow_content(&content) + .with_context(|| format!("Failed to parse workflow from {}", args.input))?; + + // Handle cron scheduling if specified in workflow + if let Some(cron_expr) = extract_cron_from_workflow(&workflow_val) { + info!( + "šŸ• Starting cron scheduler with workflow expression: {}", + cron_expr + ); + return run_workflow_with_cron(transport, args, &cron_expr).await; + } + + // Validate workflow structure early to catch issues + validate_workflow(&workflow_val).with_context(|| "Workflow validation failed")?; + + // Get steps count for logging + let steps_count = workflow_val + .get("steps") + .and_then(|v| v.as_array()) + .map(|arr| arr.len()) + .unwrap_or(0); + + info!( + "Successfully parsed and validated workflow with {} steps", + steps_count + ); + + // Apply overrides + if let Some(obj) = workflow_val.as_object_mut() { + if args.no_stop_on_error { + obj.insert("stop_on_error".into(), serde_json::Value::Bool(false)); + } + if args.no_detailed_results { + obj.insert( + "include_detailed_results".into(), + serde_json::Value::Bool(false), + ); + } + } + + if args.dry_run { + println!("āœ… Workflow validation successful!"); + println!("šŸ“Š Workflow Summary:"); + println!(" • Steps: {steps_count}"); + + if let Some(variables) = workflow_val.get("variables").and_then(|v| 
v.as_object()) { + println!(" • Variables: {}", variables.len()); + } else { + println!(" • Variables: 0"); + } + + if let Some(selectors) = workflow_val.get("selectors").and_then(|v| v.as_object()) { + println!(" • Selectors: {}", selectors.len()); + } else { + println!(" • Selectors: 0"); + } + + let stop_on_error = workflow_val + .get("stop_on_error") + .and_then(|v| v.as_bool()) + .unwrap_or(true); + println!(" • Stop on error: {stop_on_error}"); + + return Ok(()); + } + + info!("Executing workflow with {steps_count} steps via MCP"); + + let workflow_str = serde_json::to_string(&workflow_val)?; + + let result_json = execute_command_with_progress_and_retry( + transport, + "execute_sequence".to_string(), + Some(workflow_str), + true, // Show progress for workflow steps + args.no_retry, + ) + .await?; + + // Parse and display the workflow result + let workflow_result = WorkflowResult::from_mcp_response(&result_json)?; + + // Display result in user-friendly format + workflow_result.display(); + + // If verbose mode, also show raw JSON + if args.verbose { + println!("šŸ“ Raw MCP Response:"); + println!("{}", serde_json::to_string_pretty(&result_json)?); + } + + // Exit with appropriate code based on success + if !workflow_result.success { + std::process::exit(1); + } + + Ok(()) +} + +/// Execute workflow with cron scheduling +async fn run_workflow_with_cron( + transport: Transport, + args: McpRunArgs, + cron_expr: &str, +) -> anyhow::Result<()> { + use tokio_cron_scheduler::{Job, JobScheduler}; + use tracing::error; + + println!("šŸ• Setting up cron scheduler..."); + println!("šŸ“… Cron expression: {cron_expr}"); + println!("šŸ”„ Workflow will run continuously at scheduled intervals"); + println!("šŸ’” Press Ctrl+C to stop the scheduler"); + + // Try to parse the cron expression to validate it (tokio-cron-scheduler will handle this) + // We'll let tokio-cron-scheduler validate it when we create the job + + // For preview, we'll just show a generic message since 
calculating next times + // with tokio-cron-scheduler is more complex + println!("šŸ“‹ Workflow will run according to cron schedule: {cron_expr}"); + println!("šŸ’” Note: Exact execution times depend on system clock and scheduler timing"); + + // Create scheduler + let mut sched = JobScheduler::new().await?; + + // Clone transport for the job closure + let transport_clone = transport.clone(); + let args_clone = args.clone(); + + // Create the scheduled job + let job = Job::new_async(cron_expr, move |_uuid, _lock| { + let transport = transport_clone.clone(); + let args = args_clone.clone(); + + Box::pin(async move { + let start_time = std::time::Instant::now(); + println!( + "\nšŸš€ Starting scheduled workflow execution at {}", + chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC") + ); + + match run_workflow_once(transport, args).await { + Ok(_) => { + let duration = start_time.elapsed(); + println!( + "āœ… Scheduled workflow completed successfully in {:.2}s", + duration.as_secs_f64() + ); + } + Err(e) => { + let duration = start_time.elapsed(); + println!( + "āŒ Scheduled workflow failed after {:.2}s: {}", + duration.as_secs_f64(), + e + ); + } + } + }) + })?; + + // Add job to scheduler + sched.add(job).await?; + println!("āœ… Cron job scheduled successfully"); + + // Start the scheduler + sched.start().await?; + println!("ā–¶ļø Scheduler started - workflow will run at scheduled intervals"); + + // Set up graceful shutdown + let (shutdown_tx, mut shutdown_rx) = tokio::sync::mpsc::channel(1); + + // Spawn a task to handle Ctrl+C + tokio::spawn(async move { + match tokio::signal::ctrl_c().await { + Ok(()) => { + println!("\nšŸ›‘ Received shutdown signal"); + let _ = shutdown_tx.send(()).await; + } + Err(e) => { + error!("Failed to listen for shutdown signal: {}", e); + } + } + }); + + // Wait for shutdown signal + let _ = shutdown_rx.recv().await; + + println!("šŸ›‘ Shutting down scheduler..."); + sched.shutdown().await?; + println!("āœ… Scheduler stopped 
successfully"); + + Ok(()) +} + + +/// Execute a single workflow run (used by cron scheduler) +async fn run_workflow_once( + transport: Transport, + args: McpRunArgs, +) -> anyhow::Result<()> { + // Resolve actual input type (auto-detect if needed) + let resolved_type = determine_input_type(&args.input, args.input_type); + + // Fetch workflow content + let content = match resolved_type { + InputType::File => read_local_file(&args.input).await?, + InputType::Gist => { + let raw_url = convert_gist_to_raw_url(&args.input)?; + fetch_remote_content(&raw_url).await? + } + InputType::Raw => fetch_remote_content(&args.input).await?, + InputType::Auto => unreachable!(), + }; + + // Parse workflow using the same robust logic as gist_executor + let mut workflow_val = parse_workflow_content(&content) + .with_context(|| format!("Failed to parse workflow from {}", args.input))?; + + // Validate workflow structure early to catch issues + validate_workflow(&workflow_val).with_context(|| "Workflow validation failed")?; + + // Apply overrides + if let Some(obj) = workflow_val.as_object_mut() { + if args.no_stop_on_error { + obj.insert("stop_on_error".into(), serde_json::Value::Bool(false)); + } + if args.no_detailed_results { + obj.insert( + "include_detailed_results".into(), + serde_json::Value::Bool(false), + ); + } + } + + // For cron jobs, use simple execution to avoid connection spam + let workflow_str = serde_json::to_string(&workflow_val)?; + let result_json = execute_command_with_progress_and_retry( + transport, + "execute_sequence".to_string(), + Some(workflow_str), + true, // Show progress for workflow steps + args.no_retry, + ) + .await?; + + // Parse the workflow result + let workflow_result = WorkflowResult::from_mcp_response(&result_json)?; + + // For cron jobs, log success/failure/skipped + match workflow_result.state { + WorkflowState::Success => { + println!(" āœ… {}", workflow_result.message); + if let Some(Value::Array(arr)) = &workflow_result.data { + println!(" 
šŸ“Š Extracted {} items", arr.len()); + } + } + WorkflowState::Skipped => { + println!(" ā­ļø {}", workflow_result.message); + if let Some(Value::Object(data)) = &workflow_result.data { + if let Some(reason) = data.get("reason").and_then(|r| r.as_str()) { + println!(" šŸ“ Reason: {reason}"); + } + } + } + WorkflowState::Failure => { + println!(" āŒ {}", workflow_result.message); + if let Some(error) = &workflow_result.error { + println!(" āš ļø {error}"); + } + } + } + + Ok(()) +}