diff --git a/scripts/dev.sh b/scripts/dev.sh index edbec851d..0cfcdb78d 100755 --- a/scripts/dev.sh +++ b/scripts/dev.sh @@ -21,6 +21,87 @@ set -euo pipefail repo_root="$(cd "$(dirname "$0")/.." && pwd)" cd "$repo_root" +state_dir="$repo_root/target/.claudette-dev" +pid_file="$state_dir/dev.pid" +mkdir -p "$state_dir" + +collect_tree_pids() { + local pid="$1" + local child + echo "$pid" + while IFS= read -r child; do + [ -n "$child" ] || continue + collect_tree_pids "$child" + done < <(pgrep -P "$pid" 2>/dev/null || true) +} + +kill_tree() { + local pid="$1" + local pids + pids="$(collect_tree_pids "$pid" | sort -rn | tr '\n' ' ')" + [ -n "$pids" ] || return + # shellcheck disable=SC2086 + kill $pids 2>/dev/null || true +} + +kill_tree_now() { + local pid="$1" + local pids + pids="$(collect_tree_pids "$pid" | sort -rn | tr '\n' ' ')" + [ -n "$pids" ] || return + # Give children one short chance to run their cleanup, then force the + # remaining process tree down. Ctrl+C in dev should return control + # immediately, not leave cargo-tauri/Vite/app children alive. + # shellcheck disable=SC2086 + kill $pids 2>/dev/null || true + sleep 0.15 + # shellcheck disable=SC2086 + kill -9 $pids 2>/dev/null || true +} + +is_expected_dev_pid() { + local pid="$1" + local command + command="$(ps -p "$pid" -o command= 2>/dev/null || true)" + case "$command" in + *"scripts/dev.sh"*|*"cargo tauri dev"*) return 0 ;; + *) return 1 ;; + esac +} + +terminate_existing_dev() { + if [ ! -f "$pid_file" ]; then + return + fi + local old_pid + old_pid="$(cat "$pid_file" 2>/dev/null || true)" + if [ -z "$old_pid" ] || [ "$old_pid" = "$$" ]; then + rm -f "$pid_file" + return + fi + if ! kill -0 "$old_pid" 2>/dev/null; then + rm -f "$pid_file" + return + fi + if ! is_expected_dev_pid "$old_pid"; then + echo "▸ Ignoring stale dev pid file for unrelated process (pid $old_pid)" + rm -f "$pid_file" + return + fi + + echo "▸ Stopping existing dev process tree (pid $old_pid)" + kill_tree "$old_pid" + for _ in {1..80}; do + if ! 
kill -0 "$old_pid" 2>/dev/null; then + rm -f "$pid_file" + return + fi + sleep 0.1 + done + kill -9 "$old_pid" 2>/dev/null || true + rm -f "$pid_file" +} + find_free_port() { local p=$1 while lsof -iTCP:"$p" -sTCP:LISTEN -n -P >/dev/null 2>&1; do @@ -29,6 +110,9 @@ find_free_port() { echo "$p" } +terminate_existing_dev +echo "$$" >"$pid_file" + # Default Vite port is 14253 — deliberately moved off Tauri's stock 1420 # to avoid the cross-app dev-port hijack scenario where another Tauri # starter template (which also defaults to 1420) launches and rebinds @@ -67,8 +151,17 @@ with open(out, "w") as f: }, f) ' "$discovery_file" "$$" "$debug_port" "$vite_port" "$started" "$cwd" "$branch" -cleanup() { rm -f "$discovery_file"; } -trap cleanup EXIT INT TERM +cleanup() { + rm -f "$discovery_file" + if [ -f "$pid_file" ] && [ "$(cat "$pid_file" 2>/dev/null || true)" = "$$" ]; then + rm -f "$pid_file" + fi + if [ -n "${cargo_pid:-}" ] && kill -0 "$cargo_pid" 2>/dev/null; then + kill_tree_now "$cargo_pid" + fi +} +trap cleanup EXIT +trap 'cleanup; exit 130' INT TERM echo "▸ Branch: $branch" echo "▸ Vite dev server: http://localhost:$vite_port" @@ -77,12 +170,17 @@ echo "▸ Discovery file: $discovery_file" (cd src/ui && bun install) +host_triple="$(rustc -vV | awk '/host:/ {print $2}')" +scripts/stage-cli-sidecar.sh "$host_triple" --profile debug + features="${CARGO_TAURI_FEATURES:-devtools,server,voice}" runner_args=() if [[ "$(uname -s)" == "Darwin" ]]; then runner_args=(--runner "$repo_root/scripts/macos-dev-app-runner.sh") fi -exec cargo tauri dev --features "$features" \ +cargo tauri dev --features "$features" \ "${runner_args[@]}" \ - -c "{\"build\":{\"devUrl\":\"http://localhost:$vite_port\"}}" + -c "{\"build\":{\"devUrl\":\"http://localhost:$vite_port\"}}" & +cargo_pid=$! +wait "$cargo_pid" diff --git a/scripts/macos-dev-app-runner.sh b/scripts/macos-dev-app-runner.sh index 6691535c7..76dc754b4 100755 --- a/scripts/macos-dev-app-runner.sh +++ b/scripts/macos-dev-app-runner.sh @@ -22,6 +22,34 @@ fi repo_root="$(cd "$(dirname "$0")/.." && pwd)" +lock_dir="$repo_root/target/.claudette-dev-app-runner.lock" +lock_acquired=false +acquire_launch_lock() { + mkdir -p "$(dirname "$lock_dir")" + while ! mkdir "$lock_dir" 2>/dev/null; do + local lock_pid="" + if [ -f "$lock_dir/pid" ]; then + lock_pid="$(cat "$lock_dir/pid" 2>/dev/null || true)" + fi + if [ -z "$lock_pid" ] || ! 
kill -0 "$lock_pid" 2>/dev/null; then + rm -rf "$lock_dir" 2>/dev/null || true + continue + fi + sleep 0.1 + done + echo "$$" >"$lock_dir/pid" + lock_acquired=true +} + +release_launch_lock() { + if [ "$lock_acquired" = true ]; then + rm -rf "$lock_dir" 2>/dev/null || true + lock_acquired=false + fi +} + +acquire_launch_lock + if [ -f "$1" ]; then binary="$1" shift @@ -103,6 +131,43 @@ macos_dir="$contents_dir/MacOS" resources_dir="$contents_dir/Resources" bundle_executable="$macos_dir/claudette-app" +app_pids() { + pgrep -f -- "$bundle_executable" 2>/dev/null || true +} + +kill_pids_now() { + local pids="$1" + [ -n "$pids" ] || return + # shellcheck disable=SC2086 + kill $pids 2>/dev/null || true + sleep 0.15 + # shellcheck disable=SC2086 + kill -9 $pids 2>/dev/null || true +} + +terminate_existing_app() { + local pids + pids="$(app_pids)" + if [ -z "$pids" ]; then + return + fi + echo "▸ Stopping existing Claudette Dev instance" + kill_pids_now "$pids" + for _ in {1..50}; do + if [ -z "$(app_pids)" ]; then + return + fi + sleep 0.1 + done + pids="$(app_pids)" + if [ -n "$pids" ]; then + # shellcheck disable=SC2086 + kill -9 $pids 2>/dev/null || true + fi +} + +terminate_existing_app + mkdir -p "$macos_dir" "$resources_dir" rm -f "$bundle_executable" cp "$binary" "$bundle_executable" @@ -160,14 +225,22 @@ stdout_fifo="$log_dir/stdout" stderr_fifo="$log_dir/stderr" mkfifo "$stdout_fifo" "$stderr_fifo" +terminate_started_app() { + if [ -n "${app_pid:-}" ] && kill -0 "$app_pid" 2>/dev/null; then + kill_pids_now "$app_pid" + fi +} + cleanup() { + release_launch_lock rm -rf "$log_dir" + terminate_started_app if [ -n "${open_pid:-}" ] && kill -0 "$open_pid" 2>/dev/null; then kill "$open_pid" 2>/dev/null || true fi } trap cleanup EXIT -trap 'kill $open_pid 2>/dev/null || true; exit 130' INT TERM +trap 'terminate_started_app; if [ -n "${open_pid:-}" ]; then kill "$open_pid" 2>/dev/null || true; kill -9 "$open_pid" 2>/dev/null || true; fi; exit 130' INT TERM cat "$stdout_fifo" & cat_stdout_pid=$! @@ -182,11 +255,10 @@ for var in VITE_PORT CLAUDETTE_DEBUG_PORT CLAUDETTE_DEV_OVERRIDE RUST_LOG RUST_B done echo "▸ Launching $bundle_dir via Launch Services" -# `-n` matters for dev loops: the bundle identifier is stable, so Launch -# Services may otherwise activate an already-running Claudette Dev instance -# and return immediately. That makes cargo-tauri tear down Vite, which Bun -# reports as exit 143. -# +# The file lock above serializes overlapping DevCommand runners. Each runner +# first terminates the previous instance, then Launch Services starts exactly +# one fresh copy of this rebuilt bundle. +terminate_existing_app # Build the open(1) argv incrementally rather than relying on # `${arr[@]+"${arr[@]}"}` parameter-substitution tricks. Two reasons: # (1) `set -u` makes naked `"${empty[@]}"` an "unbound variable" error @@ -195,7 +267,7 @@ echo "▸ Launching $bundle_dir via Launch Services" # (2) Building the array explicitly is unambiguous to the next reader — # each element is properly quoted, no mental model of expansion # required, and adding a future flag is just an `+=` append. -open_argv=(open -n -W -a "$bundle_dir" --stdout "$stdout_fifo" --stderr "$stderr_fifo") +open_argv=(open -W -n "$bundle_dir" --stdout "$stdout_fifo" --stderr "$stderr_fifo") if [ "${#env_args[@]}" -gt 0 ]; then open_argv+=("${env_args[@]}") fi @@ -206,6 +278,21 @@ fi "${open_argv[@]}" & open_pid=$! +for _ in {1..50}; do + app_pid="$(app_pids | tail -n 1)" + if [ -n "$app_pid" ]; then + break + fi + if ! 
kill -0 "$open_pid" 2>/dev/null; then + break + fi + sleep 0.1 +done + +# Let later hot-reload runner invocations proceed. They will terminate this +# specific app instance before launching their replacement. +release_launch_lock + wait "$open_pid" exit_code=$? diff --git a/scripts/stage-cli-sidecar.sh b/scripts/stage-cli-sidecar.sh index 32b63f4f6..622771403 100755 --- a/scripts/stage-cli-sidecar.sh +++ b/scripts/stage-cli-sidecar.sh @@ -8,23 +8,57 @@ # Usage: # scripts/stage-cli-sidecar.sh # auto-detect host triple # scripts/stage-cli-sidecar.sh # explicit triple (CI) +# scripts/stage-cli-sidecar.sh --profile debug # scripts/stage-cli-sidecar.sh --release-built -# # don't rebuild; assume `target//release/claudette` already +# # don't rebuild; assume `target///claudette` already # # exists (CI flow where claudette-cli was built in a separate step). set -euo pipefail repo_root="$(cd "$(dirname "$0")/.." && pwd)" cd "$repo_root" -if [ "${1:-}" != "" ] && [ "$1" != "--release-built" ]; then +if [ "${1:-}" != "" ] && [[ "$1" != --* ]]; then triple="$1" shift else triple="$(rustc -vV | awk '/host:/ {print $2}')" fi -release_built=false -for arg in "$@"; do - if [ "$arg" = "--release-built" ]; then release_built=true; fi +profile="release" +built=false +while [ "$#" -gt 0 ]; do + arg="$1" + case "$arg" in + --profile) + if [ "$#" -lt 2 ]; then + echo "missing value for --profile" >&2 + exit 64 + fi + case "$2" in + debug|release) profile="$2" ;; + *) + echo "unsupported profile: $2" >&2 + exit 64 + ;; + esac + shift 2 + ;; + --profile=debug|--debug) + profile="debug" + shift + ;; + --profile=release|--release) + profile="release" + shift + ;; + --release-built) + built=true + shift + ;; + *) + echo "unsupported argument: $arg" >&2 + exit 64 + ;; + esac done case "$triple" in @@ -32,10 +66,14 @@ case "$triple" in *) bin_name="claudette" ;; esac -target_bin="target/${triple}/release/${bin_name}" -if [ "$release_built" != "true" ]; then - echo "▸ Building claudette-cli for ${triple}" - cargo build --release --target "${triple}" -p claudette-cli +target_bin="target/${triple}/${profile}/${bin_name}" +if [ "$built" != "true" ]; then + echo "▸ Building claudette-cli for ${triple} (${profile})" + cargo_args=(build --target "${triple}" -p claudette-cli) + if [ "$profile" = "release" ]; then + cargo_args+=(--release) + fi + cargo "${cargo_args[@]}" fi if [ ! -f "$target_bin" ]; then diff --git a/src-server/src/auth.rs b/src-server/src/auth.rs index e67b6f1fb..9aa48a009 100644 --- a/src-server/src/auth.rs +++ b/src-server/src/auth.rs @@ -13,7 +13,21 @@ pub fn generate_token() -> String { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ServerConfig { pub server: ServerSection, - pub auth: AuthSection, + /// Auth section is retained for compatibility with older config files — + /// if a `pairing_token` field is present on disk, we keep loading it but + /// the runtime no longer consults it. New deployments don't need any + /// global pairing token; all access is share-scoped. + #[serde(default)] + pub auth: Option, + /// Active shares. Each share holds its own pairing token, the workspace + /// ids it grants access to, and the session tokens it has issued. + /// Removing a share invalidates every session token that came from it + /// (next request fails the share-existence check), giving immediate + /// revocation without any separate "revoked" set to keep in sync. 
+    #[serde(default)]
+    pub shares: Vec<ShareEntry>,
+    /// Legacy persistent sessions from the pre-shares era, retained only
+    /// so old config files round-trip cleanly. New code never appends here.
     #[serde(default)]
     pub sessions: Vec<SessionEntry>,
 }
@@ -30,6 +44,45 @@ pub struct AuthSection {
     pub pairing_token: String,
 }
 
+/// One share — a workspace-scoped authorization grant.
+///
+/// The pairing token in this struct is what the host hands out to the
+/// people they want to grant access to (typically embedded in a
+/// `claudette://...` connection string). The `allowed_workspace_ids` list
+/// is the *complete* set of workspace ids any session token issued by this
+/// share is permitted to see; every workspace-touching RPC consults it.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ShareEntry {
+    /// Stable id for this share. The host UI uses it to revoke
+    /// (`stop_share`) and list active shares.
+    pub id: String,
+    /// Optional human-friendly label (e.g. "Work team", "OSS collaborators").
+    /// Surfaced on the host UI so the user can tell shares apart.
+    #[serde(default)]
+    pub label: Option<String>,
+    /// The pairing token. Distinct from session tokens — clients pair
+    /// once with this, then reuse the issued session_token on subsequent
+    /// reconnects.
+    pub pairing_token: String,
+    /// Workspace ids this share grants access to. The set is fixed at
+    /// share-creation time; to change scope, revoke and create a new share.
+    pub allowed_workspace_ids: Vec<String>,
+    /// `false` for plain remote-control shares; `true` when the share is
+    /// for collaborative sessions. When true, joining a chat session in
+    /// scope auto-creates a `Room` (with `consensus_required` from below).
+    #[serde(default)]
+    pub collaborative: bool,
+    /// Whether plan-consensus voting is required for this share's collab
+    /// sessions. Ignored when `collaborative=false`.
+    #[serde(default)]
+    pub consensus_required: bool,
+    /// Session tokens issued by pair-ins against this share's
+    /// `pairing_token`. Each entry inherits the share's scope.
+    #[serde(default)]
+    pub sessions: Vec<SessionEntry>,
+    pub created_at: String,
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct SessionEntry {
     pub token: String,
@@ -38,6 +91,20 @@ pub struct SessionEntry {
     pub last_seen: String,
 }
 
+/// What a successful authentication produces — enough state for the WS
+/// handler to construct a `ConnectionCtx`.
+#[derive(Debug, Clone)]
+pub struct ResolvedSession {
+    pub session: SessionEntry,
+    /// The id of the share this session was issued from. Existence of this
+    /// id in `ServerConfig.shares` is checked on every RPC; if the share
+    /// has been revoked, the connection is rejected.
+    pub share_id: String,
+    pub allowed_workspace_ids: Vec<String>,
+    pub collaborative: bool,
+    pub consensus_required: bool,
+}
+
 impl ServerConfig {
     pub fn load_or_create(path: &Path) -> Result> {
         if path.exists() {
@@ -52,9 +119,8 @@ impl ServerConfig {
                 port: crate::DEFAULT_PORT,
                 bind: "0.0.0.0".to_string(),
             },
-            auth: AuthSection {
-                pairing_token: generate_token(),
-            },
+            auth: None,
+            shares: Vec::new(),
             sessions: Vec::new(),
         };
         config.save(path)?;
@@ -71,36 +137,141 @@ impl ServerConfig {
         Ok(())
     }
 
-    pub fn regenerate_token(&mut self) {
-        self.auth.pairing_token = generate_token();
-        self.sessions.clear();
+    /// Mint a new share with the given scope and return the freshly-created
+    /// entry (so the caller can read its `pairing_token` for display).
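+    ///
+    /// A brief usage sketch (hypothetical host-side caller; persisting the
+    /// updated config afterwards is omitted):
+    ///
+    /// ```ignore
+    /// let share = config.create_share(
+    ///     Some("Work team".into()),
+    ///     vec!["ws-1".into(), "ws-2".into()],
+    ///     /* collaborative */ true,
+    ///     /* consensus_required */ false,
+    /// );
+    /// println!("pairing token: {}", share.pairing_token);
+    /// ```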
+    pub fn create_share(
+        &mut self,
+        label: Option<String>,
+        allowed_workspace_ids: Vec<String>,
+        collaborative: bool,
+        consensus_required: bool,
+    ) -> &ShareEntry {
+        let entry = ShareEntry {
+            id: uuid_v4(),
+            label,
+            pairing_token: generate_token(),
+            allowed_workspace_ids,
+            collaborative,
+            consensus_required,
+            sessions: Vec::new(),
+            created_at: now_iso(),
+        };
+        self.shares.push(entry);
+        self.shares.last().expect("just pushed")
     }
 
-    /// Validate a pairing token and issue a new session token.
-    pub fn pair(&mut self, pairing_token: &str, client_name: &str) -> Option<String> {
-        if self.auth.pairing_token != pairing_token {
-            return None;
-        }
+    /// Drop a share by id and return whether it existed. Removing the
+    /// share also drops every session token it issued — there is no other
+    /// list to scrub. The next RPC from any of those connections looks up
+    /// the missing `share_id` and is rejected.
+    pub fn revoke_share(&mut self, share_id: &str) -> bool {
+        let before = self.shares.len();
+        self.shares.retain(|s| s.id != share_id);
+        before != self.shares.len()
+    }
+
+    pub fn list_shares(&self) -> &[ShareEntry] {
+        &self.shares
+    }
+
+    /// Validate a pairing token. On match, issue a new session token,
+    /// record it on the share's session list, and return the resolved
+    /// session bundle. Returns `None` for unknown / revoked tokens.
+    pub fn pair(&mut self, pairing_token: &str, client_name: &str) -> Option<ResolvedSession> {
+        let share_idx = self
+            .shares
+            .iter()
+            .position(|s| s.pairing_token == pairing_token)?;
         let session_token = generate_token();
         let now = now_iso();
-        self.sessions.push(SessionEntry {
-            token: session_token.clone(),
+        let session = SessionEntry {
+            token: session_token,
             name: client_name.to_string(),
             created_at: now.clone(),
             last_seen: now,
-        });
-        Some(session_token)
+        };
+        let share = &mut self.shares[share_idx];
+        share.sessions.push(session.clone());
+        Some(ResolvedSession {
+            session,
+            share_id: share.id.clone(),
+            allowed_workspace_ids: share.allowed_workspace_ids.clone(),
+            collaborative: share.collaborative,
+            consensus_required: share.consensus_required,
+        })
     }
 
-    /// Validate an existing session token. Returns true and updates last_seen if valid.
-    pub fn validate_session(&mut self, session_token: &str) -> bool {
-        if let Some(session) = self.sessions.iter_mut().find(|s| s.token == session_token) {
-            session.last_seen = now_iso();
-            true
-        } else {
-            false
+    /// Re-authenticate an existing session token. Walks every share's
+    /// session list (cheap — a host has O(few) shares each with O(few)
+    /// sessions) and returns the matching `ResolvedSession` with
+    /// `last_seen` bumped. Returns `None` for tokens belonging to
+    /// revoked shares or otherwise unknown tokens.
+    pub fn validate_session(&mut self, session_token: &str) -> Option<ResolvedSession> {
+        for share in self.shares.iter_mut() {
+            if let Some(session) = share.sessions.iter_mut().find(|s| s.token == session_token) {
+                session.last_seen = now_iso();
+                return Some(ResolvedSession {
+                    session: session.clone(),
+                    share_id: share.id.clone(),
+                    allowed_workspace_ids: share.allowed_workspace_ids.clone(),
+                    collaborative: share.collaborative,
+                    consensus_required: share.consensus_required,
+                });
+            }
         }
+        None
     }
+
+    /// Look up which workspaces a given session token is scoped to,
+    /// without modifying state. Used by RPC authorization gates so we
+    /// don't have to thread the resolved scope through every call.
+ pub fn share_for_session(&self, session_token: &str) -> Option<&ShareEntry> { + self.shares + .iter() + .find(|s| s.sessions.iter().any(|x| x.token == session_token)) + } +} + +/// Derive a stable, opaque participant id from a session token. The same +/// pairing always yields the same id; different pairings (or token rotation) +/// yield different ids. One-way: the id reveals nothing about the token, so +/// it is safe to broadcast in events and persist in chat-message rows. +pub fn participant_id_for_token(token: &str) -> String { + use sha2::{Digest, Sha256}; + let digest = Sha256::digest(token.as_bytes()); + base64::Engine::encode( + &base64::engine::general_purpose::URL_SAFE_NO_PAD, + &digest[..16], + ) +} + +fn uuid_v4() -> String { + // Lightweight UUID using random bytes — avoids pulling another dep at + // this layer and matches the existing `generate_token` convention. + let mut bytes = [0u8; 16]; + rand::thread_rng().fill(&mut bytes); + // Set version (4) and variant (RFC4122) bits. + bytes[6] = (bytes[6] & 0x0f) | 0x40; + bytes[8] = (bytes[8] & 0x3f) | 0x80; + format!( + "{:02x}{:02x}{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", + bytes[0], + bytes[1], + bytes[2], + bytes[3], + bytes[4], + bytes[5], + bytes[6], + bytes[7], + bytes[8], + bytes[9], + bytes[10], + bytes[11], + bytes[12], + bytes[13], + bytes[14], + bytes[15], + ) } fn now_iso() -> String { @@ -134,3 +305,136 @@ fn epoch_days_to_date(days: u64) -> (u64, u64, u64) { let y = if m <= 2 { y + 1 } else { y }; (y, m, d) } + +#[cfg(test)] +mod tests { + use super::*; + + fn empty_config() -> ServerConfig { + ServerConfig { + server: ServerSection { + name: "test".into(), + port: 0, + bind: "127.0.0.1".into(), + }, + auth: None, + shares: Vec::new(), + sessions: Vec::new(), + } + } + + #[test] + fn create_share_assigns_unique_id_and_token() { + let mut cfg = empty_config(); + let a = cfg + .create_share(None, vec!["ws-1".into()], false, false) + .clone(); + let b = cfg + .create_share(None, vec!["ws-2".into()], true, true) + .clone(); + assert_ne!(a.id, b.id); + assert_ne!(a.pairing_token, b.pairing_token); + assert_eq!(a.allowed_workspace_ids, vec!["ws-1"]); + assert!(b.consensus_required); + } + + #[test] + fn pair_only_matches_existing_share_token() { + let mut cfg = empty_config(); + let token = cfg + .create_share(None, vec!["ws-1".into()], false, false) + .pairing_token + .clone(); + assert!(cfg.pair("not-a-real-token", "alice").is_none()); + let resolved = cfg.pair(&token, "alice").unwrap(); + assert_eq!(resolved.allowed_workspace_ids, vec!["ws-1"]); + // Share now has one issued session. + assert_eq!(cfg.shares[0].sessions.len(), 1); + } + + #[test] + fn validate_session_returns_scope_and_bumps_last_seen() { + let mut cfg = empty_config(); + let token = cfg + .create_share( + Some("Work".into()), + vec!["ws-A".into(), "ws-B".into()], + true, + true, + ) + .pairing_token + .clone(); + let resolved = cfg.pair(&token, "bob").unwrap(); + let session_token = resolved.session.token.clone(); + let original_last_seen = resolved.session.last_seen.clone(); + + // Sleep a real second so last_seen visibly bumps. now_iso has + // 1s resolution. 
+ std::thread::sleep(std::time::Duration::from_millis(1100)); + + let revalidated = cfg.validate_session(&session_token).unwrap(); + assert_eq!(revalidated.allowed_workspace_ids, vec!["ws-A", "ws-B"]); + assert!(revalidated.collaborative); + assert!(revalidated.consensus_required); + assert_ne!(revalidated.session.last_seen, original_last_seen); + } + + #[test] + fn revoke_share_invalidates_its_sessions() { + let mut cfg = empty_config(); + let share = cfg + .create_share(None, vec!["ws-1".into()], false, false) + .clone(); + let resolved = cfg.pair(&share.pairing_token, "alice").unwrap(); + let token = resolved.session.token.clone(); + + // Sanity: validates before revocation. + assert!(cfg.validate_session(&token).is_some()); + + let removed = cfg.revoke_share(&share.id); + assert!(removed); + + // After revocation, the session token is unknown — no remaining + // share carries it. + assert!(cfg.validate_session(&token).is_none()); + } + + #[test] + fn revoke_share_returns_false_for_unknown_id() { + let mut cfg = empty_config(); + assert!(!cfg.revoke_share("nonexistent")); + } + + #[test] + fn share_for_session_finds_owning_share() { + let mut cfg = empty_config(); + let token_a = cfg + .create_share(None, vec!["ws-1".into()], false, false) + .pairing_token + .clone(); + let token_b = cfg + .create_share(None, vec!["ws-2".into()], false, false) + .pairing_token + .clone(); + let session_a = cfg.pair(&token_a, "alice").unwrap().session.token; + let session_b = cfg.pair(&token_b, "bob").unwrap().session.token; + + let share_a = cfg.share_for_session(&session_a).unwrap(); + assert_eq!(share_a.allowed_workspace_ids, vec!["ws-1"]); + let share_b = cfg.share_for_session(&session_b).unwrap(); + assert_eq!(share_b.allowed_workspace_ids, vec!["ws-2"]); + } + + #[test] + fn participant_ids_are_stable_per_token() { + let token = "abc"; + assert_eq!( + participant_id_for_token(token), + participant_id_for_token(token) + ); + assert_ne!( + participant_id_for_token("abc"), + participant_id_for_token("abd") + ); + } +} diff --git a/src-server/src/collab.rs b/src-server/src/collab.rs new file mode 100644 index 000000000..22b92ed95 --- /dev/null +++ b/src-server/src/collab.rs @@ -0,0 +1,374 @@ +//! Server-side handlers for collaborative-session RPCs. +//! +//! Lives in its own file so the (already-large) `handler.rs` doesn't grow +//! further. Each function in this module is invoked from the dispatch arms +//! in `handle_request` and is structured to do its own auth checks (host +//! vs. non-host, joined-session membership) before mutating room state. + +use std::sync::Arc; + +use claudette::room::{ParticipantId, ParticipantInfo, PendingVote, Vote}; +use serde_json::json; + +use crate::handler::ConnectionCtx; +use crate::ws::{ServerState, Writer, try_send_message}; + +/// Register a participant against a room, spawn their per-connection event +/// forwarder, and return a snapshot of the room's current state so the +/// client can render without lag. +/// +/// Idempotent: re-joining the same session does not double-add the +/// participant or duplicate the forwarder. The forwarder ends naturally +/// when the underlying broadcast channel closes (room dropped from the +/// registry on `stop_collaborative_share`). +pub async fn handle_join_session( + state: &Arc, + writer: &Arc, + ctx: &ConnectionCtx, + chat_session_id: &str, +) -> Result { + // When the parent share is collaborative, we lazily create a room on + // first join. 
That removes the host's per-session "Enable collab" + // step — once they share a workspace in collab mode, every chat + // session in scope automatically gets a multi-user room when the + // first remote arrives. + if !ctx.collaborative { + return Err("Session is not collaborative".into()); + } + let room = state + .rooms + .get_or_create(chat_session_id, ctx.consensus_required) + .await; + + // Mark this connection as joined first; idempotency below depends on it. + let already_joined = { + let mut joined = ctx.joined_sessions.lock().await; + !joined.insert(chat_session_id.to_string()) + }; + + if !already_joined { + let info = ParticipantInfo { + id: ctx.participant_id.clone(), + display_name: ctx.display_name.clone(), + is_host: ctx.is_host, + joined_at: now_unix_ms(), + muted: false, + }; + room.add_participant(info.clone()).await; + + // Broadcast the join so existing participants update their roster. + room.publish(json!({ + "event": "participants-changed", + "payload": { + "chat_session_id": chat_session_id, + "participants": room.participant_list().await, + }, + })); + + // Spawn the per-connection forwarder. Captures the writer so each + // event published to the room reaches this client. On `Lagged`, we + // emit a `resync-required` hint so the client can re-`join_session` + // rather than silently miss events. + let writer = Arc::clone(writer); + let mut rx = room.subscribe(); + let chat_session_id_for_forwarder = chat_session_id.to_string(); + let share_id = ctx.share_id.clone(); + let config = Arc::clone(&state.config); + let forwarder = tokio::spawn(async move { + loop { + match rx.recv().await { + Ok(evt) => { + let share_still_exists = { + let cfg = config.lock().await; + cfg.shares.iter().any(|share| share.id == share_id) + }; + if !share_still_exists { + break; + } + if try_send_message(&writer, &evt.0).await.is_err() { + break; + } + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => { + if try_send_message( + &writer, + &json!({ + "event": "resync-required", + "payload": { + "chat_session_id": &chat_session_id_for_forwarder, + }, + }), + ) + .await + .is_err() + { + break; + } + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + } + } + }); + ctx.room_forwarders + .lock() + .await + .insert(chat_session_id.to_string(), forwarder); + } + + // Snapshot for late joiners: current participants, turn metadata, and + // any open vote/question so consensus UI renders immediately. Chat + // history is intentionally not included here; clients fetch it through + // the existing history RPC to keep join_session lightweight. 
+ let participants = room.participant_list().await; + let consensus_required = *room.consensus_required.read().await; + let turn_holder = room.current_turn_holder().await; + let turn_started_at_ms = *room.turn_started_at_ms.lock().await; + let turn_settings = room.turn_settings.read().await.clone(); + let pending_vote = state.rooms.pending_vote_snapshot(chat_session_id).await; + let pending_question = state.rooms.pending_question_snapshot(chat_session_id).await; + + Ok(json!({ + "participants": participants, + "consensus_required": consensus_required, + "turn_holder": turn_holder.map(|p| p.0), + "turn_started_at_ms": turn_started_at_ms, + "turn_settings": turn_settings, + "pending_vote": pending_vote, + "pending_question": pending_question, + })) +} + +pub async fn handle_leave_session( + state: &Arc, + ctx: &ConnectionCtx, + chat_session_id: &str, +) -> Result { + let room = match state.rooms.get(chat_session_id).await { + Some(r) => r, + None => return Ok(json!(null)), + }; + let removed = ctx.joined_sessions.lock().await.remove(chat_session_id); + if removed { + if let Some(handle) = ctx.room_forwarders.lock().await.remove(chat_session_id) { + handle.abort(); + } + room.remove_participant(&ctx.participant_id).await; + room.publish(json!({ + "event": "participants-changed", + "payload": { + "chat_session_id": chat_session_id, + "participants": room.participant_list().await, + }, + })); + } + Ok(json!(null)) +} + +/// Record a remote participant's vote on an open ExitPlanMode consensus +/// round. Forwards into the same resolver the local Tauri side uses, but +/// we cannot call that directly across the process boundary — instead the +/// server publishes a `plan-vote-cast` event whose payload is consumed by +/// the host-side resolver task spawned in `start_collaborative_share`. +/// +/// The host-side resolver is the *single* place where `send_control_response` +/// ever fires for ExitPlanMode in collab mode, so server-side voters never +/// race the host on the CLI control channel. 
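+///
+/// An illustrative request and the broadcast it triggers (field values
+/// elided or hypothetical; the request shape follows the
+/// `vote_plan_approval` dispatch arm in `handler.rs`, the broadcast is the
+/// `plan-vote-cast` publish below):
+///
+/// ```text
+/// → {"method": "vote_plan_approval",
+///    "params": {"chat_session_id": "…", "tool_use_id": "…",
+///               "approved": false, "reason": "needs a migration step"}}
+/// ← {"event": "plan-vote-cast",
+///    "payload": {"chat_session_id": "…", "tool_use_id": "…",
+///                "participant_id": "…", "vote": { … }}}
+/// ```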
+pub async fn handle_vote_plan_approval( + state: &Arc, + ctx: &ConnectionCtx, + chat_session_id: &str, + tool_use_id: &str, + approved: bool, + reason: Option, +) -> Result { + if !ctx.has_joined(chat_session_id).await { + return Err("Not joined to this session".into()); + } + let room = state + .rooms + .get(chat_session_id) + .await + .ok_or("Session is not collaborative")?; + if room.is_muted(&ctx.participant_id).await { + return Err("Muted participants cannot vote".into()); + } + let vote = if approved { + Vote::Approve + } else { + Vote::Deny { + reason: reason.unwrap_or_else(|| "Denied without reason".into()), + } + }; + { + let mut pending = room.pending_vote.write().await; + record_plan_vote(&mut pending, &ctx.participant_id, tool_use_id, vote.clone())?; + } + room.publish(json!({ + "event": "plan-vote-cast", + "payload": { + "chat_session_id": chat_session_id, + "tool_use_id": tool_use_id, + "participant_id": ctx.participant_id.as_str(), + "vote": &vote, + }, + })); + Ok(json!(null)) +} + +fn record_plan_vote( + pending: &mut Option, + participant_id: &ParticipantId, + tool_use_id: &str, + vote: Vote, +) -> Result<(), String> { + let Some(pending) = pending.as_mut() else { + return Err("No pending plan approval vote".into()); + }; + if pending.tool_use_id != tool_use_id { + return Err("Stale plan approval vote".into()); + } + if !pending.required_voters.contains(participant_id) { + return Err("Participant is not required for this vote".into()); + } + pending.votes.insert(participant_id.clone(), vote); + Ok(()) +} + +pub async fn handle_submit_agent_answer( + state: &Arc, + ctx: &ConnectionCtx, + chat_session_id: &str, + tool_use_id: &str, + answers: std::collections::HashMap, + annotations: Option, +) -> Result { + if !ctx.has_joined(chat_session_id).await { + return Err("Not joined to this session".into()); + } + let room = state + .rooms + .get(chat_session_id) + .await + .ok_or("Session is not collaborative")?; + if room.is_muted(&ctx.participant_id).await { + return Err("Muted participants cannot answer questions".into()); + } + room.publish(json!({ + "event": "agent-answer-submitted", + "payload": { + "chat_session_id": chat_session_id, + "tool_use_id": tool_use_id, + "participant_id": ctx.participant_id.as_str(), + "answers": answers, + "annotations": annotations, + }, + })); + Ok(json!(null)) +} + +/// Forget every session this connection joined and broadcast the +/// resulting roster updates. Called from the WS connection-close path. 
+pub async fn drop_all_joined_sessions(state: &Arc, ctx: &ConnectionCtx) { + let session_ids: Vec = ctx.joined_sessions.lock().await.drain().collect(); + let forwarders: Vec> = ctx + .room_forwarders + .lock() + .await + .drain() + .map(|(_, h)| h) + .collect(); + for handle in forwarders { + handle.abort(); + } + for session_id in session_ids { + let Some(room) = state.rooms.get(&session_id).await else { + continue; + }; + room.remove_participant(&ctx.participant_id).await; + room.publish(json!({ + "event": "participants-changed", + "payload": { + "chat_session_id": &session_id, + "participants": room.participant_list().await, + }, + })); + } +} + +fn now_unix_ms() -> i64 { + use std::time::{SystemTime, UNIX_EPOCH}; + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|d| d.as_millis() as i64) + .unwrap_or(0) +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use claudette::room::{ParticipantId, PendingVote, Vote}; + + use super::record_plan_vote; + + fn pending_vote(required: &[&str]) -> Option { + Some(PendingVote::new( + "tool-1".to_string(), + required + .iter() + .map(|id| ParticipantId((*id).to_string())) + .collect::>(), + serde_json::json!({}), + )) + } + + #[test] + fn record_plan_vote_rejects_late_joiner() { + let mut pending = pending_vote(&["host", "guest-a"]); + let late_joiner = ParticipantId("guest-b".to_string()); + + let err = record_plan_vote(&mut pending, &late_joiner, "tool-1", Vote::Approve) + .expect_err("late joiners are observers"); + + assert_eq!(err, "Participant is not required for this vote"); + assert!( + !pending + .as_ref() + .expect("pending vote") + .votes + .contains_key(&late_joiner) + ); + } + + #[test] + fn record_plan_vote_rejects_stale_tool_use_id() { + let voter = ParticipantId("guest-a".to_string()); + let mut pending = pending_vote(&["guest-a"]); + + let err = record_plan_vote(&mut pending, &voter, "tool-2", Vote::Approve) + .expect_err("stale tool id must be rejected"); + + assert_eq!(err, "Stale plan approval vote"); + assert!( + !pending + .as_ref() + .expect("pending vote") + .votes + .contains_key(&voter) + ); + } + + #[test] + fn record_plan_vote_records_required_voter() { + let voter = ParticipantId("guest-a".to_string()); + let mut pending = pending_vote(&["guest-a"]); + + record_plan_vote(&mut pending, &voter, "tool-1", Vote::Approve) + .expect("required voter can vote"); + + assert_eq!( + pending.as_ref().expect("pending vote").votes.get(&voter), + Some(&Vote::Approve) + ); + } +} diff --git a/src-server/src/handler.rs b/src-server/src/handler.rs index 4853730f2..6ba7ffafc 100644 --- a/src-server/src/handler.rs +++ b/src-server/src/handler.rs @@ -1,25 +1,173 @@ +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use claudette::agent::{self, AgentEvent, AgentSettings, InnerStreamEvent, StreamEvent}; use claudette::chat::{ BuildAssistantArgs, CheckpointArgs, build_assistant_chat_message, create_turn_checkpoint, - extract_assistant_text, extract_event_thinking, }; use claudette::db::Database; -use claudette::model::{ChatMessage, ChatRole}; +use claudette::model::{ChatMessage, ChatRole, WorkspaceStatus}; +use claudette::room::ParticipantId; use portable_pty::{CommandBuilder, PtySize, native_pty_system}; use serde_json::json; +use tokio::sync::Mutex; use crate::ws::{AgentSessionState, PtyHandle, ServerState, Writer, send_message}; use claudette::permissions::tools_for_level; +/// Per-connection identity + authorization scope, passed into every RPC +/// handler. 
+/// Constructed once in `ws.rs` after authentication, then carried
+/// for the life of the connection. RPC dispatchers consult this to:
+///
+/// - stamp `author_participant_id` / `author_display_name` on user messages
+/// - reject host-only operations (kick / mute) from non-host participants
+/// - identify the source of a vote in plan-consensus resolution
+/// - enforce that mutating RPCs only target sessions the caller has joined
+/// - **gate every workspace-touching RPC** on the share's allowed workspace
+///   set, and reject all RPCs once the parent share is revoked
+///
+/// Handlers must NOT trust client-supplied identity or scope in params —
+/// only this struct is authoritative. `is_host` is always `false` for
+/// connections that arrive through the WebSocket; the host process
+/// constructs its own `ParticipantId::HOST` ctx for its own publish path.
+///
+/// `joined_sessions` is shared mutably (behind `Mutex`) across the spawned
+/// dispatch tasks for a single connection so a `join_session` on one task
+/// is visible to subsequent RPCs on others. Cleared on `leave_session`
+/// and on connection close (see `ws.rs`).
+#[derive(Debug, Clone)]
+pub struct ConnectionCtx {
+    pub participant_id: ParticipantId,
+    pub display_name: String,
+    pub is_host: bool,
+    pub joined_sessions: Arc<Mutex<HashSet<String>>>,
+    pub room_forwarders: Arc<Mutex<HashMap<String, tokio::task::JoinHandle<()>>>>,
+    /// The id of the share this connection authenticated against. Each RPC
+    /// re-checks the share still exists in the live config; a missing
+    /// share means the host revoked it and every subsequent request fails.
+    pub share_id: String,
+    /// Workspace ids the share grants access to. The handler's
+    /// `ctx_can_access_workspace` helper consults this on every
+    /// workspace-touching RPC; list RPCs filter their results down to it.
+    pub allowed_workspace_ids: Vec<String>,
+    /// Whether the parent share enables collaborative mode. When true,
+    /// joining a chat session in scope auto-creates a `Room` with
+    /// `consensus_required` set from the share — the host doesn't need
+    /// to enable collab per session.
+    pub collaborative: bool,
+    pub consensus_required: bool,
+}
+
+impl ConnectionCtx {
+    /// Construct a ctx for an authenticated WebSocket connection from the
+    /// matching share + session.
+    pub fn from_session(
+        participant_id: String,
+        display_name: String,
+        share_id: String,
+        allowed_workspace_ids: Vec<String>,
+        collaborative: bool,
+        consensus_required: bool,
+    ) -> Self {
+        Self {
+            participant_id: ParticipantId(participant_id),
+            display_name,
+            is_host: false,
+            joined_sessions: Arc::new(Mutex::new(HashSet::new())),
+            room_forwarders: Arc::new(Mutex::new(HashMap::new())),
+            share_id,
+            allowed_workspace_ids,
+            collaborative,
+            consensus_required,
+        }
+    }
+
+    pub async fn has_joined(&self, chat_session_id: &str) -> bool {
+        self.joined_sessions.lock().await.contains(chat_session_id)
+    }
+
+    /// Whether this connection's share permits operating on the given
+    /// workspace. The host's local UI never goes through this gate (it
+    /// runs as `ParticipantId::HOST` and skips the WS auth path entirely);
+    /// remote participants always do.
+ pub fn can_access_workspace(&self, workspace_id: &str) -> bool { + self.allowed_workspace_ids + .iter() + .any(|id| id == workspace_id) + } +} + +async fn ctx_can_access_workspace_live( + state: &Arc, + ctx: &ConnectionCtx, + workspace_id: &str, +) -> bool { + if ctx.can_access_workspace(workspace_id) { + return true; + } + let cfg = state.config.lock().await; + cfg.shares + .iter() + .find(|share| share.id == ctx.share_id) + .map(|share| { + share + .allowed_workspace_ids + .iter() + .any(|id| id == workspace_id) + }) + .unwrap_or(false) +} + +/// Resolve a chat-session id to its workspace id and check that the +/// connection's share grants access to that workspace. Returns the +/// workspace id on success so the caller can reuse it (e.g. to look up +/// the worktree path), or an `Err(json-rpc-error-message)` to bail out. +async fn ctx_authorize_chat_session( + state: &Arc, + ctx: &ConnectionCtx, + chat_session_id: &str, +) -> Result { + let db = open_db(state).map_err(|e| e.to_string())?; + let session = db + .get_chat_session(chat_session_id) + .map_err(|e| e.to_string())? + .ok_or("Chat session not found")?; + if !ctx_can_access_workspace_live(state, ctx, &session.workspace_id).await { + return Err("Not authorized for this workspace".into()); + } + Ok(session.workspace_id) +} + /// Dispatch a JSON-RPC request and return a JSON-RPC response. pub async fn handle_request( state: &Arc, writer: &Arc, + ctx: &ConnectionCtx, request: &serde_json::Value, ) -> serde_json::Value { + // Revocation check: if the share that issued this connection has been + // removed from the live config, every subsequent RPC is rejected + // before any work happens. This is what makes "stop sharing" actually + // stop in-flight access. Cheap — one async lock + a vec scan over + // a handful of entries. + { + let cfg = state.config.lock().await; + if !cfg.shares.iter().any(|s| s.id == ctx.share_id) { + let id = request + .get("id") + .cloned() + .unwrap_or(serde_json::Value::Null); + return json!({ + "id": id, + "error": { + "code": -1, + "message": "This share has been revoked by the host." 
+ } + }); + } + } + let id = request .get("id") .cloned() @@ -28,119 +176,219 @@ pub async fn handle_request( let params = request.get("params").cloned().unwrap_or_default(); let result = match method { - "load_initial_data" => handle_load_initial_data(state).await, + "load_initial_data" => handle_load_initial_data(state, ctx).await, "load_chat_history" => { let chat_session_id = param_chat_session_id(¶ms); - handle_load_chat_history(state, &chat_session_id).await + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => handle_load_chat_history(state, &chat_session_id).await, + } } "send_chat_message" => { let chat_session_id = param_chat_session_id(¶ms); - let content = param_str(¶ms, "content"); - let permission_level = params - .get("permission_level") - .and_then(|v| v.as_str()) - .map(String::from); - let model = params - .get("model") - .and_then(|v| v.as_str()) - .map(String::from); - let fast_mode = params.get("fast_mode").and_then(|v| v.as_bool()); - let thinking_enabled = params.get("thinking_enabled").and_then(|v| v.as_bool()); - let plan_mode = params.get("plan_mode").and_then(|v| v.as_bool()); - let effort = params - .get("effort") - .and_then(|v| v.as_str()) - .map(String::from); - let chrome_enabled = params.get("chrome_enabled").and_then(|v| v.as_bool()); - let disable_1m_context = params.get("disable_1m_context").and_then(|v| v.as_bool()); - let mentioned_files: Option> = params - .get("mentioned_files") - .and_then(|v| serde_json::from_value(v.clone()).ok()); - handle_send_chat_message( - state, - writer, - &chat_session_id, - &content, - permission_level.as_deref(), - model, - fast_mode, - thinking_enabled, - plan_mode, - effort, - chrome_enabled, - disable_1m_context, - mentioned_files, - ) - .await + // Workspace scope gate — also early-rejects unknown sessions. + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => { + // Collab-only secondary gate: must have join_session'd + // and not be muted. 
+ let collab_block = if let Some(room) = state.rooms.get(&chat_session_id).await { + if !ctx.has_joined(&chat_session_id).await { + Some("Not joined to this session".to_string()) + } else if room.is_muted(&ctx.participant_id).await { + Some("Muted participants cannot send messages".to_string()) + } else { + None + } + } else { + None + }; + if let Some(msg) = collab_block { + Err(msg) + } else { + let content = param_str(¶ms, "content"); + let permission_level = params + .get("permission_level") + .and_then(|v| v.as_str()) + .map(String::from); + let model = params + .get("model") + .and_then(|v| v.as_str()) + .map(String::from); + let fast_mode = params.get("fast_mode").and_then(|v| v.as_bool()); + let thinking_enabled = + params.get("thinking_enabled").and_then(|v| v.as_bool()); + let plan_mode = params.get("plan_mode").and_then(|v| v.as_bool()); + let effort = params + .get("effort") + .and_then(|v| v.as_str()) + .map(String::from); + let chrome_enabled = params.get("chrome_enabled").and_then(|v| v.as_bool()); + let disable_1m_context = + params.get("disable_1m_context").and_then(|v| v.as_bool()); + let mentioned_files: Option> = params + .get("mentioned_files") + .and_then(|v| serde_json::from_value(v.clone()).ok()); + let message_id = params + .get("message_id") + .and_then(|v| v.as_str()) + .map(String::from); + handle_send_chat_message( + state, + writer, + ctx, + &chat_session_id, + message_id, + &content, + permission_level.as_deref(), + model, + fast_mode, + thinking_enabled, + plan_mode, + effort, + chrome_enabled, + disable_1m_context, + mentioned_files, + ) + .await + } + } + } } "steer_queued_chat_message" => { Err("Mid-turn steering is not yet supported for remote sessions".to_string()) } "stop_agent" => { let chat_session_id = param_chat_session_id(¶ms); - handle_stop_agent(state, &chat_session_id).await + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => handle_stop_agent(state, &chat_session_id).await, + } } "reset_agent_session" => { let chat_session_id = param_chat_session_id(¶ms); - let mut agents = state.agents.write().await; - agents.remove(&chat_session_id); - Ok(json!(null)) - } - "list_repositories" => { - let db = open_db(state).map_err(|e| e.to_string()); - match db { - Ok(db) => db - .list_repositories() - .map(|repos| serde_json::to_value(repos).unwrap_or_default()) - .map_err(|e| e.to_string()), + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { Err(e) => Err(e), + Ok(_) => { + let mut agents = state.agents.write().await; + agents.remove(&chat_session_id); + Ok(json!(null)) + } } } + "list_repositories" => { + // Filter to repos that contain at least one in-scope workspace. + // Repos with zero in-scope workspaces leak nothing useful and + // would only confuse the remote UI, so we hide them. 
+ (|| -> Result { + let db = open_db(state).map_err(|e| e.to_string())?; + let repos = db.list_repositories().map_err(|e| e.to_string())?; + let workspaces = db.list_workspaces().map_err(|e| e.to_string())?; + let allowed_repo_ids: HashSet = workspaces + .iter() + .filter(|w| ctx.can_access_workspace(&w.id)) + .map(|w| w.repository_id.clone()) + .collect(); + let filtered: Vec<_> = repos + .into_iter() + .filter(|r| allowed_repo_ids.contains(&r.id)) + .collect(); + Ok(serde_json::to_value(filtered).unwrap_or_default()) + })() + } "list_workspaces" => { let db = open_db(state).map_err(|e| e.to_string()); match db { Ok(db) => db .list_workspaces() - .map(|ws| serde_json::to_value(ws).unwrap_or_default()) + .map(|ws| { + // Archived workspaces are excluded — once a host + // archives a workspace, remotes should treat it + // as gone. The push-side `workspace-lifecycle` + // event removes it live; this filter handles the + // reconnect / refresh path so the workspace + // doesn't reappear on subsequent calls. + let filtered: Vec<_> = ws + .into_iter() + .filter(|w| ctx.can_access_workspace(&w.id)) + .filter(|w| w.status == WorkspaceStatus::Active) + .collect(); + serde_json::to_value(filtered).unwrap_or_default() + }) .map_err(|e| e.to_string()), Err(e) => Err(e), } } "create_workspace" => { - let repository_id = param_str(¶ms, "repository_id"); - let name = param_str(¶ms, "name"); - handle_create_workspace(state, &repository_id, &name).await + // Creating a workspace can't be in-scope by definition (the + // workspace doesn't exist yet, so it's not in any allow list). + // Reserve this for the host only. + Err("Remote clients cannot create workspaces".into()) } "archive_workspace" => { let workspace_id = param_str(¶ms, "workspace_id"); - handle_archive_workspace(state, &workspace_id).await + if !ctx_can_access_workspace_live(state, ctx, &workspace_id).await { + Err("Not authorized for this workspace".into()) + } else { + handle_archive_workspace(state, &workspace_id).await + } } "load_diff_files" => { let workspace_id = param_str(¶ms, "workspace_id"); - handle_load_diff_files(state, &workspace_id).await + if !ctx_can_access_workspace_live(state, ctx, &workspace_id).await { + Err("Not authorized for this workspace".into()) + } else { + handle_load_diff_files(state, &workspace_id).await + } } "load_file_diff" => { + // `worktree_path` is a filesystem path, not a workspace id — + // the auth gate runs on the workspace lookup that produced it. + // Cross-reference: the path must match a worktree of a workspace + // in scope. Otherwise a remote could read arbitrary files. 
let worktree_path = param_str(¶ms, "worktree_path"); - let file_path = param_str(¶ms, "file_path"); - let merge_base = param_str(¶ms, "merge_base"); - let diff_layer = params - .get("diff_layer") - .and_then(|v| v.as_str()) - .map(String::from); - handle_load_file_diff( - &worktree_path, - &file_path, - &merge_base, - diff_layer.as_deref(), - ) - .await + let workspaces = open_db(state) + .map_err(|e| e.to_string()) + .and_then(|db| db.list_workspaces().map_err(|e| e.to_string())) + .unwrap_or_default(); + let mut allowed = false; + for workspace in workspaces + .iter() + .filter(|w| w.worktree_path.as_deref() == Some(&worktree_path)) + { + if ctx_can_access_workspace_live(state, ctx, &workspace.id).await { + allowed = true; + break; + } + } + if !allowed { + Err("Not authorized for this worktree".into()) + } else { + let file_path = param_str(¶ms, "file_path"); + let merge_base = param_str(¶ms, "merge_base"); + let diff_layer = params + .get("diff_layer") + .and_then(|v| v.as_str()) + .map(String::from); + handle_load_file_diff( + &worktree_path, + &file_path, + &merge_base, + diff_layer.as_deref(), + ) + .await + } } "spawn_pty" => { let workspace_id = param_str(¶ms, "workspace_id"); - let cwd = param_str(¶ms, "cwd"); - let rows = params.get("rows").and_then(|v| v.as_u64()).unwrap_or(24) as u16; - let cols = params.get("cols").and_then(|v| v.as_u64()).unwrap_or(80) as u16; - handle_spawn_pty(state, writer, &workspace_id, &cwd, rows, cols).await + if !ctx_can_access_workspace_live(state, ctx, &workspace_id).await { + Err("Not authorized for this workspace".into()) + } else { + let cwd = param_str(¶ms, "cwd"); + let rows = params.get("rows").and_then(|v| v.as_u64()).unwrap_or(24) as u16; + let cols = params.get("cols").and_then(|v| v.as_u64()).unwrap_or(80) as u16; + handle_spawn_pty(state, writer, &workspace_id, &cwd, rows, cols).await + } } "write_pty" => { let pty_id = params.get("pty_id").and_then(|v| v.as_u64()).unwrap_or(0); @@ -166,38 +414,148 @@ pub async fn handle_request( handle_close_pty(state, pty_id).await } "get_app_setting" => { + // Global settings are not workspace-scoped; treat reads as + // benign metadata (theme, fonts, feature flags) since the + // remote client may need them for rendering. Writes are + // host-only — see `set_app_setting` below. let key = param_str(¶ms, "key"); handle_get_app_setting(state, &key) } "set_app_setting" => { - let key = param_str(¶ms, "key"); - let value = param_str(¶ms, "value"); - handle_set_app_setting(state, &key, &value).await + // Settings are global; a scoped remote shouldn't be able to + // mutate machine-wide config. Host-only. 
+ Err("Remote clients cannot change app settings".into()) } "list_chat_sessions" => { let workspace_id = param_str(¶ms, "workspace_id"); - let include_archived = params - .get("include_archived") - .and_then(|v| v.as_bool()) - .unwrap_or(false); - handle_list_chat_sessions(state, &workspace_id, include_archived) + if !ctx_can_access_workspace_live(state, ctx, &workspace_id).await { + Err("Not authorized for this workspace".into()) + } else { + let include_archived = params + .get("include_archived") + .and_then(|v| v.as_bool()) + .unwrap_or(false); + handle_list_chat_sessions(state, &workspace_id, include_archived) + } } "get_chat_session" => { let chat_session_id = param_chat_session_id(¶ms); - handle_get_chat_session(state, &chat_session_id) + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => handle_get_chat_session(state, &chat_session_id), + } } "create_chat_session" => { let workspace_id = param_str(¶ms, "workspace_id"); - handle_create_chat_session(state, &workspace_id) + if !ctx_can_access_workspace_live(state, ctx, &workspace_id).await { + Err("Not authorized for this workspace".into()) + } else { + handle_create_chat_session(state, &workspace_id) + } } "rename_chat_session" => { let chat_session_id = param_chat_session_id(¶ms); - let name = param_str(¶ms, "name"); - handle_rename_chat_session(state, &chat_session_id, &name) + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => { + let name = param_str(¶ms, "name"); + handle_rename_chat_session(state, &chat_session_id, &name) + } + } } "archive_chat_session" => { let chat_session_id = param_chat_session_id(¶ms); - handle_archive_chat_session(state, &chat_session_id).await + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => handle_archive_chat_session(state, &chat_session_id).await, + } + } + "join_session" => { + let chat_session_id = param_chat_session_id(¶ms); + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => { + crate::collab::handle_join_session(state, writer, ctx, &chat_session_id).await + } + } + } + "leave_session" => { + let chat_session_id = param_chat_session_id(¶ms); + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => crate::collab::handle_leave_session(state, ctx, &chat_session_id).await, + } + } + "vote_plan_approval" => { + let chat_session_id = param_chat_session_id(¶ms); + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => { + let tool_use_id = param_str(¶ms, "tool_use_id"); + let approved = params + .get("approved") + .and_then(|v| v.as_bool()) + .unwrap_or(false); + let reason = params + .get("reason") + .and_then(|v| v.as_str()) + .map(String::from); + crate::collab::handle_vote_plan_approval( + state, + ctx, + &chat_session_id, + &tool_use_id, + approved, + reason, + ) + .await + } + } + } + "submit_agent_answer" => { + let chat_session_id = param_chat_session_id(¶ms); + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(_) => { + let tool_use_id = param_str(¶ms, "tool_use_id"); + let answers = params + .get("answers") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .unwrap_or_default(); + let annotations = params.get("annotations").cloned(); + crate::collab::handle_submit_agent_answer( + state, + ctx, + &chat_session_id, + &tool_use_id, + answers, + 
annotations, + ) + .await + } + } + } + "read_plan_file" => { + // Remote participants need to view plans during a consensus + // vote, but a remote with a workspace-scoped share must NOT + // be able to read plan files outside that scope. Two layers + // of defense: + // 1. `chat_session_id` is required and gates through the + // same `ctx_authorize_chat_session` check every other + // workspace-touching RPC uses — so a share scoped to + // workspace A can't ever ask about a session in + // workspace B. + // 2. The canonical path must resolve to somewhere under + // that session's worktree, AND match the existing + // `.claude/plans/*.md` whitelist. The path-shape check + // mirrors the local Tauri command in + // `src-tauri/src/commands/plan.rs::read_plan_file`. + let chat_session_id = param_chat_session_id(¶ms); + let path = param_str(¶ms, "path"); + match ctx_authorize_chat_session(state, ctx, &chat_session_id).await { + Err(e) => Err(e), + Ok(workspace_id) => handle_read_plan_file(state, &workspace_id, path).await, + } } _ => Err(format!("Unknown method: {method}")), }; @@ -321,10 +679,33 @@ fn now_iso() -> String { // ---- Command handlers ---- -async fn handle_load_initial_data(state: &ServerState) -> Result { +async fn handle_load_initial_data( + state: &ServerState, + ctx: &ConnectionCtx, +) -> Result { let db = open_db(state)?; - let repositories = db.list_repositories().map_err(|e| e.to_string())?; - let workspaces = db.list_workspaces().map_err(|e| e.to_string())?; + // Filter both lists down to the connection's scope. Workspaces is the + // direct gate; repositories follows from workspaces (a repo is visible + // iff at least one of its workspaces is in scope). + let all_workspaces = db.list_workspaces().map_err(|e| e.to_string())?; + // Hide archived workspaces from remotes. Archive is a host-side + // soft-delete that tears down the worktree and stops agents — the + // workspace can't usefully be operated on remotely anyway, and + // leaving it in the list lets the bug under PR #612 manifest where + // the remote shows ghost entries for archived workspaces. + let workspaces: Vec<_> = all_workspaces + .into_iter() + .filter(|w| ctx.can_access_workspace(&w.id)) + .filter(|w| w.status == WorkspaceStatus::Active) + .collect(); + let allowed_repo_ids: std::collections::HashSet = + workspaces.iter().map(|w| w.repository_id.clone()).collect(); + let repositories: Vec<_> = db + .list_repositories() + .map_err(|e| e.to_string())? + .into_iter() + .filter(|r| allowed_repo_ids.contains(&r.id)) + .collect(); let worktree_base_dir = { let dir = state.worktree_base_dir.read().await; @@ -399,7 +780,13 @@ async fn handle_load_initial_data(state: &ServerState) -> Result = db + .last_message_per_workspace() + .map_err(|e| e.to_string())? + .into_iter() + .filter(|m| ctx.can_access_workspace(&m.workspace_id)) + .collect(); Ok(json!({ "repositories": repositories, @@ -410,6 +797,57 @@ async fn handle_load_initial_data(state: &ServerState) -> Result Result { + use std::path::{Path, PathBuf}; + // Look up the workspace's worktree. Plans live under + // `/.claude/plans/`; any path that doesn't canonicalize to + // somewhere inside *that* worktree is rejected, so a scoped share + // can't read other workspaces' plans even though they're all under + // `.claude/plans/` on disk. 
+ let db = open_db(state)?; + let workspaces = db.list_workspaces().map_err(|e| e.to_string())?; + let worktree_path: PathBuf = workspaces + .iter() + .find(|w| w.id == workspace_id) + .and_then(|w| w.worktree_path.clone()) + .map(PathBuf::from) + .ok_or_else(|| "Workspace has no worktree".to_string())?; + let allowed_root = std::fs::canonicalize(&worktree_path) + .map_err(|e| format!("Workspace worktree is unreadable: {e}"))?; + + let content = tokio::task::spawn_blocking(move || { + let canonical = std::fs::canonicalize(Path::new(&path)) + .map_err(|e| format!("Invalid plan path: {e}"))?; + // Containment check: the requested path must canonicalize to a + // descendant of the workspace's worktree. `starts_with` works on + // canonicalized paths because both sides have all `..` and + // symlinks resolved. + if !canonical.starts_with(&allowed_root) { + return Err("Plan path is outside the workspace's worktree".to_string()); + } + // Path-shape whitelist: must include `.claude/plans/` and end + // with `.md`. Mirrors the Tauri-side command's check. + let components: Vec<&str> = canonical + .components() + .filter_map(|c| c.as_os_str().to_str()) + .collect(); + let has_plans_dir = components + .windows(2) + .any(|w| w[0] == ".claude" && w[1] == "plans"); + if !has_plans_dir || canonical.extension().and_then(|e| e.to_str()) != Some("md") { + return Err("Only .claude/plans/*.md files can be read".to_string()); + } + std::fs::read_to_string(&canonical).map_err(|e| format!("Failed to read plan file: {e}")) + }) + .await + .map_err(|e| format!("Failed to read plan file: {e}"))??; + Ok(serde_json::Value::String(content)) +} + async fn handle_load_chat_history( state: &ServerState, chat_session_id: &str, @@ -425,7 +863,9 @@ async fn handle_load_chat_history( async fn handle_send_chat_message( state: &Arc, writer: &Arc, + ctx: &ConnectionCtx, chat_session_id: &str, + message_id: Option, content: &str, permission_level: Option<&str>, model: Option, @@ -459,7 +899,7 @@ async fn handle_send_chat_message( // Save user message. let user_msg = ChatMessage { - id: uuid::Uuid::new_v4().to_string(), + id: message_id.unwrap_or_else(|| uuid::Uuid::new_v4().to_string()), workspace_id: workspace_id.clone(), chat_session_id: chat_session_id.clone(), role: ChatRole::User, @@ -472,10 +912,25 @@ async fn handle_send_chat_message( output_tokens: None, cache_read_tokens: None, cache_creation_tokens: None, + author_participant_id: Some(ctx.participant_id.0.clone()), + author_display_name: Some(ctx.display_name.clone()), }; db.insert_chat_message(&user_msg) .map_err(|e| e.to_string())?; + // In collaborative mode, broadcast the user message so other participants + // render it live. Without this, only the *agent's* responses propagate + // (via agent-stream); user prompts only persist to DB and would never + // show up in another participant's UI in real time. Frontend dedupe + // skips when `author_participant_id === selfParticipantId` so the + // sender's own optimistic message isn't duplicated. + if let Some(room) = state.rooms.get(&chat_session_id).await { + room.publish(json!({ + "event": "chat-message-added", + "payload": &user_msg, + })); + } + let level = permission_level.unwrap_or("full"); let allowed_tools = tools_for_level(level); @@ -614,12 +1069,48 @@ async fn handle_send_chat_message( session.session_resolved_env = resolved_env.vars.clone(); drop(agents); - // Bridge agent events to WebSocket. + // Bridge agent events to subscribers. 
When a room exists for this + // session (collaborative mode), every event is published to the room's + // broadcast channel — fan-out is handled by per-connection forwarder + // tasks (see `handle_join_session`). When no room exists (solo / 1:1 + // legacy), events flow directly to the prompter's writer. let ws_id = workspace_id.clone(); let chat_session_id_for_stream = chat_session_id.clone(); let db_path = state.db_path.clone(); let wt_path = worktree_path.clone(); let user_msg_id = user_msg.id.clone(); + let room = state.rooms.get(&chat_session_id).await; + // Acquire the turn lock for this participant just before spawning the + // bridge. Hard reject — competing participants are already greyed out + // by the `turn-started` broadcast below. + if let Some(r) = &room + && let Err(holder) = r.try_acquire_turn(&ctx.participant_id).await + { + return Err(format!("turn-locked-by:{}", holder.as_str())); + } + if let Some(r) = &room { + let started_at_ms = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_millis() as i64) + .unwrap_or(0); + *r.turn_started_at_ms.lock().await = Some(started_at_ms); + *r.turn_settings.write().await = Some(claudette::room::TurnSettingsSnapshot { + model: agent_settings.model.clone(), + plan_mode: agent_settings.plan_mode, + }); + r.publish(json!({ + "event": "turn-started", + "payload": { + "workspace_id": &workspace_id, + "chat_session_id": &chat_session_id, + "participant_id": ctx.participant_id.as_str(), + "display_name": &ctx.display_name, + "started_at_ms": started_at_ms, + "model": agent_settings.model.clone(), + "plan_mode": agent_settings.plan_mode, + }, + })); + } let state = Arc::clone(state); let writer = Arc::clone(writer); tokio::spawn(async move { @@ -650,10 +1141,12 @@ async fn handle_send_chat_message( agents.remove(&chat_session_id_for_stream); } - // Track per-assistant-message cumulative usage as the CLI streams - // it. The final MessageDelta before message_stop carries the - // authoritative per-message total; we overwrite on every delta and - // consume it when the assistant message is persisted below. + // Track per-assistant-message cumulative usage as the CLI streams it. + // The final MessageDelta before message_stop carries the authoritative + // per-message total; we overwrite on every delta and consume it when the + // assistant message is persisted below. Mirrors the Tauri-side bridge in + // `src-tauri/src/commands/chat/send.rs`. Without this, remote-initiated + // turns persist with null token counts. if let AgentEvent::Stream(StreamEvent::Stream { event: InnerStreamEvent::MessageDelta { usage: Some(u) }, }) = &event @@ -665,9 +1158,39 @@ async fn handle_send_chat_message( // events per turn (thinking-only, then text). Accumulate thinking // and save only when text content arrives. 
if let AgentEvent::Stream(StreamEvent::Assistant { ref message }) = event { - let full_text = extract_assistant_text(message); + let full_text: String = message + .content + .iter() + .filter_map(|block| { + if let claudette::agent::ContentBlock::Text { text } = block { + Some(text.as_str()) + } else { + None + } + }) + .collect::>() + .join(""); + + let event_thinking: Option = { + let parts: Vec<&str> = message + .content + .iter() + .filter_map(|block| { + if let claudette::agent::ContentBlock::Thinking { thinking } = block { + Some(thinking.as_str()) + } else { + None + } + }) + .collect(); + if parts.is_empty() { + None + } else { + Some(parts.join("")) + } + }; - if let Some(t) = extract_event_thinking(message) { + if let Some(t) = event_thinking { pending_thinking = Some(match pending_thinking.take() { Some(mut existing) => { existing.push_str(&t); @@ -732,16 +1255,36 @@ async fn handle_send_chat_message( } } - // Emit event over WebSocket. - let event_msg = json!({ - "event": "agent-stream", + // Emit event. Collab path (room exists) → publish so the room's + // subscribers fan it out. Legacy path (no room) → direct write + // to the prompter's writer, matching today's 1:1 behavior. + // + // Build the payload via the shared `AgentStreamPayload` struct + // so the wire shape stays in lockstep with the Tauri-side + // bridge and the frontend's TypeScript interface. Hand-rolled + // JSON here once silently regressed to a `session_id` key + // and dropped every event on remote receivers — see + // `src/chat.rs::AgentStreamPayload` for the full backstory. + let payload = claudette::chat::AgentStreamPayload { + workspace_id: ws_id.clone(), + chat_session_id: chat_session_id_for_stream.clone(), + event, + }; + let event_msg = json!({ "event": "agent-stream", "payload": payload }); + match &room { + Some(r) => r.publish(event_msg), + None => send_message(&writer, &event_msg).await, + } + } + if let Some(r) = &room { + r.release_turn().await; + r.publish(json!({ + "event": "turn-ended", "payload": { - "workspace_id": ws_id, - "session_id": chat_session_id_for_stream, - "event": event, - } - }); - send_message(&writer, &event_msg).await; + "workspace_id": &ws_id, + "chat_session_id": &chat_session_id_for_stream, + }, + })); } }); @@ -782,11 +1325,14 @@ async fn handle_stop_agent( output_tokens: None, cache_read_tokens: None, cache_creation_tokens: None, + author_participant_id: None, + author_display_name: None, }; db.insert_chat_message(&msg).map_err(|e| e.to_string())?; Ok(json!(null)) } +#[allow(dead_code)] // Workspace creation is host-only; the dispatch arm rejects. async fn handle_create_workspace( state: &ServerState, repository_id: &str, @@ -845,7 +1391,7 @@ async fn handle_archive_workspace( workspace_id: &str, ) -> Result { use claudette::ops::{NoopHooks, workspace as ops_workspace}; - + use claudette::workspace_events::WorkspaceEvent; // Stop any running agents for sessions in this workspace. Collect the // PIDs to stop under the lock, then drop the lock before awaiting any // process teardowns to avoid blocking unrelated requests. Agent state @@ -881,6 +1427,16 @@ async fn handle_archive_workspace( .await .map_err(|e| e.to_string())?; + // Notify every connected remote (including the initiator's siblings, + // and the host's own UI subscribers if any) that the workspace is gone. + // Without this, a remote that didn't initiate the archive would keep + // showing the workspace in its sidebar until the next reconnect. 
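    // A minimal sketch of the receiving side, assuming only the
    // `subscribe` / `recv` / `workspace_id()` API that the ws.rs forwarder
    // and the regression tests later in this diff exercise; the logging
    // body and the surrounding task are illustrative:
    //
    //     let mut rx = bus.subscribe();
    //     tokio::spawn(async move {
    //         loop {
    //             match rx.recv().await {
    //                 Ok(evt) => println!("workspace event: {}", evt.workspace_id()),
    //                 Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue,
    //                 Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
    //             }
    //         }
    //     });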
+ if let Some(bus) = state.workspace_events.as_ref() { + bus.publish(WorkspaceEvent::Archived { + workspace_id: workspace_id.to_string(), + }); + } + Ok(json!(null)) } @@ -1094,6 +1650,7 @@ fn handle_get_app_setting(state: &ServerState, key: &str) -> Result, pub no_mdns: bool, pub config_path: Option, + /// Pre-built shared config. When `Some`, the server uses this instance + /// (so a host process can mutate the same `ServerConfig` to mint and + /// revoke shares); when `None`, the server loads from `config_path` + /// and owns its own copy. + pub existing_config: Option>>, } impl Default for ServerOptions { @@ -31,6 +38,7 @@ impl Default for ServerOptions { name: None, no_mdns: false, config_path: None, + existing_config: None, } } } @@ -57,7 +65,38 @@ pub fn db_path() -> PathBuf { /// /// Prints the connection string to stdout before entering the accept loop /// (the Tauri parent process reads this to extract the connection string). +/// +/// This is the **subprocess** entrypoint — the server owns its own +/// `RoomRegistry`. For collaborative sessions where the Tauri host needs to +/// share rooms with the embedded server, see [`run_with_rooms`]. pub async fn run(options: ServerOptions) -> Result<(), Box> { + run_with_rooms(options, RoomRegistry::new()).await +} + +/// Variant of [`run`] that accepts an externally-owned `RoomRegistry`. The +/// Tauri host calls this from a `tokio::spawn` after starting collaborative +/// share: the registry is the same `Arc` held by `AppState`, so events +/// published from either side reach subscribers on the other. +pub async fn run_with_rooms( + options: ServerOptions, + rooms: Arc, +) -> Result<(), Box> { + run_with_rooms_and_events(options, rooms, None).await +} + +/// Variant of [`run_with_rooms`] that additionally wires a +/// [`claudette::workspace_events::WorkspaceEventBus`] into the server. +/// Authenticated WS connections subscribe to the bus and forward events +/// (currently: workspace archive) to remote clients in scope. +/// +/// The Tauri host passes a `Some(Arc<...>)` cloned from `AppState.workspace_events` +/// so a publish on the host side reaches every connected remote. +/// Subprocess servers without a host process pass `None`. +pub async fn run_with_rooms_and_events( + options: ServerOptions, + rooms: Arc, + workspace_events: Option>, +) -> Result<(), Box> { // Install the default crypto provider for rustls. When both `aws-lc-rs` and // `ring` features are active (e.g. embedded in the Tauri binary where // tauri-plugin-updater pulls in ring), rustls cannot auto-detect and panics. @@ -65,23 +104,41 @@ pub async fn run(options: ServerOptions) -> Result<(), Box Result<(), Box Result<(), Box, - - #[command(subcommand)] - command: Option, } -#[derive(Subcommand)] -enum Commands { - /// Generate a new pairing token (revokes all sessions). - RegenerateToken, - /// Print the connection string for this server. - ShowConnectionString, -} +// The legacy subcommands `regenerate-token` and `show-connection-string` +// were removed when the auth model switched from a single global pairing +// token to per-share scoped grants. Connection strings are now minted by +// the Claudette GUI when a share is created (one per share), so there's +// no useful single string for the binary to print on demand. 
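// A minimal sketch of the host-side embedding that the
// `run_with_rooms_and_events` entry point introduced above is designed for,
// assuming a hypothetical `app_state` that already holds the shared
// `RoomRegistry`, `WorkspaceEventBus`, and `Arc<Mutex<ServerConfig>>`
// (those field names are illustrative, not part of this patch):
use std::sync::Arc;

let options = ServerOptions {
    existing_config: Some(Arc::clone(&app_state.server_config)),
    ..ServerOptions::default()
};
let rooms = Arc::clone(&app_state.rooms);
let bus = Arc::clone(&app_state.workspace_events);
tokio::spawn(async move {
    if let Err(e) =
        claudette_server::run_with_rooms_and_events(options, rooms, Some(bus)).await
    {
        eprintln!("[share] embedded server exited: {e}");
    }
});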
#[tokio::main] async fn main() -> Result<(), Box> { let cli = Cli::parse(); - let config_path = cli.config.clone().unwrap_or_else(default_config_path); - - match &cli.command { - Some(Commands::RegenerateToken) => { - let mut config = ServerConfig::load_or_create(&config_path)?; - config.regenerate_token(); - config.save(&config_path)?; - println!("Pairing token regenerated. All existing sessions have been revoked."); - println!("\nNew connection string:"); - let host = gethostname::gethostname().to_string_lossy().to_string(); - println!( - " claudette://{}:{}/{}", - host, config.server.port, config.auth.pairing_token - ); - } - Some(Commands::ShowConnectionString) => { - let config = ServerConfig::load_or_create(&config_path)?; - let host = gethostname::gethostname().to_string_lossy().to_string(); - println!( - "claudette://{}:{}/{}", - host, config.server.port, config.auth.pairing_token - ); - } - None => { - claudette_server::run(ServerOptions { - port: cli.port, - bind: cli.bind, - name: cli.name, - no_mdns: cli.no_mdns, - config_path: cli.config, - }) - .await?; - } - } - + claudette_server::run(ServerOptions { + port: cli.port, + bind: cli.bind, + name: cli.name, + no_mdns: cli.no_mdns, + config_path: cli.config, + existing_config: None, + }) + .await?; Ok(()) } diff --git a/src-server/src/ws.rs b/src-server/src/ws.rs index 8e1213269..b58158603 100644 --- a/src-server/src/ws.rs +++ b/src-server/src/ws.rs @@ -9,15 +9,17 @@ use std::sync::atomic::{AtomicU64, Ordering}; use claudette::env_provider::EnvCache; use claudette::env_provider::types::EnvMap; use claudette::plugin_runtime::PluginRegistry; +use claudette::room::RoomRegistry; +use claudette::workspace_events::WorkspaceEventBus; use futures_util::{SinkExt, StreamExt}; use tokio::net::TcpStream; -use tokio::sync::RwLock; +use tokio::sync::{Mutex as AsyncMutex, RwLock}; use tokio_rustls::server::TlsStream; use tokio_tungstenite::WebSocketStream; use tokio_tungstenite::tungstenite::Message; -use crate::auth::ServerConfig; -use crate::handler; +use crate::auth::{ServerConfig, participant_id_for_token}; +use crate::handler::{self, ConnectionCtx}; /// Server-side application state — mirrors src-tauri's AppState but without Tauri dependencies. pub struct ServerState { @@ -36,6 +38,25 @@ pub struct ServerState { /// keep ownership cheap when the handler hands a reference into /// `resolve_with_registry`. pub env_cache: Arc, + /// Registry of collaborative-session rooms. Shared with the embedding + /// Tauri process so a publish from either side reaches subscribers on + /// the other. Solo / 1:1 sessions never enter the registry; bridge code + /// falls back to the direct-write path when no room exists. + pub rooms: Arc, + /// Cross-process bus for workspace lifecycle events (currently: + /// archive). Shared `Arc` with the Tauri host so a publish from + /// either side reaches every connected WebSocket subscriber. When + /// `None`, the auth handshake skips installing the per-connection + /// forwarder — used by tests and any future deployment that doesn't + /// need workspace push events. Production callers always supply one. + pub workspace_events: Option>, + /// The live `ServerConfig`. Held here (not just locally in + /// `handle_tls_connection`) so RPC handlers can re-check that the + /// connection's parent share still exists on every request — that's + /// how we get immediate revocation when the host calls `stop_share`. 
+ /// Wrapped in an async `Mutex` because mutations happen from both the + /// auth path and the share-management commands. + pub config: Arc>, } pub struct AgentSessionState { @@ -66,7 +87,7 @@ impl ServerState { /// Construct a `ServerState` without plugin discovery. Used by tests /// that don't exercise the env-provider path; production callers /// should use `new_with_plugins`. - pub fn new(db_path: PathBuf, worktree_base_dir: PathBuf) -> Self { + pub fn new(db_path: PathBuf, worktree_base_dir: PathBuf, config: ServerConfig) -> Self { Self { db_path, worktree_base_dir: RwLock::new(worktree_base_dir), @@ -75,6 +96,9 @@ impl ServerState { next_pty_id: AtomicU64::new(1), plugins: None, env_cache: Arc::new(EnvCache::new()), + rooms: RoomRegistry::new(), + workspace_events: None, + config: Arc::new(AsyncMutex::new(config)), } } @@ -86,6 +110,7 @@ impl ServerState { db_path: PathBuf, worktree_base_dir: PathBuf, plugins: PluginRegistry, + config: ServerConfig, ) -> Self { Self { db_path, @@ -95,9 +120,66 @@ impl ServerState { next_pty_id: AtomicU64::new(1), plugins: Some(RwLock::new(plugins)), env_cache: Arc::new(EnvCache::new()), + rooms: RoomRegistry::new(), + workspace_events: None, + config: Arc::new(AsyncMutex::new(config)), } } + /// Construct a `ServerState` with both a plugin registry and an + /// externally-owned `RoomRegistry`. The Tauri host shares its own + /// registry so collab events fan out across both processes. + pub fn new_with_plugins_and_rooms( + db_path: PathBuf, + worktree_base_dir: PathBuf, + plugins: PluginRegistry, + rooms: Arc, + config: ServerConfig, + ) -> Self { + Self::new_with_plugins_rooms_and_config_arc( + db_path, + worktree_base_dir, + plugins, + rooms, + Arc::new(AsyncMutex::new(config)), + ) + } + + /// Construct from an already-shared `Arc>`. Used + /// when the Tauri host wants to share both the room registry AND the + /// config (so it can mint and revoke shares while the in-process + /// server is running). + pub fn new_with_plugins_rooms_and_config_arc( + db_path: PathBuf, + worktree_base_dir: PathBuf, + plugins: PluginRegistry, + rooms: Arc, + config: Arc>, + ) -> Self { + Self { + db_path, + worktree_base_dir: RwLock::new(worktree_base_dir), + agents: RwLock::new(HashMap::new()), + ptys: RwLock::new(HashMap::new()), + next_pty_id: AtomicU64::new(1), + plugins: Some(RwLock::new(plugins)), + env_cache: Arc::new(EnvCache::new()), + rooms, + workspace_events: None, + config, + } + } + + /// Attach a workspace event bus shared with the embedding Tauri host. + /// Authenticated WebSocket connections subscribe to this bus and + /// forward events whose `workspace_id` is in the connection's scope. + /// Must be called before the server starts accepting connections — + /// connections that authenticate before the bus is attached won't + /// pick it up retroactively. 
+ pub fn set_workspace_events(&mut self, bus: Arc) { + self.workspace_events = Some(bus); + } + pub fn next_pty_id(&self) -> u64 { self.next_pty_id.fetch_add(1, Ordering::Relaxed) } @@ -108,10 +190,17 @@ pub type Writer = tokio::sync::Mutex< futures_util::stream::SplitSink>, Message>, >; -pub async fn send_message(writer: &Writer, value: &serde_json::Value) { +pub async fn try_send_message( + writer: &Writer, + value: &serde_json::Value, +) -> Result<(), tokio_tungstenite::tungstenite::Error> { let text = serde_json::to_string(value).unwrap_or_default(); let mut w = writer.lock().await; - let _ = w.send(Message::Text(text.into())).await; + w.send(Message::Text(text.into())).await +} + +pub async fn send_message(writer: &Writer, value: &serde_json::Value) { + let _ = try_send_message(writer, value).await; } /// Accept a TLS connection and upgrade it to WebSocket. @@ -139,6 +228,14 @@ pub async fn handle_tls_connection( let mut auth_attempts = 0; let max_attempts = 3; let mut authenticated = false; + // Captured after successful auth and used to construct the per-request + // `ConnectionCtx`. All stay `None` until auth succeeds. + let mut auth_participant_id: Option = None; + let mut auth_display_name: Option = None; + let mut auth_share_id: Option = None; + let mut auth_allowed_workspaces: Option> = None; + let mut auth_collaborative: bool = false; + let mut auth_consensus_required: bool = false; while auth_attempts < max_attempts { let msg = match read.next().await { @@ -183,14 +280,32 @@ pub async fn handle_tls_connection( // Try session token first, then pairing token. if let Some(session_token) = params.get("session_token").and_then(|t| t.as_str()) { let mut cfg = config.lock().await; - if cfg.validate_session(session_token) { + if let Some(resolved) = cfg.validate_session(session_token) { let _ = cfg.save(&config_path); let server_name = cfg.server.name.clone(); drop(cfg); + let participant_id = participant_id_for_token(session_token); + auth_participant_id = Some(participant_id.clone()); + auth_display_name = Some(resolved.session.name.clone()); + auth_share_id = Some(resolved.share_id.clone()); + auth_allowed_workspaces = Some(resolved.allowed_workspace_ids.clone()); + auth_collaborative = resolved.collaborative; + auth_consensus_required = resolved.consensus_required; + + // `participant_id` lets the client label its own messages + // ("You" vs "Alice") in collaborative sessions. Hashed from + // the session token, so it leaks nothing the client doesn't + // already hold. 
let resp = serde_json::json!({ "id": id, - "result": {"server_name": server_name} + "result": { + "server_name": server_name, + "participant_id": participant_id, + "allowed_workspace_ids": resolved.allowed_workspace_ids, + "collaborative": resolved.collaborative, + "consensus_required": resolved.consensus_required, + } }); send_message(&writer, &resp).await; authenticated = true; @@ -206,16 +321,29 @@ pub async fn handle_tls_connection( .unwrap_or("Unknown client"); let mut cfg = config.lock().await; - if let Some(session_token) = cfg.pair(pairing_token, client_name) { + if let Some(resolved) = cfg.pair(pairing_token, client_name) { let _ = cfg.save(&config_path); let server_name = cfg.server.name.clone(); drop(cfg); + let session_token = resolved.session.token.clone(); + let participant_id = participant_id_for_token(&session_token); + auth_participant_id = Some(participant_id.clone()); + auth_display_name = Some(client_name.to_string()); + auth_share_id = Some(resolved.share_id.clone()); + auth_allowed_workspaces = Some(resolved.allowed_workspace_ids.clone()); + auth_collaborative = resolved.collaborative; + auth_consensus_required = resolved.consensus_required; + let resp = serde_json::json!({ "id": id, "result": { "session_token": session_token, - "server_name": server_name + "server_name": server_name, + "participant_id": participant_id, + "allowed_workspace_ids": resolved.allowed_workspace_ids, + "collaborative": resolved.collaborative, + "consensus_required": resolved.consensus_required, } }); send_message(&writer, &resp).await; @@ -246,7 +374,78 @@ pub async fn handle_tls_connection( return; } - println!("[ws] Authenticated connection from {addr}"); + // SAFETY: every successful auth path above populates all four fields + // before `authenticated = true`; the `if !authenticated { return; }` + // gate ensures we only reach this point after one of those branches ran. + let ctx = ConnectionCtx::from_session( + auth_participant_id.expect("participant id set on successful auth"), + auth_display_name.expect("display name set on successful auth"), + auth_share_id.expect("share id set on successful auth"), + auth_allowed_workspaces.expect("allowed workspaces set on successful auth"), + auth_collaborative, + auth_consensus_required, + ); + + println!( + "[ws] Authenticated connection from {addr} as {} ({})", + ctx.display_name, + ctx.participant_id.as_str() + ); + + // Spawn the per-connection workspace event forwarder when the host + // has wired up a bus. Each event is filtered against this connection's + // allowed workspaces before forwarding, so a remote never learns about + // workspaces outside its share scope. The forwarder ends naturally when + // the broadcast channel closes (host shutdown) or when the writer's + // owning connection drops (the spawned task's `send_message` becomes + // a no-op once the WS has been closed). 
+ let workspace_events_task = state.workspace_events.as_ref().map(|bus| { + let mut rx = bus.subscribe(); + let writer_for_events = std::sync::Arc::clone(&writer); + let share_id = ctx.share_id.clone(); + let config = Arc::clone(&state.config); + tokio::spawn(async move { + loop { + match rx.recv().await { + Ok(evt) => { + let share_allows = { + let cfg = config.lock().await; + cfg.shares + .iter() + .find(|share| share.id == share_id) + .map(|share| { + share + .allowed_workspace_ids + .iter() + .any(|id| id == evt.workspace_id()) + }) + }; + match share_allows { + Some(true) => {} + Some(false) => continue, + None => break, + }; + // The frontend listens for top-level events of the + // form `{event, payload}`, mirroring the room-event + // shape (see `ServerEvent` decoding in + // src-tauri/src/transport/ws.rs). + let msg = serde_json::json!({ + "event": "workspace-lifecycle", + "payload": evt, + }); + send_message(&writer_for_events, &msg).await; + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => { + // Slow subscriber. The workspace list is reloaded + // on the next `load_initial_data`, so dropping a + // batch is recoverable — just keep going. + continue; + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + } + } + }) + }); // Phase 2: Command loop. while let Some(msg_result) = read.next().await { @@ -280,13 +479,27 @@ pub async fn handle_tls_connection( } }; - let state = std::sync::Arc::clone(&state); + let state_for_dispatch = std::sync::Arc::clone(&state); let writer = std::sync::Arc::clone(&writer); + let ctx_for_dispatch = ctx.clone(); tokio::spawn(async move { - let response = handler::handle_request(&state, &writer, &request).await; + let response = + handler::handle_request(&state_for_dispatch, &writer, &ctx_for_dispatch, &request) + .await; send_message(&writer, &response).await; }); } + // Connection ended — drop the participant from any rooms they were in + // so other participants see them leave promptly. Without this, a + // ghosted voter could hold up plan consensus for the rest of the room. + crate::collab::drop_all_joined_sessions(&state, &ctx).await; + // Cancel the workspace-event forwarder so it doesn't outlive the + // connection. The task would also exit on its own when the broadcast + // channel closes (host shutdown), but on a per-connection drop we + // want the resource freed immediately. + if let Some(handle) = workspace_events_task { + handle.abort(); + } println!("[ws] Disconnected: {addr}"); } diff --git a/src-server/tests/env_provider_integration.rs b/src-server/tests/env_provider_integration.rs index d0e1401cb..aa134ed88 100644 --- a/src-server/tests/env_provider_integration.rs +++ b/src-server/tests/env_provider_integration.rs @@ -15,9 +15,26 @@ use std::sync::Arc; use claudette::model::{AgentStatus, Repository, Workspace, WorkspaceStatus}; use claudette::plugin_runtime::PluginRegistry; +use claudette_server::auth::{ServerConfig, ServerSection}; use claudette_server::handler::resolve_workspace_env; use claudette_server::ws::ServerState; +/// Build a minimal `ServerConfig` for tests. The constructors now require +/// a config (so the runtime revocation check has something to consult); +/// these tests don't exercise auth, so an empty-shares config suffices. 
+fn test_config() -> ServerConfig { + ServerConfig { + server: ServerSection { + name: "test".into(), + port: 0, + bind: "127.0.0.1".into(), + }, + auth: None, + shares: Vec::new(), + sessions: Vec::new(), + } +} + /// Build a synthetic env-provider plugin in `plugin_dir/env-fixture` that /// detects whenever `.envrc` exists in the worktree and exports /// `FOO=bar`. Avoids the real `direnv` dependency so the test runs on @@ -117,6 +134,7 @@ async fn setup_state_with_envrc() -> ( db_path, PathBuf::from(worktree.path()), plugins, + test_config(), )); let repo = make_repo(&worktree.path().to_string_lossy()); @@ -192,7 +210,11 @@ async fn server_without_plugin_registry_returns_empty_env() { let _ = claudette::db::Database::open(&db_path).unwrap(); let worktree = tempfile::tempdir().unwrap(); - let state = Arc::new(ServerState::new(db_path, PathBuf::from(worktree.path()))); + let state = Arc::new(ServerState::new( + db_path, + PathBuf::from(worktree.path()), + test_config(), + )); let repo = make_repo(&worktree.path().to_string_lossy()); let ws = make_workspace(&repo.id, &worktree.path().to_string_lossy()); @@ -238,6 +260,7 @@ async fn server_skips_disabled_provider_per_repo() { db_path, PathBuf::from(worktree.path()), plugins, + test_config(), )); let repo = make_repo(&worktree.path().to_string_lossy()); diff --git a/src-server/tests/workspace_archive_propagation.rs b/src-server/tests/workspace_archive_propagation.rs new file mode 100644 index 000000000..01f5323cb --- /dev/null +++ b/src-server/tests/workspace_archive_propagation.rs @@ -0,0 +1,149 @@ +//! Regression coverage for the bug where archiving a host workspace +//! left a "ghost" entry on every connected remote claudette. +//! +//! Two surfaces matter: +//! 1. The pull-side filter — `list_workspaces` / `load_initial_data` +//! must exclude `WorkspaceStatus::Archived`, so reconnects don't +//! re-surface the archived workspace. +//! 2. The push-side fanout — archiving must publish a +//! `WorkspaceEvent::Archived` to the shared `WorkspaceEventBus` so +//! currently-connected remotes drop the workspace immediately, +//! regardless of which (if any) chat session they have open. +//! +//! Spinning up the whole TLS+WS stack here would be overkill — we +//! exercise the underlying primitives that the handler delegates to. +//! Together with the WS forwarder in `ws.rs` (which is a thin +//! `subscribe → filter → forward` loop) this covers the bug end-to-end. 
+ +use std::sync::Arc; + +use claudette::db::Database; +use claudette::model::{AgentStatus, Repository, Workspace, WorkspaceStatus}; +use claudette::workspace_events::{WorkspaceEvent, WorkspaceEventBus}; + +fn make_repo(id: &str) -> Repository { + Repository { + id: id.into(), + path: format!("/tmp/{id}"), + name: format!("repo-{id}"), + path_slug: format!("repo-{id}"), + icon: None, + created_at: "2026-01-01 00:00:00".into(), + setup_script: None, + custom_instructions: None, + sort_order: 0, + branch_rename_preferences: None, + setup_script_auto_run: false, + base_branch: None, + default_remote: None, + path_valid: true, + } +} + +fn make_workspace(id: &str, repo_id: &str, status: WorkspaceStatus) -> Workspace { + Workspace { + id: id.into(), + repository_id: repo_id.into(), + name: format!("ws-{id}"), + branch_name: "main".into(), + worktree_path: Some(format!("/tmp/{id}-wt")), + status, + agent_status: AgentStatus::Idle, + status_line: String::new(), + sort_order: 0, + created_at: "2026-01-01 00:00:00".into(), + } +} + +/// The handler's `list_workspaces` / `load_initial_data` arms apply +/// `status == Active` after the access-scope check. This test pins +/// down the underlying behavior: `db.list_workspaces()` returns both +/// active and archived rows, and the filter the handler uses cleanly +/// separates them. If `list_workspaces` ever silently changes semantics +/// (e.g. starts excluding archived rows internally) this test fails +/// loud, prompting a re-review of the handler-side filter. +#[test] +fn list_workspaces_returns_active_and_archived_until_filtered() { + let dir = tempfile::tempdir().unwrap(); + let db_path = dir.path().join("test.db"); + let db = Database::open(&db_path).unwrap(); + + db.insert_repository(&make_repo("r1")).unwrap(); + db.insert_workspace(&make_workspace("active-1", "r1", WorkspaceStatus::Active)) + .unwrap(); + db.insert_workspace(&make_workspace("active-2", "r1", WorkspaceStatus::Active)) + .unwrap(); + db.insert_workspace(&make_workspace( + "archived-1", + "r1", + WorkspaceStatus::Archived, + )) + .unwrap(); + + let all = db.list_workspaces().unwrap(); + assert_eq!( + all.len(), + 3, + "DB layer must surface every workspace regardless of status \ + — the handler is the layer responsible for filtering" + ); + + // Same predicate the handler applies. + let visible_to_remote: Vec<_> = all + .into_iter() + .filter(|w| w.status == WorkspaceStatus::Active) + .collect(); + let visible_ids: Vec<_> = visible_to_remote.iter().map(|w| w.id.as_str()).collect(); + assert_eq!(visible_ids, vec!["active-1", "active-2"]); +} + +/// After the host archives a workspace, the bus must fan the event out +/// to every subscriber. The WS connection forwarder in `ws.rs` is one +/// such subscriber — it filters by the connection's allowed-workspaces +/// scope and writes the event to the socket. This test pins down the +/// publish/subscribe contract that whole pipeline depends on. +#[tokio::test] +async fn workspace_event_bus_fans_archive_to_all_subscribers() { + let bus = Arc::new(WorkspaceEventBus::new()); + + // Two subscribers stand in for two connected remotes. 
+ let mut rx_a = bus.subscribe(); + let mut rx_b = bus.subscribe(); + + bus.publish(WorkspaceEvent::Archived { + workspace_id: "ws-42".into(), + }); + + let evt_a = tokio::time::timeout(std::time::Duration::from_secs(1), rx_a.recv()) + .await + .expect("subscriber A must receive the event before the timeout") + .expect("subscriber A must not see a Lagged/Closed error"); + let evt_b = tokio::time::timeout(std::time::Duration::from_secs(1), rx_b.recv()) + .await + .expect("subscriber B must receive the event before the timeout") + .expect("subscriber B must not see a Lagged/Closed error"); + + assert_eq!(evt_a.workspace_id(), "ws-42"); + assert_eq!(evt_b.workspace_id(), "ws-42"); +} + +/// Late-attaching subscribers don't observe past events — a deliberate +/// property of the broadcast channel (mirrors what a freshly-connected +/// remote sees: it relies on `load_initial_data` for the snapshot, not +/// on the bus). Pinning this down so we don't accidentally switch to a +/// replay channel without re-thinking the snapshot path. +#[tokio::test] +async fn workspace_event_bus_does_not_replay_for_late_subscribers() { + let bus = WorkspaceEventBus::new(); + bus.publish(WorkspaceEvent::Archived { + workspace_id: "ws-old".into(), + }); + + let mut rx = bus.subscribe(); + // Nothing buffered for a late subscriber. `try_recv` returns Empty + // immediately rather than yielding the past event. + assert!(matches!( + rx.try_recv(), + Err(tokio::sync::broadcast::error::TryRecvError::Empty) + )); +} diff --git a/src-tauri/src/commands/chat/checkpoint.rs b/src-tauri/src/commands/chat/checkpoint.rs index 4699e4de4..43e409fe6 100644 --- a/src-tauri/src/commands/chat/checkpoint.rs +++ b/src-tauri/src/commands/chat/checkpoint.rs @@ -104,9 +104,23 @@ pub async fn rollback_to_checkpoint( db.clear_chat_session_state(&chat_session_id) .map_err(|e| e.to_string())?; - // Return the truncated message list for this session. - db.list_chat_messages_for_session(&chat_session_id) - .map_err(|e| e.to_string()) + // Return the truncated message list for this session and broadcast the + // replacement to any remote participants watching the same room. + let messages = db + .list_chat_messages_for_session(&chat_session_id) + .map_err(|e| e.to_string())?; + if let Some(room) = state.rooms.get(&chat_session_id).await { + room.publish(serde_json::json!({ + "event": "session-history-replaced", + "payload": { + "workspace_id": workspace_id, + "chat_session_id": chat_session_id, + "checkpoint_id": checkpoint_id, + "messages": messages.clone(), + }, + })); + } + Ok(messages) } /// Clear the entire conversation for a workspace, optionally restoring files @@ -184,9 +198,23 @@ pub async fn clear_conversation( db.clear_chat_session_state(&chat_session_id) .map_err(|e| e.to_string())?; - // Return empty list. - db.list_chat_messages_for_session(&chat_session_id) - .map_err(|e| e.to_string()) + // Return empty list and broadcast the replacement to any remote + // participants watching the same room. 
+ let messages = db + .list_chat_messages_for_session(&chat_session_id) + .map_err(|e| e.to_string())?; + if let Some(room) = state.rooms.get(&chat_session_id).await { + room.publish(serde_json::json!({ + "event": "session-history-replaced", + "payload": { + "workspace_id": workspace_id, + "chat_session_id": chat_session_id, + "checkpoint_id": serde_json::Value::Null, + "messages": messages.clone(), + }, + })); + } + Ok(messages) } #[tauri::command] diff --git a/src-tauri/src/commands/chat/interaction.rs b/src-tauri/src/commands/chat/interaction.rs index a9a178ca6..f90117e50 100644 --- a/src-tauri/src/commands/chat/interaction.rs +++ b/src-tauri/src/commands/chat/interaction.rs @@ -41,12 +41,64 @@ pub async fn submit_agent_answer( annotations: Option, state: State<'_, AppState>, ) -> Result<(), String> { + record_agent_answer( + &state, + &session_id, + &tool_use_id, + &claudette::room::ParticipantId::host(), + answers, + annotations, + true, + ) + .await +} + +pub async fn record_agent_answer( + state: &AppState, + session_id: &str, + tool_use_id: &str, + participant: &claudette::room::ParticipantId, + answers: std::collections::HashMap, + annotations: Option, + broadcast_cast: bool, +) -> Result<(), String> { + use std::sync::Arc; + + enum QuestionOutcome { + WaitingForMore, + Finalized { + request_id: String, + original_input: serde_json::Value, + pending: Box, + annotations: Option, + ps: Arc, + }, + } + // Validate everything BEFORE removing the pending entry: if the session // has been torn down or the entry maps to the wrong tool, the entry must // stay so the user (or the correct submit_* command) can still see it. - let (pending, ps) = { + let live_required_voters = if let Some(room) = state.rooms.get(session_id).await + && *room.consensus_required.read().await + { + let participants = room.participants.read().await; + let voters = participants + .values() + .filter(|p| !p.muted) + .map(|p| p.id.clone()) + .collect::>(); + if voters.is_empty() { + None + } else { + Some(voters) + } + } else { + None + }; + + let outcome = { let mut agents = state.agents.write().await; - let session = agents.get_mut(&session_id).ok_or("Session not found")?; + let session = agents.get_mut(session_id).ok_or("Session not found")?; // 1. Persistent session must be alive — otherwise nobody is reading // stdin and the response would be discarded. let ps = session @@ -54,7 +106,7 @@ pub async fn submit_agent_answer( .clone() .ok_or("Agent session is not active")?; // 2. Tool kind must match — peek by reference. - match session.pending_permissions.get(&tool_use_id) { + match session.pending_permissions.get(tool_use_id) { None => { let pending_ids: Vec = session.pending_permissions.keys().cloned().collect(); @@ -70,17 +122,82 @@ pub async fn submit_agent_answer( } _ => {} } - // 3. All checks passed — now it is safe to remove. 
- let pending = session + let pending_mut = session .pending_permissions - .remove(&tool_use_id) + .get_mut(tool_use_id) .expect("checked above"); - session.reset_attention(); - (pending, ps) + if pending_mut.required_voters.is_empty() + && let Some(required_voters) = live_required_voters + { + pending_mut.required_voters = required_voters; + } + pending_mut.question_votes.insert( + participant.clone(), + claudette::room::QuestionVote { + answers: answers.clone(), + }, + ); + + if !pending_mut.required_voters.is_empty() + && !pending_mut + .required_voters + .iter() + .all(|voter| pending_mut.question_votes.contains_key(voter)) + { + QuestionOutcome::WaitingForMore + } else { + let pending = session + .pending_permissions + .remove(tool_use_id) + .expect("checked above"); + session.reset_attention(); + QuestionOutcome::Finalized { + request_id: pending.request_id.clone(), + original_input: pending.original_input.clone(), + pending: Box::new(pending), + annotations, + ps, + } + } }; + if let Some(room) = state.rooms.get(session_id).await { + if broadcast_cast { + room.publish(serde_json::json!({ + "event": "agent-question-answer-cast", + "payload": { + "chat_session_id": session_id, + "tool_use_id": tool_use_id, + "participant_id": participant.as_str(), + "answers": &answers, + }, + })); + } + if matches!(outcome, QuestionOutcome::Finalized { .. }) { + room.publish(serde_json::json!({ + "event": "agent-question-resolved", + "payload": { + "chat_session_id": session_id, + "tool_use_id": tool_use_id, + }, + })); + } + } + + let QuestionOutcome::Finalized { + request_id, + original_input, + pending, + annotations, + ps, + } = outcome + else { + return Ok(()); + }; + let answers = aggregate_question_answers(state, session_id, pending.as_ref()).await; + // Layer answers (and annotations, if any) onto the original input. 
- let mut updated_input = pending.original_input.clone(); + let mut updated_input = original_input; if !updated_input.is_object() { updated_input = serde_json::Value::Object(serde_json::Map::new()); } @@ -97,14 +214,81 @@ pub async fn submit_agent_answer( "behavior": "allow", "updatedInput": updated_input, }); - ps.send_control_response(&pending.request_id, response) - .await + ps.send_control_response(&request_id, response).await +} + +async fn aggregate_question_answers( + state: &AppState, + session_id: &str, + pending: &crate::state::PendingPermission, +) -> std::collections::HashMap { + if pending.question_votes.len() <= 1 { + return pending + .question_votes + .values() + .next() + .map(|vote| vote.answers.clone()) + .unwrap_or_default(); + } + + let participants = if let Some(room) = state.rooms.get(session_id).await { + room.participant_list() + .await + .into_iter() + .map(|p| (p.id, p.display_name)) + .collect::>() + } else { + std::collections::HashMap::new() + }; + + let mut by_question: std::collections::BTreeMap> = + std::collections::BTreeMap::new(); + for (participant, vote) in &pending.question_votes { + let display = participants + .get(participant) + .cloned() + .unwrap_or_else(|| participant.as_str().to_string()); + for (question, answer) in &vote.answers { + by_question + .entry(question.clone()) + .or_default() + .push((display.clone(), answer.clone())); + } + } + + by_question + .into_iter() + .map(|(question, mut answers)| { + answers.sort_by(|a, b| a.0.cmp(&b.0)); + let unanimous = answers + .first() + .map(|(_, first)| answers.iter().all(|(_, answer)| answer == first)) + .unwrap_or(false); + let answer = if unanimous { + answers + .first() + .map(|(_, answer)| answer.clone()) + .unwrap_or_default() + } else { + answers + .into_iter() + .map(|(display, answer)| format!("{display}: {answer}")) + .collect::>() + .join("\n") + }; + (question, answer) + }) + .collect() } /// Resolve a pending ExitPlanMode `can_use_tool` request. /// `approved=true` → allow with the model's original input (the CLI's /// `call()` will save the plan and emit the real tool_result). /// `approved=false` → deny with the given reason (or a sensible default). +/// +/// In collaborative + consensus mode, this records the host's vote and may +/// finalize the outcome immediately (host veto: an approve forces approval, +/// a deny forces denial), or wait for remaining required voters. #[tauri::command] pub async fn submit_plan_approval( session_id: String, @@ -113,16 +297,62 @@ pub async fn submit_plan_approval( reason: Option, state: State<'_, AppState>, ) -> Result<(), String> { - // Same validate-before-remove pattern as submit_agent_answer — see that - // function for the rationale. - let (pending, ps) = { + record_plan_vote( + &state, + &session_id, + &tool_use_id, + &claudette::room::ParticipantId::host(), + true, // is_host + if approved { + claudette::room::Vote::Approve + } else { + claudette::room::Vote::Deny { + reason: reason.unwrap_or_else(|| "Plan denied. Please revise the approach.".into()), + } + }, + true, // broadcast cast — fresh host-originated vote + ) + .await +} + +/// Record one participant's vote on an open plan-consensus and finalize the +/// outcome if the unanimous-with-host-veto rule is now satisfied. Shared +/// between the local Tauri command and the host-side resolver task that +/// consumes `plan-vote-cast` events forwarded from remote participants. +/// +/// `broadcast_cast` controls whether this call broadcasts the +/// `plan-vote-cast` event itself. 
`true` for fresh host-originated votes; +/// `false` when called from the resolver task on an event that the remote +/// server already broadcast (preventing double-emission). +pub async fn record_plan_vote( + state: &AppState, + session_id: &str, + tool_use_id: &str, + participant: &claudette::room::ParticipantId, + is_host: bool, + vote: claudette::room::Vote, + broadcast_cast: bool, +) -> Result<(), String> { + use std::sync::Arc; + enum VoteOutcome { + WaitingForMore, + Finalized { + request_id: String, + response: serde_json::Value, + ps: Arc, + outcome_kind: &'static str, + outcome_reason: Option, + }, + } + + let outcome = { let mut agents = state.agents.write().await; - let session = agents.get_mut(&session_id).ok_or("Session not found")?; + let session = agents.get_mut(session_id).ok_or("Session not found")?; let ps = session .persistent_session .clone() .ok_or("Agent session is not active")?; - match session.pending_permissions.get(&tool_use_id) { + match session.pending_permissions.get(tool_use_id) { None => { let pending_ids: Vec = session.pending_permissions.keys().cloned().collect(); @@ -138,31 +368,303 @@ pub async fn submit_plan_approval( } _ => {} } - let pending = session + + let pending_mut = session .pending_permissions - .remove(&tool_use_id) + .get_mut(tool_use_id) .expect("checked above"); - session.reset_attention(); - (pending, ps) + if !is_host + && !pending_mut.required_voters.is_empty() + && !pending_mut.required_voters.contains(participant) + { + return Ok(()); + } + pending_mut.votes.insert(participant.clone(), vote.clone()); + + let resolution = resolve_consensus(pending_mut, participant, is_host, &vote); + + match resolution { + None => VoteOutcome::WaitingForMore, + Some(final_vote) => { + let pending = session + .pending_permissions + .remove(tool_use_id) + .expect("checked above"); + // Use the shared `reset_attention()` helper so this block stays + // in sync with the other places that clear pending-permission + // state (`submit_agent_answer`, `submit_plan_approval`, the + // session-lifecycle teardown). Inline triple-assign would + // drift if the helper grew new fields. + session.reset_attention(); + let (response, kind, reason) = match &final_vote { + claudette::room::Vote::Approve => ( + serde_json::json!({ + "behavior": "allow", + "updatedInput": &pending.original_input, + }), + "approve", + None, + ), + claudette::room::Vote::Deny { reason } => { + let message = format!( + "{reason}\n\nRevise the plan to address this feedback, then call ExitPlanMode again to present the updated plan for approval. Do not begin implementation until the user approves the revised plan." + ); + ( + serde_json::json!({ + "behavior": "deny", + "message": message, + }), + "deny", + Some(reason.clone()), + ) + } + }; + VoteOutcome::Finalized { + request_id: pending.request_id, + response, + ps, + outcome_kind: kind, + outcome_reason: reason, + } + } + } }; - let response = if approved { - serde_json::json!({ - "behavior": "allow", - "updatedInput": pending.original_input, - }) + let finalized = matches!(outcome, VoteOutcome::Finalized { .. 
}); + if let Some(room) = state.rooms.get(session_id).await { + if finalized { + *room.pending_vote.write().await = None; + } else { + let mut pending_vote = room.pending_vote.write().await; + if let Some(pending_vote) = pending_vote.as_mut() + && pending_vote.tool_use_id == tool_use_id + { + pending_vote.votes.insert(participant.clone(), vote.clone()); + } + } + } + + if broadcast_cast && let Some(room) = state.rooms.get(session_id).await { + room.publish(serde_json::json!({ + "event": "plan-vote-cast", + "payload": { + "chat_session_id": session_id, + "tool_use_id": tool_use_id, + "participant_id": participant.as_str(), + "vote": &vote, + }, + })); + } + + match outcome { + VoteOutcome::WaitingForMore => Ok(()), + VoteOutcome::Finalized { + request_id, + response, + ps, + outcome_kind, + outcome_reason, + } => { + if let Some(room) = state.rooms.get(session_id).await { + room.publish(serde_json::json!({ + "event": "plan-vote-resolved", + "payload": { + "chat_session_id": session_id, + "tool_use_id": tool_use_id, + "outcome": outcome_kind, + "reason": outcome_reason, + }, + })); + } + ps.send_control_response(&request_id, response).await + } + } +} + +/// Pure resolution rule: given the current pending state and the just-cast +/// vote, return `Some(final_vote)` if the round resolves now, or `None` if +/// it still needs more input. Extracted as a free function for unit tests. +/// +/// Rules: host vote is decisive (host veto). Non-host: any deny short-circuits +/// to deny with that user's critique; approve resolves only when every required +/// voter has voted approve. Empty `required_voters` (non-consensus path) +/// always resolves to the submitted vote. +fn resolve_consensus( + pending: &crate::state::PendingPermission, + voter: &claudette::room::ParticipantId, + voter_is_host: bool, + just_cast: &claudette::room::Vote, +) -> Option { + if pending.required_voters.is_empty() { + return Some(just_cast.clone()); + } + if voter_is_host { + return Some(just_cast.clone()); + } + if !pending.required_voters.contains(voter) { + return None; + } + evaluate_resolved_state(pending) +} + +/// State-only resolution rule used when re-evaluating an open vote after +/// the participant set changes (no fresh "just cast"). Same shape as the +/// non-host branch of [`resolve_consensus`]: a non-host deny short- +/// circuits, otherwise unanimous approve resolves, otherwise wait. +/// +/// Pulled out so the participant-pruning path can share the rule with +/// `resolve_consensus`. +fn evaluate_resolved_state( + pending: &crate::state::PendingPermission, +) -> Option { + if let Some((_, vote)) = pending + .votes + .iter() + .find(|(pid, v)| !pid.is_host() && pending.required_voters.contains(pid) && v.is_deny()) + { + return Some(vote.clone()); + } + let all_approved = pending.required_voters.iter().all(|pid| { + pending + .votes + .get(pid) + .map(|v| matches!(v, claudette::room::Vote::Approve)) + .unwrap_or(false) + }); + if all_approved { + Some(claudette::room::Vote::Approve) } else { - let feedback = reason.unwrap_or_else(|| "Plan denied. Please revise the approach.".into()); - let message = format!( - "{feedback}\n\nRevise the plan to address this feedback, then call ExitPlanMode again to present the updated plan for approval. Do not begin implementation until the user approves the revised plan." 
- ); - serde_json::json!({ - "behavior": "deny", - "message": message, - }) - }; - ps.send_control_response(&pending.request_id, response) - .await + None + } +} + +/// Re-evaluate every open consensus vote on `session_id` after the room's +/// participant set changed (someone joined, left, or was kicked/muted). For +/// each pending plan-permission with a non-empty `required_voters`: +/// +/// 1. Drop any required voter who is no longer in `current_participants`, +/// along with their cast vote (treat the absentee as an *implicit +/// abstain* — neither approve nor deny). The host is exempt from +/// pruning since the host is always implicitly present from the +/// Tauri side. +/// 2. Re-evaluate via [`evaluate_resolved_state`]. If the round now +/// resolves (remaining required voters all approved, or a non-host +/// deny is still in the pruned vote set), finalize: send the +/// `control_response` and broadcast `plan-vote-resolved`. +/// +/// "Implicit abstain" is the conservative default: leaving the vote +/// open until one of the remaining voters acts, rather than auto- +/// approving or auto-denying on a participant's behalf. Without this +/// pruning a single disconnect could deadlock the agent indefinitely. +pub async fn prune_consensus_voters_for_session( + state: &AppState, + session_id: &str, + current_participants: &std::collections::HashSet, +) -> Result<(), String> { + use std::sync::Arc; + + struct Finalize { + request_id: String, + tool_use_id: String, + ps: Arc, + final_vote: claudette::room::Vote, + original_input: serde_json::Value, + } + + // Pass 1: prune absentees and collect any pending entries that have + // newly resolved as a result. + let mut finalize_list: Vec = Vec::new(); + { + let mut agents = state.agents.write().await; + let Some(session) = agents.get_mut(session_id) else { + return Ok(()); + }; + let Some(ps) = session.persistent_session.clone() else { + return Ok(()); + }; + let tool_use_ids: Vec = session.pending_permissions.keys().cloned().collect(); + let mut any_finalized = false; + for tool_use_id in tool_use_ids { + let Some(pending_mut) = session.pending_permissions.get_mut(&tool_use_id) else { + continue; + }; + // Only consensus-required entries care about participant changes. + if pending_mut.required_voters.is_empty() { + continue; + } + pending_mut + .required_voters + .retain(|pid| pid.is_host() || current_participants.contains(pid)); + pending_mut + .votes + .retain(|pid, _| pid.is_host() || current_participants.contains(pid)); + + if let Some(final_vote) = evaluate_resolved_state(pending_mut) { + let pending = session + .pending_permissions + .remove(&tool_use_id) + .expect("checked above"); + finalize_list.push(Finalize { + request_id: pending.request_id, + tool_use_id, + ps: ps.clone(), + final_vote, + original_input: pending.original_input, + }); + any_finalized = true; + } + } + if any_finalized { + session.reset_attention(); + } + } + + // Pass 2: deliver control responses and broadcast outcomes. Done + // outside the agents write lock so the broadcast doesn't block other + // RPCs and the control-response send can do its own awaits. 
+ for f in finalize_list { + let (response, kind, reason) = match &f.final_vote { + claudette::room::Vote::Approve => ( + serde_json::json!({ + "behavior": "allow", + "updatedInput": &f.original_input, + }), + "approve", + None, + ), + claudette::room::Vote::Deny { reason } => { + let message = format!( + "{reason}\n\nRevise the plan to address this feedback, then call ExitPlanMode again to present the updated plan for approval. Do not begin implementation until the user approves the revised plan." + ); + ( + serde_json::json!({ + "behavior": "deny", + "message": message, + }), + "deny", + Some(reason.clone()), + ) + } + }; + if let Some(room) = state.rooms.get(session_id).await { + room.publish(serde_json::json!({ + "event": "plan-vote-resolved", + "payload": { + "chat_session_id": session_id, + "tool_use_id": f.tool_use_id, + "outcome": kind, + "reason": reason, + }, + })); + } + if let Err(e) = f.ps.send_control_response(&f.request_id, response).await { + eprintln!( + "[collab] prune resolver: send_control_response failed for {}: {e}", + f.tool_use_id + ); + } + } + Ok(()) } /// Synchronously drain any pending permission requests from `session` and @@ -211,3 +713,66 @@ pub(crate) async fn deny_drained_permissions( } } } + +#[cfg(test)] +mod tests { + use std::collections::{HashMap, HashSet}; + + use super::{evaluate_resolved_state, resolve_consensus}; + use crate::state::PendingPermission; + + fn pid(id: &str) -> claudette::room::ParticipantId { + claudette::room::ParticipantId(id.to_string()) + } + + fn pending(required: &[&str]) -> PendingPermission { + PendingPermission { + request_id: "request-1".to_string(), + tool_name: "ExitPlanMode".to_string(), + original_input: serde_json::json!({}), + required_voters: required.iter().map(|id| pid(id)).collect::>(), + votes: HashMap::new(), + question_votes: HashMap::new(), + } + } + + #[test] + fn consensus_ignores_non_required_voter_denies() { + let mut pending = pending(&["alice"]); + pending.votes.insert( + pid("observer"), + claudette::room::Vote::Deny { + reason: "nope".to_string(), + }, + ); + + assert_eq!(evaluate_resolved_state(&pending), None); + assert_eq!( + resolve_consensus( + &pending, + &pid("observer"), + false, + &claudette::room::Vote::Deny { + reason: "nope".to_string(), + }, + ), + None, + ); + } + + #[test] + fn consensus_resolves_when_required_voters_approve() { + let mut pending = pending(&["alice", "bob"]); + pending + .votes + .insert(pid("alice"), claudette::room::Vote::Approve); + pending + .votes + .insert(pid("bob"), claudette::room::Vote::Approve); + + assert_eq!( + evaluate_resolved_state(&pending), + Some(claudette::room::Vote::Approve), + ); + } +} diff --git a/src-tauri/src/commands/chat/lifecycle.rs b/src-tauri/src/commands/chat/lifecycle.rs index e4ec33035..7e7ce7454 100644 --- a/src-tauri/src/commands/chat/lifecycle.rs +++ b/src-tauri/src/commands/chat/lifecycle.rs @@ -98,6 +98,8 @@ pub async fn stop_agent( output_tokens: None, cache_read_tokens: None, cache_creation_tokens: None, + author_participant_id: None, + author_display_name: None, }; db.insert_chat_message(&msg).map_err(|e| e.to_string())?; diff --git a/src-tauri/src/commands/chat/mod.rs b/src-tauri/src/commands/chat/mod.rs index 30593accf..c9d5da76a 100644 --- a/src-tauri/src/commands/chat/mod.rs +++ b/src-tauri/src/commands/chat/mod.rs @@ -6,6 +6,11 @@ mod naming; pub mod send; pub mod session; +// Re-export the consensus resolver so the host-side resolver task in +// `commands::remote` can call into it through the canonical +// 
`crate::commands::chat` path. +pub use interaction::{prune_consensus_voters_for_session, record_agent_answer, record_plan_vote}; + use std::sync::Arc; use serde::{Deserialize, Serialize}; @@ -16,9 +21,14 @@ use claudette::env::WorkspaceEnv; use claudette::git; use crate::agent_mcp_sink::ChatBridgeSink; -use claudette::agent::AgentEvent; use claudette::agent_mcp::bridge::{BridgeHandle, McpBridgeSession}; +// Re-export the shared struct under the existing path so call sites in this +// crate keep working unchanged. The canonical home is `claudette::chat` — +// see that module's docstring for why both bridges must serialize the same +// struct. +pub(crate) use claudette::chat::AgentStreamPayload; + /// Frontend-facing input for a file attachment (base64-encoded). #[derive(Clone, Deserialize)] pub struct AttachmentInput { @@ -60,13 +70,6 @@ pub struct ChatHistoryPage { pub total_count: i64, } -#[derive(Clone, Serialize)] -pub(crate) struct AgentStreamPayload { - pub workspace_id: String, - pub chat_session_id: String, - pub event: AgentEvent, -} - /// How long to wait between emitting `agent-permission-prompt` and firing the /// attention system notification. This is the window in which the webview /// picks up the event, runs the Zustand setter, and paints the question/plan diff --git a/src-tauri/src/commands/chat/send.rs b/src-tauri/src/commands/chat/send.rs index 5ebff419d..ead5310a3 100644 --- a/src-tauri/src/commands/chat/send.rs +++ b/src-tauri/src/commands/chat/send.rs @@ -194,6 +194,7 @@ fn should_defer_persistent_restart_for_state( has_persistent_session && has_running_background_tasks } +#[allow(clippy::too_many_arguments)] async fn apply_task_notification_status( app: &AppHandle, db_path: &std::path::Path, @@ -568,13 +569,11 @@ fn schedule_background_task_wake( duration_ms, .. }) = &event + && let Ok(db) = Database::open(&db_path) + && let (Some(cost), Some(dur)) = (total_cost_usd, duration_ms) + && let Some(ref msg_id) = last_assistant_msg_id { - if let Ok(db) = Database::open(&db_path) - && let (Some(cost), Some(dur)) = (total_cost_usd, duration_ms) - && let Some(ref msg_id) = last_assistant_msg_id - { - let _ = db.update_chat_message_cost(msg_id, *cost, *dur); - } + let _ = db.update_chat_message_cost(msg_id, *cost, *dur); } let is_done = matches!( @@ -819,6 +818,8 @@ fn prepare_user_send( message_id: Option, content: &str, attachments: Option<&[AttachmentInput]>, + author_participant_id: Option, + author_display_name: Option, ) -> Result { let user_msg = ChatMessage { id: message_id.unwrap_or_else(|| uuid::Uuid::new_v4().to_string()), @@ -834,6 +835,8 @@ fn prepare_user_send( output_tokens: None, cache_read_tokens: None, cache_creation_tokens: None, + author_participant_id, + author_display_name, }; let mut att_models: Vec = Vec::new(); @@ -986,12 +989,24 @@ pub async fn steer_queued_chat_message( .ok_or("No active persistent Claude session for this chat")? 
}; + let room_for_user_send = state.rooms.get(&chat_session_id).await; + if let Some(room) = &room_for_user_send { + crate::commands::remote::ensure_host_participant(&state, room).await; + } + let host_author = room_for_user_send.as_ref().map(|_| { + ( + claudette::room::ParticipantId::HOST.to_string(), + state.resolve_host_display_name(), + ) + }); let prepared_user_send = prepare_user_send( &workspace_id, &chat_session_id, message_id, &content, attachments.as_deref(), + host_author.as_ref().map(|(id, _)| id.clone()), + host_author.as_ref().map(|(_, name)| name.clone()), )?; let anchor_msg_id = db @@ -1038,6 +1053,12 @@ pub async fn steer_queued_chat_message( ); return Err(e); } + if let Some(room) = &room_for_user_send { + room.publish(serde_json::json!({ + "event": "chat-message-added", + "payload": &prepared_user_send.user_msg, + })); + } Ok(Some(pre_steer_checkpoint)) } @@ -1085,17 +1106,43 @@ pub async fn send_chat_message( // Save user message to DB. Use the frontend-provided ID so optimistic // UI state (attachments keyed by message ID) stays consistent. + let room_for_user_send = state.rooms.get(&chat_session_id).await; + if let Some(room) = &room_for_user_send { + crate::commands::remote::ensure_host_participant(&state, room).await; + } + let host_author = room_for_user_send.as_ref().map(|_| { + ( + claudette::room::ParticipantId::HOST.to_string(), + state.resolve_host_display_name(), + ) + }); let prepared_user_send = prepare_user_send( &workspace_id, &chat_session_id, message_id, &content, attachments.as_deref(), + host_author.as_ref().map(|(id, _)| id.clone()), + host_author.as_ref().map(|(_, name)| name.clone()), )?; persist_user_send(&db, &prepared_user_send)?; let user_msg = prepared_user_send.user_msg.clone(); let image_attachments = prepared_user_send.cli_atts; + // In collaborative mode, broadcast the user message so other participants + // render it live. Without this, only the agent's responses propagate + // (via agent-stream) — user prompts persist to DB but never reach + // other participants. Frontend dedupes via + // `author_participant_id === selfParticipantId` so the sender's own + // optimistic message isn't duplicated. Solo / 1:1 sessions skip this: + // the local UI already rendered the message optimistically. + if let Some(room) = &room_for_user_send { + room.publish(serde_json::json!({ + "event": "chat-message-added", + "payload": &user_msg, + })); + } + // Resolve allowed tools from permission level. let level = permission_level.as_deref().unwrap_or("full"); if !matches!(level, "readonly" | "standard" | "full") { @@ -1527,6 +1574,8 @@ pub async fn send_chat_message( output_tokens: None, cache_read_tokens: None, cache_creation_tokens: None, + author_participant_id: None, + author_display_name: None, }; if let Err(err) = db.insert_chat_message(&warning) { // Logging-only: a missing warning shouldn't block the turn. @@ -1827,6 +1876,59 @@ pub async fn send_chat_message( let wt_path = worktree_path.clone(); let user_msg_id = user_msg.id.clone(); let repo_id_for_mcp = ws.repository_id.clone(); + // Captured once: in collaborative mode, every event is published to this + // room (where the host's own UI subscribes alongside any joined remote + // clients). In solo mode this is `None` and the bridge keeps emitting + // directly via `app.emit("agent-stream", ...)`. + let room_for_stream = state.rooms.get(&chat_session_id).await; + // The host's own UI is just another subscriber of this room. 
The + // host-side mirror task that re-emits room events into the local + // webview is attached at room *creation* time via + // `RoomRegistry::set_on_create` (installed once in `main.rs::setup`), + // so by the time we get here the host is already listening — even + // if the room was first created by a remote `join_session` whose + // `participants-changed` publish would otherwise have raced ahead + // of any subscriber spawned on the bridge path. + // Acquire the turn lock just before spawning the bridge so we never + // leak the lock across an early `?` propagation. Hard reject — the + // composer in other clients greys out on `turn-started`, so by the + // time a competing send arrives we expect this almost always to fail + // only on genuine races (composer flicker, stale UI). + let host_participant = claudette::room::ParticipantId::host(); + if let Some(room) = &room_for_stream + && let Err(holder) = room.try_acquire_turn(&host_participant).await + { + return Err(format!("turn-locked-by:{}", holder.as_str())); + } + if let Some(room) = &room_for_stream { + // Use the configured collaboration display name so the + // turn-started broadcast matches the author chip stamped on + // chat messages and the roster entry for the host. Hardcoding + // "Host" here would surface inconsistently when the user has + // set a custom collab display name in settings. + let host_display_name = state.resolve_host_display_name(); + let started_at_ms = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_millis() as i64) + .unwrap_or(0); + *room.turn_started_at_ms.lock().await = Some(started_at_ms); + *room.turn_settings.write().await = Some(claudette::room::TurnSettingsSnapshot { + model: agent_settings.model.clone(), + plan_mode: agent_settings.plan_mode, + }); + room.publish(serde_json::json!({ + "event": "turn-started", + "payload": { + "workspace_id": &workspace_id, + "chat_session_id": &chat_session_id, + "participant_id": host_participant.as_str(), + "display_name": host_display_name, + "started_at_ms": started_at_ms, + "model": agent_settings.model.clone(), + "plan_mode": agent_settings.plan_mode, + }, + })); + } drop(ws_env); // consumed by rename_ws_env; notification path rebuilds from DB tokio::spawn(async move { if claimed_rename { @@ -1999,29 +2101,28 @@ pub async fn send_chat_message( &chat_session_id_for_stream, ) .is_some() + && let Ok(db) = Database::open(&db_path) { - if let Ok(db) = Database::open(&db_path) { - let _ = db.update_agent_shell_terminal_tab_status( + let _ = db.update_agent_shell_terminal_tab_status( + &chat_session_id_for_stream, + None, + if start.run_in_background { + "running" + } else { + "starting" + }, + command, + ); + if let Ok(Some(tab)) = + db.get_agent_shell_terminal_tab(&chat_session_id_for_stream) + { + emit_agent_background_task_event( + &app, + AgentBackgroundTaskEventKind::Starting, + &ws_id, &chat_session_id_for_stream, - None, - if start.run_in_background { - "running" - } else { - "starting" - }, - command, + tab, ); - if let Ok(Some(tab)) = - db.get_agent_shell_terminal_tab(&chat_session_id_for_stream) - { - emit_agent_background_task_event( - &app, - AgentBackgroundTaskEventKind::Starting, - &ws_id, - &chat_session_id_for_stream, - tab, - ); - } } } } @@ -2071,6 +2172,33 @@ pub async fn send_chat_message( { if matches!(tool_name.as_str(), "AskUserQuestion" | "ExitPlanMode") { let app_state = app.state::(); + + // Collab consensus snapshot: prompts in a room with + // `consensus_required = true` gate on all unmuted + 
// participants. Muted participants are excluded — they + // can't act on a vote, so they can't hold one up. Late + // joiners are observers only. + let (required_voters, vote_voters_payload) = + if matches!(tool_name.as_str(), "AskUserQuestion" | "ExitPlanMode") + && let Some(room) = + app_state.rooms.get(&chat_session_id_for_stream).await + && *room.consensus_required.read().await + { + let participants = room.participants.read().await; + let voters: std::collections::HashSet = + participants + .values() + .filter(|p| !p.muted) + .map(|p| p.id.clone()) + .collect(); + let payload: Vec<&claudette::room::ParticipantInfo> = + participants.values().filter(|p| !p.muted).collect(); + let json_voters = serde_json::to_value(&payload).unwrap_or_default(); + (voters, Some(json_voters)) + } else { + (std::collections::HashSet::new(), None) + }; + let mut agents = app_state.agents.write().await; if let Some(session) = agents.get_mut(&chat_session_id_for_stream) { session.pending_permissions.insert( @@ -2079,18 +2207,69 @@ pub async fn send_chat_message( request_id: request_id.clone(), tool_name: tool_name.clone(), original_input: input.clone(), + required_voters: required_voters.clone(), + votes: std::collections::HashMap::new(), + question_votes: std::collections::HashMap::new(), }, ); } drop(agents); + + // Snapshot any open plan approval in the room so reconnect + // and lag recovery can rebuild the card even when the + // prompt is not consensus-gated. + if tool_name == "ExitPlanMode" + && let Some(room) = app_state.rooms.get(&chat_session_id_for_stream).await + { + *room.pending_vote.write().await = Some(claudette::room::PendingVote::new( + tool_use_id.clone(), + required_voters.clone(), + input.clone(), + )); + // Broadcast the consensus vote opening so all + // subscribers render the progress rows. Non-consensus + // prompts still use the pending_vote snapshot above, + // but skip the consensus-progress event. + if let Some(voters) = vote_voters_payload.clone() { + room.publish(serde_json::json!({ + "event": "plan-vote-opened", + "payload": { + "chat_session_id": &chat_session_id_for_stream, + "tool_use_id": &tool_use_id, + "required_voters": voters, + }, + })); + } + } + let required_voters_payload = vote_voters_payload.clone(); let payload = serde_json::json!({ "workspace_id": &ws_id, "chat_session_id": &chat_session_id_for_stream, "tool_use_id": tool_use_id, "tool_name": tool_name, "input": input, + "required_voters": required_voters_payload, }); - let _ = app.emit("agent-permission-prompt", &payload); + // In a collaborative room, broadcast the permission + // prompt to every participant so the question / plan + // card renders on both ends. Without this only the + // prompter saw it — remotes saw the tool call appear + // in the activity feed but the actual interactive + // surface never reached them. + // + // Both ExitPlanMode and AskUserQuestion get the + // broadcast treatment. In consensus rooms, both hold the + // CLI control response until the required participants + // vote/answer. Solo / 1:1 sessions keep using the + // original host-local emit. + if let Some(room) = app_state.rooms.get(&chat_session_id_for_stream).await { + room.publish(serde_json::json!({ + "event": "agent-permission-prompt", + "payload": payload, + })); + } else { + let _ = app.emit("agent-permission-prompt", &payload); + } // Fire the system notification after the frontend has the // data it needs to render the card. 
We emit @@ -2260,6 +2439,8 @@ pub async fn send_chat_message( output_tokens: None, cache_read_tokens: None, cache_creation_tokens: None, + author_participant_id: None, + author_display_name: None, }; let _ = db.insert_chat_message(&msg); } @@ -2664,7 +2845,32 @@ pub async fn send_chat_message( chat_session_id: chat_session_id_for_stream.clone(), event, }; - let _ = app.emit("agent-stream", &payload); + // Collab path (room exists) → publish to the room as a JSON-RPC + // envelope. The host's own UI subscribes via `start_share` and + // translates each envelope back to `app.emit(event, payload)`; + // remote forwarders write the envelope to the WebSocket wire + // as-is. Single source of truth across host + remote subscribers. + match &room_for_stream { + Some(room) => room.publish(serde_json::json!({ + "event": "agent-stream", + "payload": &payload, + })), + None => { + let _ = app.emit("agent-stream", &payload); + } + } + } + // Stream is done — release the turn lock and tell other participants + // their composers can re-enable. Solo sessions (`None`) skip this. + if let Some(room) = &room_for_stream { + room.release_turn().await; + room.publish(serde_json::json!({ + "event": "turn-ended", + "payload": { + "workspace_id": &ws_id, + "chat_session_id": &chat_session_id_for_stream, + }, + })); } }); diff --git a/src-tauri/src/commands/mod.rs b/src-tauri/src/commands/mod.rs index 9e0377dc9..78c9c5f2e 100644 --- a/src-tauri/src/commands/mod.rs +++ b/src-tauri/src/commands/mod.rs @@ -21,6 +21,7 @@ pub mod remote; pub mod repository; pub mod scm; pub mod settings; +pub mod share; pub mod shell; pub mod slash_commands; pub mod terminal; diff --git a/src-tauri/src/commands/remote.rs b/src-tauri/src/commands/remote.rs index f62969031..f71dc590d 100644 --- a/src-tauri/src/commands/remote.rs +++ b/src-tauri/src/commands/remote.rs @@ -1,14 +1,18 @@ use serde::Serialize; -use tauri::{AppHandle, State}; +use tauri::{AppHandle, Manager, State}; use claudette::db::Database; +use claudette::room::{ + ParticipantId, ParticipantInfo, PendingQuestionSnapshot, PendingVoteSnapshot, +}; -use crate::remote::{DiscoveredServer, RemoteConnectionInfo, RemoteConnectionManager}; +use crate::remote::{ + DiscoveredServer, RemoteConnectionInfo, RemoteConnectionManager, participant_id_for, +}; use crate::state::AppState; #[cfg(feature = "server")] use crate::state::LocalServerState; use crate::transport::ws::WebSocketTransport; -#[cfg(feature = "server")] use claudette::process::CommandWindowExt as _; #[cfg(feature = "server")] use tokio::io::{AsyncBufReadExt, BufReader}; @@ -18,6 +22,20 @@ pub struct PairResult { pub connection: RemoteConnectionInfo, pub server_name: String, pub initial_data: Option, + /// Participant id this connection has on the remote server. + /// Frontend stores it and uses it to detect "this message is mine" + /// in collaborative sessions. 
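The `agent-stream` hunk above picks between publishing a room envelope and emitting straight to the local webview. A minimal sketch of that decision, with `Sink` standing in for the real `Room`/`AppHandle` pair; only the envelope keys (`event`, `payload`, `"agent-stream"`) are copied from the publish call, everything else is illustrative.

use serde_json::{json, Value};

enum Sink {
    Room(Vec<Value>),      // stand-in for Room::publish into the broadcast channel
    LocalEmit(Vec<Value>), // stand-in for app.emit("agent-stream", ..)
}

fn forward(sink: &mut Sink, payload: Value) {
    match sink {
        // Collaborative path: wrap the payload in the envelope that both the
        // host mirror task and the WebSocket forwarders consume.
        Sink::Room(events) => events.push(json!({
            "event": "agent-stream",
            "payload": payload,
        })),
        // Solo path: the payload goes straight to the local webview.
        Sink::LocalEmit(events) => events.push(payload),
    }
}

fn main() {
    let mut collab = Sink::Room(Vec::new());
    forward(&mut collab, json!({ "chat_session_id": "chat-1", "event": "text-delta" }));
    if let Sink::Room(events) = &collab {
        assert_eq!(events[0]["event"], "agent-stream");
    }
}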
+ pub participant_id: Option, +} + +#[derive(Serialize)] +pub struct CollaborationSessionSnapshot { + pub participants: Vec, + pub turn_holder: Option, + pub turn_started_at_ms: Option, + pub turn_settings: Option, + pub pending_vote: Option, + pub pending_question: Option, } #[tauri::command] @@ -28,19 +46,49 @@ pub async fn list_remote_connections( let connections = db.list_remote_connections().map_err(|e| e.to_string())?; Ok(connections .into_iter() - .map(|c| RemoteConnectionInfo { - id: c.id, - name: c.name, - host: c.host, - port: c.port, - session_token: c.session_token, - cert_fingerprint: c.cert_fingerprint, - auto_connect: c.auto_connect, - created_at: c.created_at, + .map(|c| { + let participant_id = participant_id_for(c.session_token.as_deref()); + RemoteConnectionInfo { + id: c.id, + name: c.name, + host: c.host, + port: c.port, + session_token: c.session_token, + cert_fingerprint: c.cert_fingerprint, + auto_connect: c.auto_connect, + created_at: c.created_at, + participant_id, + } }) .collect()) } +#[tauri::command] +pub async fn collaboration_session_snapshot( + chat_session_id: String, + state: State<'_, AppState>, +) -> Result, String> { + let Some(room) = state.rooms.get(&chat_session_id).await else { + return Ok(None); + }; + + if ensure_host_participant(&state, &room).await { + publish_participants_changed(&room).await; + } + + Ok(Some(CollaborationSessionSnapshot { + participants: room.participant_list().await, + turn_holder: room.current_turn_holder().await.map(|p| p.0), + turn_started_at_ms: *room.turn_started_at_ms.lock().await, + turn_settings: room.turn_settings.read().await.clone(), + pending_vote: state.rooms.pending_vote_snapshot(&chat_session_id).await, + pending_question: state + .rooms + .pending_question_snapshot(&chat_session_id) + .await, + })) +} + #[tauri::command] pub async fn pair_with_server( host: String, @@ -61,34 +109,59 @@ pub async fn pair_with_server( .authenticate_pairing(&pairing_token, &hostname) .await?; - let connection_id = uuid::Uuid::new_v4().to_string(); - - // Persist to DB. + // Persist to DB. If we already have a row for this host:port, + // refresh it in place rather than inserting a duplicate. Re-pairing + // against the same nearby server, or pasting another connection + // string from a host we already know, otherwise produces a second + // sidebar entry while the first is left holding a dead session + // token (the user-visible "stale, unusable connection" symptom). let db = Database::open(&state.db_path).map_err(|e| e.to_string())?; - let db_conn = claudette::model::RemoteConnection { - id: connection_id.clone(), - name: auth.server_name.clone(), - host: host.clone(), - port, - session_token: auth.session_token.clone(), - cert_fingerprint: Some(cert_fingerprint.clone()), - auto_connect: false, - created_at: String::new(), - }; - db.insert_remote_connection(&db_conn) - .map_err(|e| e.to_string())?; - - // Re-fetch to get the DB-generated created_at timestamp. - let saved = db - .get_remote_connection(&connection_id) + let session_token_str = auth.session_token.clone().unwrap_or_default(); + let saved = if let Some(existing) = db + .find_remote_connection_by_host_port(&host, port) .map_err(|e| e.to_string())? - .ok_or("Failed to re-read saved connection")?; + { + db.update_remote_connection_pairing( + &existing.id, + &auth.server_name, + &session_token_str, + &cert_fingerprint, + ) + .map_err(|e| e.to_string())?; + db.get_remote_connection(&existing.id) + .map_err(|e| e.to_string())? 
+ .ok_or("Failed to re-read updated connection")? + } else { + let connection_id = uuid::Uuid::new_v4().to_string(); + let db_conn = claudette::model::RemoteConnection { + id: connection_id.clone(), + name: auth.server_name.clone(), + host: host.clone(), + port, + session_token: auth.session_token.clone(), + cert_fingerprint: Some(cert_fingerprint.clone()), + auto_connect: false, + created_at: String::new(), + }; + db.insert_remote_connection(&db_conn) + .map_err(|e| e.to_string())?; + // Re-fetch to get the DB-generated created_at timestamp. + db.get_remote_connection(&connection_id) + .map_err(|e| e.to_string())? + .ok_or("Failed to re-read saved connection")? + }; let info = RemoteConnectionInfo { id: saved.id, name: saved.name, host: saved.host, port: saved.port, + // Prefer the auth-returned id when present; fall back to deriving + // from the (just-issued) session token. They should always match. + participant_id: auth + .participant_id + .clone() + .or_else(|| participant_id_for(saved.session_token.as_deref())), session_token: saved.session_token, cert_fingerprint: saved.cert_fingerprint, auto_connect: saved.auto_connect, @@ -113,6 +186,7 @@ pub async fn pair_with_server( connection: info, server_name: auth.server_name, initial_data: remote_data, + participant_id: auth.participant_id, }) } @@ -148,6 +222,10 @@ pub async fn connect_remote( name: auth.server_name.clone(), host: conn.host.clone(), port: conn.port, + participant_id: auth + .participant_id + .clone() + .or_else(|| participant_id_for(conn.session_token.as_deref())), session_token: conn.session_token.clone(), cert_fingerprint: conn.cert_fingerprint.clone(), auto_connect: conn.auto_connect, @@ -244,6 +322,20 @@ pub async fn send_remote_command( "params": params, }); let response = manager.send(&connection_id, request).await?; + // Propagate JSON-RPC errors as Rust `Err`s so the frontend's + // sendRemoteCommand promise actually rejects on remote failures. + // Previously we just dropped the error and returned `Null`, which + // caused silent UX bugs — e.g. read_plan_file returning "Unknown + // method" was indistinguishable from "the plan is empty" because + // the frontend saw a clean `null` resolution. + if let Some(err) = response.get("error") { + let message = err + .get("message") + .and_then(|v| v.as_str()) + .unwrap_or("Remote returned an error") + .to_string(); + return Err(message); + } Ok(response .get("result") .cloned() @@ -302,7 +394,13 @@ pub async fn start_local_server(state: State<'_, AppState>) -> Result) -> Result) -> Result) -> Result, +) -> Result<(), String> { + let room = state + .rooms + .get(&chat_session_id) + .await + .ok_or("Session is not collaborative")?; + let pid = claudette::room::ParticipantId(participant_id); + if pid.is_host() { + return Err("Cannot kick the host".into()); + } + room.remove_participant(&pid).await; + room.publish(serde_json::json!({ + "event": "participants-changed", + "payload": { + "chat_session_id": &chat_session_id, + "participants": room.participant_list().await, + }, + })); + room.publish(serde_json::json!({ + "event": "participant-kicked", + "payload": { + "chat_session_id": &chat_session_id, + "participant_id": pid.as_str(), + }, + })); + Ok(()) +} + +/// Host-only: mute (or un-mute) a participant. Muted participants' RPCs +/// for `send_chat_message` and `vote_plan_approval` are rejected at the +/// server boundary, but they still receive room broadcasts so they can +/// observe what's happening. 
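A minimal sketch of the moderation rules the kick/mute commands enforce: the host id is rejected up front, and muting only flips a flag so the participant keeps receiving broadcasts. Plain strings stand in for `ParticipantId`, and the literal `"host"` is illustrative.

use std::collections::HashMap;

struct Participant {
    muted: bool,
}

fn set_muted(
    roster: &mut HashMap<String, Participant>,
    participant_id: &str,
    muted: bool,
) -> Result<(), String> {
    if participant_id == "host" {
        return Err("Cannot mute the host".into());
    }
    match roster.get_mut(participant_id) {
        Some(p) => {
            p.muted = muted;
            Ok(())
        }
        None => Err("Participant not found in session".into()),
    }
}

fn main() {
    let mut roster = HashMap::from([("p-abc".to_string(), Participant { muted: false })]);
    assert!(set_muted(&mut roster, "host", true).is_err());
    assert!(set_muted(&mut roster, "p-abc", true).is_ok());
    assert!(roster["p-abc"].muted);
}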
+#[tauri::command] +pub async fn mute_participant( + chat_session_id: String, + participant_id: String, + muted: bool, + state: State<'_, AppState>, +) -> Result<(), String> { + let room = state + .rooms + .get(&chat_session_id) + .await + .ok_or("Session is not collaborative")?; + let pid = claudette::room::ParticipantId(participant_id); + if pid.is_host() { + return Err("Cannot mute the host".into()); + } + if !room.set_muted(&pid, muted).await { + return Err("Participant not found in session".into()); + } + room.publish(serde_json::json!({ + "event": "participants-changed", + "payload": { + "chat_session_id": &chat_session_id, + "participants": room.participant_list().await, + }, + })); + Ok(()) +} + +// Removed: `start_collaborative_share`, `stop_collaborative_share`, and +// `build_collab_connection_string`. These were the per-chat-session +// collab-share entry points from the previous design. The new model +// (workspace-scoped `Share`s) supersedes them — see +// `crate::commands::share`. The host-side subscribers below are now +// attached via `RoomRegistry::set_on_create`, installed once at app +// startup in `main.rs`, so a fresh room gets a host subscriber before +// any handler can publish into it. + +/// Capture broadcast receivers and spawn both host-side mirror tasks for +/// `room`. **Must** be called synchronously inside the `RoomRegistry` +/// `on_create` hook so the receivers attach before the first publish — +/// `tokio::sync::broadcast` does not buffer for late subscribers. +#[cfg(feature = "server")] +pub fn attach_host_room_subscribers(app: AppHandle, room: std::sync::Arc) { + // `subscribe()` is sync and returns a `Receiver` immediately; doing + // this *before* the `tokio::spawn` calls inside the helpers is the + // load-bearing detail. The helpers re-create their receivers + // internally today, so we just call them in order — they capture + // their receivers synchronously as their first statement. 
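A small demonstration of the `tokio::sync::broadcast` property the comment above calls load-bearing: receivers only see events sent after they subscribe, so the host mirror must attach inside the synchronous `on_create` hook. Plain strings stand in for room events.

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (tx, _keepalive) = broadcast::channel::<String>(16);

    // A receiver created *before* the publish sees the event...
    let mut early = tx.subscribe();
    tx.send("participants-changed".to_string()).unwrap();
    assert_eq!(early.recv().await.unwrap(), "participants-changed");

    // ...but a receiver created *after* it does not: broadcast channels do
    // not replay history for late subscribers.
    let mut late = tx.subscribe();
    tx.send("turn-started".to_string()).unwrap();
    assert_eq!(late.recv().await.unwrap(), "turn-started");
}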
+ let chat_session_id = room.chat_session_id.clone(); + spawn_host_event_subscriber(app.clone(), room.clone()); + spawn_host_vote_resolver(app, room, chat_session_id); +} + +#[cfg(feature = "server")] +fn spawn_host_event_subscriber(app: AppHandle, room: std::sync::Arc) { + use tauri::Emitter; + let mut rx = room.subscribe(); + tokio::spawn(async move { + loop { + match rx.recv().await { + Ok(evt) => { + let Some(name) = evt.0.get("event").and_then(|v| v.as_str()) else { + continue; + }; + let payload = evt + .0 + .get("payload") + .cloned() + .unwrap_or(serde_json::Value::Null); + if name == "participants-changed" { + let state = app.state::(); + if ensure_host_participant(&state, &room).await { + publish_participants_changed(&room).await; + continue; + } + } + let _ = app.emit(name, payload); + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => { + let _ = app.emit("resync-required", serde_json::Value::Null); + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + } + } + }); +} + +pub(crate) async fn ensure_host_participant( + state: &AppState, + room: &std::sync::Arc, +) -> bool { + let host_id = ParticipantId::host(); + let display_name = state.resolve_host_display_name(); + let mut participants = room.participants.write().await; + match participants.get_mut(&host_id) { + Some(existing) => { + if existing.display_name == display_name && existing.is_host && !existing.muted { + false + } else { + existing.display_name = display_name; + existing.is_host = true; + existing.muted = false; + true + } + } + None => { + participants.insert( + host_id.clone(), + ParticipantInfo { + id: host_id, + display_name, + is_host: true, + joined_at: now_unix_ms(), + muted: false, + }, + ); + true + } + } +} + +pub(crate) async fn publish_participants_changed(room: &std::sync::Arc) { + room.publish(serde_json::json!({ + "event": "participants-changed", + "payload": { + "chat_session_id": room.chat_session_id, + "participants": room.participant_list().await, + }, + })); +} + +fn now_unix_ms() -> i64 { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_millis() as i64) + .unwrap_or(0) +} + +#[cfg(feature = "server")] +fn spawn_host_vote_resolver( + app: AppHandle, + room: std::sync::Arc, + chat_session_id: String, +) { + let mut rx = room.subscribe(); + tokio::spawn(async move { + loop { + let evt = match rx.recv().await { + Ok(e) => e, + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue, + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + }; + let Some(name) = evt.0.get("event").and_then(|v| v.as_str()) else { + continue; + }; + let payload = match evt.0.get("payload") { + Some(p) => p, + None => continue, + }; + match name { + // Apply non-host plan votes locally so resolution finalizes + // on the host side. Host votes flow through + // `submit_plan_approval` directly and run resolution inline. 
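The mirror loop above maps each broadcast error to a deliberate action: lag means events were dropped (ask the UI to resync), `Closed` means the room is gone (stop the task). A runnable toy version assuming only tokio's broadcast channel; the event names and the resync marker string are illustrative.

use tokio::sync::broadcast;

async fn mirror_events(mut rx: broadcast::Receiver<String>) -> Vec<String> {
    let mut mirrored = Vec::new();
    loop {
        match rx.recv().await {
            Ok(event) => mirrored.push(event),
            Err(broadcast::error::RecvError::Lagged(skipped)) => {
                // Events were dropped; signal a full resync instead of
                // pretending the stream is complete.
                mirrored.push(format!("resync-required (skipped {skipped})"));
            }
            Err(broadcast::error::RecvError::Closed) => break,
        }
    }
    mirrored
}

#[tokio::main]
async fn main() {
    let (tx, rx) = broadcast::channel(16);
    tx.send("participants-changed".to_string()).unwrap();
    tx.send("turn-started".to_string()).unwrap();
    drop(tx); // closes the channel so the loop exits
    let seen = mirror_events(rx).await;
    assert_eq!(seen.len(), 2);
}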
+ "plan-vote-cast" => { + let participant_id = payload + .get("participant_id") + .and_then(|v| v.as_str()) + .unwrap_or(""); + if participant_id == claudette::room::ParticipantId::HOST { + continue; + } + let tool_use_id = payload + .get("tool_use_id") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + let session_id = payload + .get("chat_session_id") + .and_then(|v| v.as_str()) + .unwrap_or(&chat_session_id) + .to_string(); + let vote: claudette::room::Vote = match payload + .get("vote") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + { + Some(v) => v, + None => continue, + }; + let participant = claudette::room::ParticipantId(participant_id.to_string()); + let app_state = tauri::Manager::state::(&app); + if let Err(e) = crate::commands::chat::record_plan_vote( + &app_state, + &session_id, + &tool_use_id, + &participant, + false, // is_host + vote, + false, // already broadcast by remote side + ) + .await + { + eprintln!("[collab] resolver: record_plan_vote failed: {e}"); + } + } + "agent-answer-submitted" => { + let participant_id = payload + .get("participant_id") + .and_then(|v| v.as_str()) + .unwrap_or(""); + if participant_id == claudette::room::ParticipantId::HOST { + continue; + } + let tool_use_id = payload + .get("tool_use_id") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + let session_id = payload + .get("chat_session_id") + .and_then(|v| v.as_str()) + .unwrap_or(&chat_session_id) + .to_string(); + let answers: std::collections::HashMap = payload + .get("answers") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .unwrap_or_default(); + let annotations = payload.get("annotations").cloned(); + let app_state = tauri::Manager::state::(&app); + let participant = claudette::room::ParticipantId(participant_id.to_string()); + if let Err(e) = crate::commands::chat::record_agent_answer( + &app_state, + &session_id, + &tool_use_id, + &participant, + answers, + annotations, + false, + ) + .await + { + eprintln!("[collab] resolver: resolve_agent_answer failed: {e}"); + } + } + // A participant joined or left — re-evaluate every open + // consensus vote in the session so a departure doesn't + // deadlock the agent. Implicit-abstain semantics: see + // `prune_consensus_voters_for_session` for the rule. + "participants-changed" => { + let session_id = payload + .get("chat_session_id") + .and_then(|v| v.as_str()) + .unwrap_or(&chat_session_id) + .to_string(); + let participants_array = match payload.get("participants") { + Some(p) => p, + None => continue, + }; + let parsed: Vec = + match serde_json::from_value(participants_array.clone()) { + Ok(v) => v, + Err(_) => continue, + }; + let current: std::collections::HashSet = parsed + .into_iter() + .filter(|p| p.is_host || !p.muted) + .map(|p| p.id) + .collect(); + let app_state = tauri::Manager::state::(&app); + if let Err(e) = crate::commands::chat::prune_consensus_voters_for_session( + &app_state, + &session_id, + ¤t, + ) + .await + { + eprintln!("[collab] resolver: prune_consensus_voters failed: {e}"); + } + } + _ => continue, + } + } + }); +} diff --git a/src-tauri/src/commands/share.rs b/src-tauri/src/commands/share.rs new file mode 100644 index 000000000..ec791da6d --- /dev/null +++ b/src-tauri/src/commands/share.rs @@ -0,0 +1,296 @@ +//! Share-management Tauri commands. +//! +//! A *share* is a workspace-scoped authorization grant the host hands out +//! to remote users. Each share holds its own pairing token and a list of +//! 
workspace ids; remote clients pairing with that token are issued +//! session tokens whose RPCs are gated on the share's scope. +//! +//! Share mutations lock the shared `Arc>` stored in +//! `AppState::share_server_config`, then persist the same config to +//! `server.toml`. Stopping a share removes it from that live config, which +//! invalidates its session tokens and causes both RPC handlers and long-lived +//! event forwarders to stop serving already-connected clients. + +use serde::Serialize; +use tauri::{AppHandle, State}; + +#[cfg(feature = "server")] +use std::sync::Arc; +#[cfg(feature = "server")] +use tauri::Manager; + +use crate::state::AppState; + +#[derive(Serialize, Debug, Clone)] +pub struct ShareSummary { + pub id: String, + pub label: Option, + pub allowed_workspace_ids: Vec, + pub collaborative: bool, + pub consensus_required: bool, + pub created_at: String, + /// Number of session tokens currently issued for this share — i.e. how + /// many remote clients have paired. Useful for the UI's "1 connected / + /// 0 connected" hint. + pub session_count: usize, + /// `claudette://host:port/` — the string the user gives + /// to people they want to grant this share's scope to. + pub connection_string: String, +} + +#[derive(Serialize, Debug)] +pub struct StartShareResult { + pub share: ShareSummary, + /// `true` if this call also booted the in-process server (first share + /// of the app's lifetime). Subsequent shares return `false`. + pub server_started: bool, +} + +/// Mint a new workspace-scoped share. Boots the in-process server on +/// first call. Returns the new share including its connection string. +#[tauri::command] +pub async fn start_share( + label: Option, + workspace_ids: Vec, + collaborative: bool, + consensus_required: bool, + app: AppHandle, + state: State<'_, AppState>, +) -> Result { + #[cfg(not(feature = "server"))] + { + let _ = ( + label, + workspace_ids, + collaborative, + consensus_required, + app, + state, + ); + return Err("Server feature disabled".into()); + } + + #[cfg(feature = "server")] + { + if workspace_ids.is_empty() { + return Err("A share must include at least one workspace".into()); + } + // Refuse to coexist with the legacy subprocess server — they bind + // the same port. Restarting the app clears that legacy server state; + // the current share modal manages only workspace-scoped shares. + if state.local_server.read().await.is_some() { + return Err( + "Stop the legacy local server first by restarting Claudette, then create this workspace share.".into(), + ); + } + + let server_started = ensure_share_server(&app, &state).await?; + + // Mutate the shared config: append the new share, persist to disk. + let summary = { + let cfg_arc = state + .share_server_config + .read() + .await + .clone() + .ok_or("Share server not running")?; + let mut cfg = cfg_arc.lock().await; + let entry = cfg + .create_share(label, workspace_ids, collaborative, consensus_required) + .clone(); + // Persist so a restart preserves active shares. Failures here + // are non-fatal: the in-memory share is still valid for this + // session. + let _ = cfg.save(&claudette_server::default_config_path()); + share_summary_from_entry(&entry, cfg.server.port) + }; + + Ok(StartShareResult { + share: summary, + server_started, + }) + } +} + +/// Revoke a share by id. Removes it from the live config (which makes the +/// per-RPC `share_id` lookup in `handler.rs` fail for every session token +/// issued from this share — that's our immediate-revocation guarantee). 
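A toy model of the immediate-revocation guarantee described above: handlers consult the same shared config on every request, so dropping the share entry is enough to reject the next RPC from any of its session tokens. `SharedConfig` here is a bare map, not the real `ServerConfig`.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// share_id -> allowed workspace ids (illustrative shape only).
type SharedConfig = Arc<Mutex<HashMap<String, Vec<String>>>>;

fn revoke_share(config: &SharedConfig, share_id: &str) -> bool {
    config.lock().unwrap().remove(share_id).is_some()
}

fn is_request_allowed(config: &SharedConfig, share_id: &str, workspace_id: &str) -> bool {
    config
        .lock()
        .unwrap()
        .get(share_id)
        .map(|ws| ws.iter().any(|id| id == workspace_id))
        .unwrap_or(false)
}

fn main() {
    let config: SharedConfig = Arc::new(Mutex::new(HashMap::from([(
        "share-1".to_string(),
        vec!["ws-a".to_string()],
    )])));
    assert!(is_request_allowed(&config, "share-1", "ws-a"));
    revoke_share(&config, "share-1");
    // Same Arc, so already-issued tokens are rejected on their next request.
    assert!(!is_request_allowed(&config, "share-1", "ws-a"));
}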
+#[tauri::command] +pub async fn stop_share(share_id: String, state: State<'_, AppState>) -> Result<(), String> { + #[cfg(not(feature = "server"))] + { + let _ = (share_id, state); + return Err("Server feature disabled".into()); + } + + #[cfg(feature = "server")] + { + let cfg_arc = state + .share_server_config + .read() + .await + .clone() + .ok_or("No active shares")?; + let mut cfg = cfg_arc.lock().await; + if !cfg.revoke_share(&share_id) { + return Err("Unknown share id".into()); + } + let _ = cfg.save(&claudette_server::default_config_path()); + Ok(()) + } +} + +/// Snapshot the active shares for the UI. Mostly read-only metadata — +/// the connection strings are recomputed on each call from the live +/// host name + port + pairing token. +#[tauri::command] +pub async fn list_shares(state: State<'_, AppState>) -> Result, String> { + #[cfg(not(feature = "server"))] + { + let _ = state; + return Ok(Vec::new()); + } + + #[cfg(feature = "server")] + { + if let Some(cfg_arc) = state.share_server_config.read().await.clone() { + let cfg = cfg_arc.lock().await; + return Ok(share_summaries_from_config(&cfg)); + } + + // Startup share hydration is kicked off asynchronously during Tauri + // setup. If the frontend asks for the count before that task has + // populated `share_server_config`, read the persisted config directly + // so the sidebar share indicator is still reliable on first paint. + let cfg_path = claudette_server::default_config_path(); + if !cfg_path.exists() { + return Ok(Vec::new()); + } + let cfg = claudette_server::auth::ServerConfig::load_or_create(&cfg_path) + .map_err(|e| format!("Failed to load server config: {e}"))?; + Ok(share_summaries_from_config(&cfg)) + } +} + +#[cfg(feature = "server")] +fn share_summary_from_entry(entry: &claudette_server::auth::ShareEntry, port: u16) -> ShareSummary { + let host = gethostname::gethostname().to_string_lossy().to_string(); + ShareSummary { + id: entry.id.clone(), + label: entry.label.clone(), + allowed_workspace_ids: entry.allowed_workspace_ids.clone(), + collaborative: entry.collaborative, + consensus_required: entry.consensus_required, + created_at: entry.created_at.clone(), + session_count: entry.sessions.len(), + connection_string: format!("claudette://{}:{}/{}", host, port, entry.pairing_token), + } +} + +#[cfg(feature = "server")] +fn share_summaries_from_config(config: &claudette_server::auth::ServerConfig) -> Vec { + config + .list_shares() + .iter() + .map(|e| share_summary_from_entry(e, config.server.port)) + .collect() +} + +#[cfg(feature = "server")] +async fn ensure_share_server(app: &AppHandle, state: &AppState) -> Result { + // Check & set the running flag inside one critical section so two + // concurrent `start_share` calls don't both spawn a server. + let mut running = state.collab_server_running.write().await; + if *running { + return Ok(false); + } + + // Build (or load from disk) the shared config arc and stash it on + // AppState so the new commands can mutate it. + let cfg_path = claudette_server::default_config_path(); + let cfg = claudette_server::auth::ServerConfig::load_or_create(&cfg_path) + .map_err(|e| format!("Failed to load server config: {e}"))?; + let cfg_arc = Arc::new(tokio::sync::Mutex::new(cfg)); + *state.share_server_config.write().await = Some(Arc::clone(&cfg_arc)); + + // Spawn the in-process server with the shared room registry AND the + // shared config. 
Any future `start_share` mutates the config Arc; the + // server's per-request revocation check sees the updated `shares` + // list immediately because it's the same `Arc`. + // + // The workspace-event bus is also shared so `archive_workspace` (and + // any future workspace lifecycle command) on the host side can push + // events that reach connected remotes immediately, regardless of + // whether they've joined any chat room yet. + let rooms = std::sync::Arc::clone(&state.rooms); + let workspace_events = std::sync::Arc::clone(&state.workspace_events); + let cfg_for_server = Arc::clone(&cfg_arc); + let app_for_server = app.clone(); + let opts = claudette_server::ServerOptions { + existing_config: Some(cfg_for_server), + ..Default::default() + }; + tokio::spawn(async move { + if let Err(e) = + claudette_server::run_with_rooms_and_events(opts, rooms, Some(workspace_events)).await + { + eprintln!("[share] in-process server exited: {e}"); + } + let state = app_for_server.state::(); + *state.collab_server_running.write().await = false; + }); + + // The host event subscriber attaches via `RoomRegistry::set_on_create`, + // installed once at app startup in `main.rs::setup`. Each new room + // gets a host-side mirror task synchronously at creation time, before + // any handler can publish into it (see the on_create hook docstring + // in `src/room.rs` for why this ordering is load-bearing). + *running = true; + Ok(true) +} + +/// Hydrate persisted shares from disk on app startup. +/// +/// Without this, shares written to `~/.claudette/server.toml` from a prior +/// app run are durable on disk but invisible to `list_shares` (which reads +/// the in-memory `share_server_config`, only ever populated by +/// `ensure_share_server`). The user-facing symptom: opening the share +/// modal after relaunch shows "No active shares", but the saved pairing +/// strings still work, and the moment the user mints any new share the +/// old ones reappear "magically" because that flow finally loads disk. +/// +/// We peek at the saved config first and only spawn the in-process +/// server if at least one share exists — avoiding an idle listener on +/// the share port for users who have never minted a share. If the user +/// later mints their first share, `ensure_share_server` handles the +/// boot path as before. +#[cfg(feature = "server")] +pub async fn hydrate_persisted_shares(app: &AppHandle, state: &AppState) -> Result<(), String> { + let cfg_path = claudette_server::default_config_path(); + if !cfg_path.exists() { + return Ok(()); + } + let cfg = match claudette_server::auth::ServerConfig::load_or_create(&cfg_path) { + Ok(c) => c, + Err(e) => { + // Non-fatal: corrupt / unreadable config shouldn't block app + // launch. Surface to logs and let the user re-mint shares. 
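A stripped-down sketch of the check-and-set guard `ensure_share_server` relies on: the flag is tested and flipped under one lock, so only one of two racing `start_share` calls boots the server. `ServerFlag`/`claim_boot` are illustrative names, and a blocking `std::sync::Mutex` stands in for the async `RwLock`.

use std::sync::Mutex;

struct ServerFlag {
    running: Mutex<bool>,
}

impl ServerFlag {
    /// Returns true only for the caller that should actually spawn the server.
    fn claim_boot(&self) -> bool {
        let mut running = self.running.lock().unwrap();
        if *running {
            false
        } else {
            *running = true;
            true
        }
    }
}

fn main() {
    let flag = ServerFlag { running: Mutex::new(false) };
    assert!(flag.claim_boot());  // first start_share boots the server
    assert!(!flag.claim_boot()); // second one reuses the running instance
}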
+ eprintln!("[share] Failed to load persisted shares: {e}"); + return Ok(()); + } + }; + if cfg.shares.is_empty() { + return Ok(()); + } + eprintln!( + "[share] Hydrating {} persisted share(s); booting in-process server", + cfg.shares.len() + ); + ensure_share_server(app, state).await?; + Ok(()) +} + +#[cfg(not(feature = "server"))] +pub async fn hydrate_persisted_shares(_app: &AppHandle, _state: &AppState) -> Result<(), String> { + Ok(()) +} diff --git a/src-tauri/src/commands/workspace.rs b/src-tauri/src/commands/workspace.rs index 8f2e86894..9e53a98a8 100644 --- a/src-tauri/src/commands/workspace.rs +++ b/src-tauri/src/commands/workspace.rs @@ -141,10 +141,46 @@ pub async fn fork_workspace_at_checkpoint( .await .map_err(|e| e.to_string())?; + let forked_workspace = outcome.workspace.clone(); + + #[cfg(feature = "server")] + { + if let Some(cfg_arc) = state.share_server_config.read().await.clone() { + let mut cfg = cfg_arc.lock().await; + let mut changed = false; + for share in &mut cfg.shares { + let includes_source = share + .allowed_workspace_ids + .iter() + .any(|id| id == &workspace_id); + let includes_fork = share + .allowed_workspace_ids + .iter() + .any(|id| id == &forked_workspace.id); + if includes_source && !includes_fork { + share + .allowed_workspace_ids + .push(forked_workspace.id.clone()); + changed = true; + } + } + if changed { + let _ = cfg.save(&claudette_server::default_config_path()); + } + } + } + + state + .workspace_events + .publish(claudette::workspace_events::WorkspaceEvent::Forked { + source_workspace_id: workspace_id.clone(), + workspace: forked_workspace.clone(), + }); + crate::tray::rebuild_tray(&app); Ok(ForkWorkspaceResult { - workspace: outcome.workspace, + workspace: forked_workspace, session_resumed: outcome.session_resumed, }) } @@ -349,6 +385,18 @@ pub(crate) async fn archive_workspace_inner( let _ = app.emit("mcp-status-cleared", &out.repository_id); } + // Notify connected remotes so they remove the workspace from their + // sidebar live, instead of waiting for the next reconnect to filter + // it out. The forwarder in `claudette-server::ws` filters by the + // connection's allowed-workspaces scope before delivering. + state + .workspace_events + .publish(claudette::workspace_events::WorkspaceEvent::Archived { + workspace_id: id.to_string(), + }); + + crate::tray::rebuild_tray(app); + Ok(ArchiveWorkspaceOutput { delete_branch, branch_deleted: out.branch_deleted, diff --git a/src-tauri/src/main.rs b/src-tauri/src/main.rs index 6d9637174..8d18b25a2 100644 --- a/src-tauri/src/main.rs +++ b/src-tauri/src/main.rs @@ -495,6 +495,124 @@ fn main() { } }); + // Wire the host-side `Room` subscriber to fire on every new + // room creation. This must run before any handler that calls + // `RoomRegistry::get_or_create` — i.e. before the embedded + // collab server starts taking connections — otherwise the + // host UI misses the very first `participants-changed` event + // emitted by `handle_join_session`. 
+ #[cfg(feature = "server")] + { + let rooms = app.state::().rooms.clone(); + let app_for_hook = app.handle().clone(); + rooms.set_on_create(move |room| { + commands::remote::attach_host_room_subscribers(app_for_hook.clone(), room); + }); + + let rooms = app.state::().rooms.clone(); + let app_for_snapshot = app.handle().clone(); + rooms.set_pending_vote_snapshot_provider(move |chat_session_id| { + let app = app_for_snapshot.clone(); + async move { + let app_state = app.state::(); + let plan_file_path = claudette::db::Database::open(&app_state.db_path) + .ok() + .and_then(|db| { + db.latest_plan_file_path_for_session(&chat_session_id) + .ok() + .flatten() + }); + let (tool_use_id, pending) = { + let agents = app_state.agents.read().await; + let session = agents.get(&chat_session_id)?; + session + .pending_permissions + .iter() + .find(|(_, pending)| pending.tool_name == "ExitPlanMode") + .map(|(tool_use_id, pending)| { + (tool_use_id.clone(), pending.clone()) + }) + }?; + let room = app_state.rooms.get(&chat_session_id).await?; + let participants = room.participant_list().await; + let required_voters = pending + .required_voters + .iter() + .filter_map(|id| participants.iter().find(|p| &p.id == id).cloned()) + .collect(); + let votes = pending + .votes + .into_iter() + .map(|(id, vote)| (id.0, vote)) + .collect(); + Some(claudette::room::PendingVoteSnapshot { + tool_use_id, + required_voters, + votes, + input: pending.original_input, + plan_file_path, + }) + } + }); + + let rooms = app.state::().rooms.clone(); + let app_for_question_snapshot = app.handle().clone(); + rooms.set_pending_question_snapshot_provider(move |chat_session_id| { + let app = app_for_question_snapshot.clone(); + async move { + let app_state = app.state::(); + let (tool_use_id, pending) = { + let agents = app_state.agents.read().await; + let session = agents.get(&chat_session_id)?; + session + .pending_permissions + .iter() + .find(|(_, pending)| pending.tool_name == "AskUserQuestion") + .map(|(tool_use_id, pending)| { + (tool_use_id.clone(), pending.clone()) + }) + }?; + let room = app_state.rooms.get(&chat_session_id).await?; + let participants = room.participant_list().await; + let required_voters = pending + .required_voters + .iter() + .filter_map(|id| participants.iter().find(|p| &p.id == id).cloned()) + .collect(); + let votes = pending + .question_votes + .into_iter() + .map(|(id, vote)| (id.0, vote)) + .collect(); + Some(claudette::room::PendingQuestionSnapshot { + tool_use_id, + required_voters, + votes, + input: pending.original_input, + }) + } + }); + } + + // Hydrate any persisted shares on disk so the share modal shows + // them on first open (and their saved pairing strings actually + // work — the in-process server only listens on the share port + // when at least one share is hydrated). Done after the on_create + // hook above so the host subscriber is wired before the server + // accepts its first `join_session`. 
+ #[cfg(feature = "server")] + { + let app_handle = app.handle().clone(); + tauri::async_runtime::spawn(async move { + let state = app_handle.state::(); + if let Err(e) = + commands::share::hydrate_persisted_shares(&app_handle, &state).await + { + eprintln!("[share] hydrate_persisted_shares failed: {e}"); + } + }); + } + Ok(()) }) .on_window_event(|window, event| { @@ -750,6 +868,14 @@ fn main() { commands::remote::start_local_server, commands::remote::stop_local_server, commands::remote::get_local_server_status, + // Workspace-scoped share management — replaces the + // unscoped `start_collaborative_share` flow. + commands::share::start_share, + commands::share::stop_share, + commands::share::list_shares, + commands::remote::collaboration_session_snapshot, + commands::remote::kick_participant, + commands::remote::mute_participant, // Debug (dev builds only — cfg-gated in commands/debug.rs) #[cfg(debug_assertions)] commands::debug::debug_eval_js, diff --git a/src-tauri/src/remote.rs b/src-tauri/src/remote.rs index 0f9a5f677..a401b5c2f 100644 --- a/src-tauri/src/remote.rs +++ b/src-tauri/src/remote.rs @@ -18,6 +18,25 @@ pub struct RemoteConnectionInfo { pub cert_fingerprint: Option, pub auto_connect: bool, pub created_at: String, + /// Stable id for the local user as seen by the remote server. Derived + /// (not persisted) — recomputed from `session_token` at every construction + /// site so we don't need a DB migration. The frontend uses this to detect + /// "this message is mine" in collaborative sessions. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub participant_id: Option, +} + +/// Derive a frontend-visible participant id from a stored session token. +/// Calls into `claudette-server` so the algorithm matches the server side +/// (which is what the room/collab protocol uses to key participants). +#[cfg(feature = "server")] +pub fn participant_id_for(session_token: Option<&str>) -> Option { + session_token.map(claudette_server::auth::participant_id_for_token) +} + +#[cfg(not(feature = "server"))] +pub fn participant_id_for(_session_token: Option<&str>) -> Option { + None } /// An active connection to a remote claudette-server. @@ -63,8 +82,16 @@ impl RemoteConnectionManager { // Workspace IDs are UUIDs so there's no collision between local and remote. let connection_id = info.id.clone(); let event_task = tokio::spawn(async move { - while let Ok(event) = event_rx.recv().await { - let _ = app.emit(&event.event, &event.payload); + loop { + match event_rx.recv().await { + Ok(event) => { + let _ = app.emit(&event.event, &event.payload); + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => { + eprintln!("[remote] event channel lagged {n} messages, continuing"); + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + } } eprintln!("[remote] Event stream ended for connection {connection_id}"); }); diff --git a/src-tauri/src/state.rs b/src-tauri/src/state.rs index 68276fbcd..bf4fef17a 100644 --- a/src-tauri/src/state.rs +++ b/src-tauri/src/state.rs @@ -9,7 +9,9 @@ use tokio::sync::{RwLock, Semaphore}; use claudette::env_provider::{EnvCache, EnvWatcher}; use claudette::plugin_runtime::PluginRegistry; +use claudette::room::RoomRegistry; use claudette::scm::types::{CiCheck, PullRequest}; +use claudette::workspace_events::WorkspaceEventBus; use crate::commands::apps::DetectedApp; use crate::remote::DiscoveredServer; @@ -33,6 +35,13 @@ pub enum AttentionKind { /// resolve via `control_response`. 
Keyed in `AgentSessionState::pending_permissions` /// by tool_use_id so UI callbacks (AgentQuestionCard, PlanApprovalCard) can /// resolve them using the tool_use_id they already track. +/// +/// In collaborative mode with consensus required, `required_voters` is +/// populated with a snapshot of room participants at the moment an +/// interactive request arrives. The `control_response` is held until the +/// plan vote or question response round resolves. For non-collaborative +/// sessions and non-consensus prompts, `required_voters` stays empty and +/// resolution is single-shot as before. #[derive(Debug, Clone)] pub struct PendingPermission { pub request_id: String, @@ -40,6 +49,19 @@ pub struct PendingPermission { /// Original tool input sent by the model — used verbatim as the base for /// `updatedInput` when approving (we layer user-collected answers on top). pub original_input: serde_json::Value, + /// Snapshot of participants whose vote is required to resolve. Empty + /// for non-consensus prompts; non-empty marks a consensus vote that the + /// `submit_plan_approval` resolver must evaluate before sending the + /// CLI response. + pub required_voters: std::collections::HashSet, + /// Votes received so far, keyed by participant id. Populated as votes + /// arrive via `submit_plan_approval` (host) or the `vote_plan_approval` + /// RPC (remote). + pub votes: HashMap, + /// AskUserQuestion responses received so far, keyed by participant id. + /// In consensus mode the CLI control response is held until every + /// required voter has answered. + pub question_votes: HashMap, } /// Per-session agent state managed on the Rust side. One of these per @@ -123,8 +145,9 @@ pub struct AgentSessionState { /// turn. Updated each time a new user message is inserted; cleared on /// session teardown. See `agent_mcp_sink::ChatBridgeSink`. pub last_user_msg_id: Option, - /// Set after we've posted a trust-error system message into the - /// chat for the current resolved env. Cleared when an existing + /// Set on the first env-trust failure observed within a persistent + /// session, so the helpful "trust this env" system message is posted + /// at most once per session. Reset on every spawn — including when the /// persistent session is torn down because the resolved env drifted /// (e.g. after `direnv allow` / `mise trust`, config edits, or /// provider toggles), so a fresh failure re-emits once after that @@ -381,6 +404,37 @@ pub struct AppState { /// the waiter to kill the process and emit a cancelled completion event. /// `Some` while a flow is running, `None` otherwise. pub auth_login_cancel: tokio::sync::Mutex>>, + /// Collaborative-session room registry. Shared `Arc` with the embedded + /// `claudette-server` (when the server feature is enabled) so a publish + /// from either side reaches subscribers on the other. Non-collaborative + /// sessions never enter the registry, and call sites fall back to direct + /// `app.emit` when `RoomRegistry::get(...)` returns `None`. + pub rooms: Arc, + /// Cross-process bus for workspace lifecycle events (archive today, + /// rename/delete in future). Shared `Arc` with the embedded + /// `claudette-server` (when the server feature is enabled and a share + /// is running) so a publish from the host's Tauri commands reaches + /// every connected remote subscriber. Always present; the bus is + /// constructed unconditionally in `AppState::new` so publishers don't + /// need to handle the absent case. 
+ pub workspace_events: Arc, + /// `true` while the in-process collaborative server is running for + /// this app instance. Distinct from `local_server` (subprocess) — the + /// in-process server shares `rooms` with `AppState` and is used only for + /// collab share. Reset when the server task exits so a failed bind or + /// early shutdown can be retried without restarting the app. Used to + /// refuse to start the subprocess server when an in-process server + /// already owns the port. + pub collab_server_running: tokio::sync::RwLock, + /// Shared `ServerConfig` for the in-process share server. Lazily + /// constructed on first `start_share` call and kept as a clone of the + /// same `Arc` `ServerState` holds, so share mutations on either side + /// (or via `list_shares` / `revoke_share`) are visible everywhere. + /// Persisted to `claudette-server`'s config file on each mutation. + /// Feature-gated: only meaningful when the `server` feature is enabled. + #[cfg(feature = "server")] + pub share_server_config: + tokio::sync::RwLock>>>, } impl AppState { @@ -410,12 +464,36 @@ impl AppState { pending_update: tokio::sync::Mutex::new(None), cesp_playback: Mutex::new(claudette::cesp::SoundPlaybackState::new()), auth_login_cancel: tokio::sync::Mutex::new(None), + rooms: RoomRegistry::new(), + workspace_events: Arc::new(WorkspaceEventBus::new()), + collab_server_running: tokio::sync::RwLock::new(false), + #[cfg(feature = "server")] + share_server_config: tokio::sync::RwLock::new(None), } } pub fn next_pty_id(&self) -> u64 { self.next_pty_id.fetch_add(1, Ordering::Relaxed) } + + /// Resolve the host's display name for collaborative sessions. + /// + /// Reads `app_settings:collab:display_name` if set and non-empty; + /// otherwise falls back to the OS hostname so users who never visit + /// the Collaboration settings page still get a sensible name. The + /// fallback matches what the legacy 1:1 pairing flow already used as + /// the client name, keeping behavior consistent across modes. + /// + /// This is a sync DB read (rusqlite::Connection isn't Send so we open + /// a fresh connection here, matching the convention from CLAUDE.md). + pub fn resolve_host_display_name(&self) -> String { + let name = claudette::db::Database::open(&self.db_path) + .ok() + .and_then(|db| db.get_app_setting("collab:display_name").ok().flatten()) + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()); + name.unwrap_or_else(|| gethostname::gethostname().to_string_lossy().to_string()) + } } #[cfg(test)] diff --git a/src-tauri/src/transport/ws.rs b/src-tauri/src/transport/ws.rs index 01b5ae9f4..4dbe7e4dc 100644 --- a/src-tauri/src/transport/ws.rs +++ b/src-tauri/src/transport/ws.rs @@ -185,6 +185,10 @@ impl WebSocketTransport { .and_then(|n| n.as_str()) .unwrap_or("Unknown") .to_string(), + participant_id: result + .get("participant_id") + .and_then(|p| p.as_str()) + .map(String::from), }) } @@ -216,6 +220,10 @@ impl WebSocketTransport { .and_then(|n| n.as_str()) .unwrap_or("Unknown") .to_string(), + participant_id: result + .get("participant_id") + .and_then(|p| p.as_str()) + .map(String::from), }) } } @@ -223,6 +231,10 @@ impl WebSocketTransport { pub struct AuthResult { pub session_token: Option, pub server_name: String, + /// The server-derived participant id for this connection. Used by the + /// frontend to detect "this message is mine" in collaborative sessions. + /// `None` for older servers that don't return it. 
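A minimal sketch of the fallback rule `resolve_host_display_name` implements, with the hostname passed in rather than read from the OS and the DB lookup omitted: a configured collab name wins only if it is non-empty after trimming.

fn resolve_display_name(configured: Option<&str>, hostname: &str) -> String {
    configured
        .map(str::trim)
        .filter(|name| !name.is_empty())
        .map(|name| name.to_string())
        .unwrap_or_else(|| hostname.to_string())
}

fn main() {
    assert_eq!(resolve_display_name(Some("  Maya  "), "devbox.local"), "Maya");
    assert_eq!(resolve_display_name(Some("   "), "devbox.local"), "devbox.local");
    assert_eq!(resolve_display_name(None, "devbox.local"), "devbox.local");
}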
+ pub participant_id: Option, } #[async_trait::async_trait] diff --git a/src/chat.rs b/src/chat.rs index dde57c541..a2e69e17a 100644 --- a/src/chat.rs +++ b/src/chat.rs @@ -18,14 +18,34 @@ use std::path::Path; +use serde::Serialize; use serde_json::Value; -use crate::agent::{AssistantMessage, CompactMetadata, ContentBlock, TokenUsage}; +use crate::agent::{AgentEvent, AssistantMessage, CompactMetadata, ContentBlock, TokenUsage}; use crate::db::Database; use crate::model::{ChatMessage, ChatRole, ConversationCheckpoint}; use crate::permissions::is_bypass_tools; use crate::snapshot; +// --------------------------------------------------------------------------- +// Agent-stream event payload +// --------------------------------------------------------------------------- + +/// The wire shape of an `agent-stream` event, fan-out from either bridge to +/// every connected participant (host webview via Tauri events; remote clients +/// via the WebSocket forwarder). Both transports must serialize *this* +/// struct so the JSON shape stays in lockstep with the frontend's +/// `AgentStreamPayload` TypeScript interface — drifting field names here +/// silently drops events on receivers (see commit `1e1db36` for the +/// `session_id`-vs-`chat_session_id` regression that motivated extracting +/// this type into the shared crate). +#[derive(Debug, Clone, Serialize)] +pub struct AgentStreamPayload { + pub workspace_id: String, + pub chat_session_id: String, + pub event: AgentEvent, +} + // --------------------------------------------------------------------------- // Session-flag drift detection // --------------------------------------------------------------------------- @@ -216,6 +236,8 @@ pub fn build_assistant_chat_message(args: BuildAssistantArgs<'_>) -> ChatMessage cache_creation_tokens: usage .as_ref() .and_then(|u| u.cache_creation_input_tokens.map(|n| n as i64)), + author_participant_id: None, + author_display_name: None, } } @@ -249,6 +271,8 @@ pub fn build_compaction_sentinel( output_tokens: None, cache_read_tokens: Some(meta.post_tokens as i64), cache_creation_tokens: None, + author_participant_id: None, + author_display_name: None, } } diff --git a/src/db/chat.rs b/src/db/chat.rs index 81947f562..c573023e3 100644 --- a/src/db/chat.rs +++ b/src/db/chat.rs @@ -11,6 +11,20 @@ use crate::model::{AgentStatus, Attachment, AttachmentOrigin, ChatMessage, ChatS use super::Database; +fn extract_plan_file_path(content: &str) -> Option { + let marker = "/.claude/plans/"; + let marker_idx = content.find(marker)?; + let prefix = &content[..marker_idx]; + let start = prefix.rfind('/').unwrap_or(0); + let rest = &content[start..]; + let end = rest.find(".md")? 
+ ".md".len(); + let candidate = &rest[..end]; + if candidate.contains('\n') || candidate.contains('\r') { + return None; + } + Some(candidate.to_string()) +} + fn row_to_attachment(row: &rusqlite::Row) -> rusqlite::Result { let data: Vec = row.get(4)?; let origin_str: String = row.get(9)?; @@ -39,8 +53,9 @@ impl Database { self.conn.execute( "INSERT INTO chat_messages ( id, workspace_id, chat_session_id, role, content, cost_usd, duration_ms, thinking, - input_tokens, output_tokens, cache_read_tokens, cache_creation_tokens - ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)", + input_tokens, output_tokens, cache_read_tokens, cache_creation_tokens, + author_participant_id, author_display_name + ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)", params![ msg.id, msg.workspace_id, @@ -54,6 +69,8 @@ impl Database { msg.output_tokens, msg.cache_read_tokens, msg.cache_creation_tokens, + msg.author_participant_id, + msg.author_display_name, ], )?; Ok(()) @@ -79,12 +96,14 @@ impl Database { output_tokens: row.get(10)?, cache_read_tokens: row.get(11)?, cache_creation_tokens: row.get(12)?, + author_participant_id: row.get(13)?, + author_display_name: row.get(14)?, }) } pub(super) const CHAT_MESSAGE_COLS: &str = "id, workspace_id, chat_session_id, role, content, cost_usd, \ duration_ms, created_at, thinking, input_tokens, output_tokens, cache_read_tokens, \ - cache_creation_tokens"; + cache_creation_tokens, author_participant_id, author_display_name"; /// Predicate that filters out legacy empty assistant rows (assistant role, /// empty content, no thinking text). The frontend used to drop these in @@ -138,6 +157,26 @@ impl Database { .optional() } + pub fn latest_plan_file_path_for_session( + &self, + chat_session_id: &str, + ) -> Result, rusqlite::Error> { + let mut stmt = self.conn.prepare( + "SELECT content FROM chat_messages + WHERE chat_session_id = ?1 + AND content LIKE '%.claude/plans/%.md%' + ORDER BY created_at DESC, rowid DESC + LIMIT 20", + )?; + let rows = stmt.query_map(params![chat_session_id], |row| row.get::<_, String>(0))?; + for row in rows { + if let Some(path) = extract_plan_file_path(&row?) { + return Ok(Some(path)); + } + } + Ok(None) + } + /// Count all non-legacy messages for a session (legacy = empty assistant /// rows; see `NON_LEGACY_MESSAGE_PREDICATE`). Used to compute pagination /// metadata (`total_count`) so callers can derive the global index offset @@ -1541,4 +1580,34 @@ mod tests { ), } } + + #[test] + fn latest_plan_file_path_for_session_finds_recent_plan_without_full_history() { + let db = setup_db_with_workspace(); + db.insert_chat_message(&make_chat_msg( + &db, + "m1", + "w1", + ChatRole::Assistant, + "View plan - /repo/.claude/plans/old.md", + )) + .unwrap(); + db.insert_chat_message(&make_chat_msg( + &db, + "m2", + "w1", + ChatRole::Assistant, + "View plan - /repo/.claude/plans/new.md", + )) + .unwrap(); + + let session_id = db + .default_session_id_for_workspace("w1") + .unwrap() + .expect("default session"); + assert_eq!( + db.latest_plan_file_path_for_session(&session_id).unwrap(), + Some("/repo/.claude/plans/new.md".to_string()), + ); + } } diff --git a/src/db/remote.rs b/src/db/remote.rs index 17a1a4b5d..d3412bec2 100644 --- a/src/db/remote.rs +++ b/src/db/remote.rs @@ -86,15 +86,56 @@ impl Database { .optional() } - pub fn update_remote_connection_session( + /// Look up a saved connection by (host, port). 
Used by the pairing + /// flow to detect "this is a re-pair against a host we already + /// know" and refresh the existing row instead of inserting a + /// duplicate sidebar entry. + pub fn find_remote_connection_by_host_port( + &self, + host: &str, + port: u16, + ) -> Result, rusqlite::Error> { + self.conn + .query_row( + "SELECT id, name, host, port, session_token, cert_fingerprint, auto_connect, created_at + FROM remote_connections WHERE host = ?1 AND port = ?2 + ORDER BY created_at LIMIT 1", + params![host, port as i32], + |row| { + let auto_connect_int: i32 = row.get(6)?; + Ok(RemoteConnection { + id: row.get(0)?, + name: row.get(1)?, + host: row.get(2)?, + port: Self::parse_port(row, 3)?, + session_token: row.get(4)?, + cert_fingerprint: row.get(5)?, + auto_connect: auto_connect_int != 0, + created_at: row.get(7)?, + }) + }, + ) + .optional() + } + + /// Refresh the volatile fields of an existing connection after a + /// successful re-pair: the host's display name (it may have been + /// renamed), the freshly-issued session token, and the cert + /// fingerprint observed during the new TLS handshake. The id and + /// `created_at` are preserved so the sidebar entry keeps its + /// stable identity across re-pairs. + pub fn update_remote_connection_pairing( &self, id: &str, + name: &str, session_token: &str, cert_fingerprint: &str, ) -> Result<(), rusqlite::Error> { self.conn.execute( - "UPDATE remote_connections SET session_token = ?1, cert_fingerprint = ?2 WHERE id = ?3", - params![session_token, cert_fingerprint, id], + "UPDATE remote_connections + SET name = ?1, session_token = ?2, cert_fingerprint = ?3 + WHERE id = ?4", + params![name, session_token, cert_fingerprint, id], )?; Ok(()) } @@ -156,15 +197,52 @@ mod tests { } #[test] - fn test_update_remote_connection_session() { + fn test_update_remote_connection_pairing() { let db = Database::open_in_memory().unwrap(); db.insert_remote_connection(&make_remote_conn("rc1", "Server A", "host-a.local", 7683)) .unwrap(); - db.update_remote_connection_session("rc1", "tok-123", "fp-abc") + let original_created_at = db + .get_remote_connection("rc1") + .unwrap() + .unwrap() + .created_at; + db.update_remote_connection_pairing("rc1", "Server A v2", "tok-123", "fp-abc") .unwrap(); let conn = db.get_remote_connection("rc1").unwrap().unwrap(); + assert_eq!(conn.name, "Server A v2"); assert_eq!(conn.session_token.as_deref(), Some("tok-123")); assert_eq!(conn.cert_fingerprint.as_deref(), Some("fp-abc")); + // Stable identity: id + created_at survive a re-pair so the sidebar + // entry doesn't shift position or lose its provenance. + assert_eq!(conn.id, "rc1"); + assert_eq!(conn.created_at, original_created_at); + } + + #[test] + fn test_find_remote_connection_by_host_port() { + let db = Database::open_in_memory().unwrap(); + db.insert_remote_connection(&make_remote_conn("rc1", "Server A", "host-a.local", 7683)) + .unwrap(); + db.insert_remote_connection(&make_remote_conn("rc2", "Server B", "host-b.local", 9000)) + .unwrap(); + + let hit = db + .find_remote_connection_by_host_port("host-a.local", 7683) + .unwrap() + .expect("expected to find host-a"); + assert_eq!(hit.id, "rc1"); + + // Different port on the same host is treated as a distinct server. + let miss_port = db + .find_remote_connection_by_host_port("host-a.local", 9999) + .unwrap(); + assert!(miss_port.is_none()); + + // Different host: also a miss. 
+ let miss_host = db + .find_remote_connection_by_host_port("host-c.local", 7683) + .unwrap(); + assert!(miss_host.is_none()); } #[test] diff --git a/src/db/test_support.rs b/src/db/test_support.rs index 143f52080..e0efb22b9 100644 --- a/src/db/test_support.rs +++ b/src/db/test_support.rs @@ -80,5 +80,7 @@ pub(crate) fn make_chat_msg( output_tokens: None, cache_read_tokens: None, cache_creation_tokens: None, + author_participant_id: None, + author_display_name: None, } } diff --git a/src/fork.rs b/src/fork.rs index 2b938cdd5..48437d08d 100644 --- a/src/fork.rs +++ b/src/fork.rs @@ -311,6 +311,8 @@ fn copy_history( output_tokens: msg.output_tokens, cache_read_tokens: msg.cache_read_tokens, cache_creation_tokens: msg.cache_creation_tokens, + author_participant_id: msg.author_participant_id.clone(), + author_display_name: msg.author_display_name.clone(), }; db.insert_chat_message(&copied)?; } @@ -508,6 +510,8 @@ mod tests { output_tokens: None, cache_read_tokens: None, cache_creation_tokens: None, + author_participant_id: None, + author_display_name: None, } } diff --git a/src/lib.rs b/src/lib.rs index 951f88528..5dde8f9f9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,11 +27,13 @@ pub mod permissions; pub mod plugin; pub mod plugin_runtime; pub mod process; +pub mod room; pub mod rpc; pub mod scm; pub mod slash_commands; pub mod snapshot; pub mod workspace_alloc; +pub mod workspace_events; pub mod workspace_sync; use base64::Engine; diff --git a/src/migrations/20260428034257_chat_message_author.sql b/src/migrations/20260428034257_chat_message_author.sql new file mode 100644 index 000000000..225f9583e --- /dev/null +++ b/src/migrations/20260428034257_chat_message_author.sql @@ -0,0 +1,12 @@ +-- Add author identity to chat messages so collaborative sessions can render +-- which connected user prompted the agent. In a collab session, the host +-- stamps `"host"` on its own messages and the per-pairing id on +-- remote-authored ones; outside a collab session (solo, 1:1, all +-- pre-collab history) both columns are NULL and the UI treats the +-- message as authored by the local user (no author chip). + +ALTER TABLE chat_messages + ADD COLUMN author_participant_id TEXT; + +ALTER TABLE chat_messages + ADD COLUMN author_display_name TEXT; diff --git a/src/migrations/mod.rs b/src/migrations/mod.rs index 67407c7ba..aad48b7b7 100644 --- a/src/migrations/mod.rs +++ b/src/migrations/mod.rs @@ -159,6 +159,11 @@ pub const MIGRATIONS: &[Migration] = &[ sql: include_str!("20260425003451_attachments_origin_and_tool_use.sql"), legacy_version: None, }, + Migration { + id: "20260428034257_chat_message_author", + sql: include_str!("20260428034257_chat_message_author.sql"), + legacy_version: None, + }, Migration { id: "20260430030147_pinned_prompts", sql: include_str!("20260430030147_pinned_prompts.sql"), diff --git a/src/model/chat_message.rs b/src/model/chat_message.rs index 092af99b4..a6af6ed95 100644 --- a/src/model/chat_message.rs +++ b/src/model/chat_message.rs @@ -67,4 +67,14 @@ pub struct ChatMessage { /// Per-message cache-creation input tokens (maps to /// `cache_creation_input_tokens` in the Anthropic API). NULL for historical rows. pub cache_creation_tokens: Option, + /// Identifies the connected participant who authored this message in a + /// collaborative session. In a collab session, the host stamps `"host"` + /// (see `claudette::room::ParticipantId::HOST`) on its own messages and + /// the per-pairing id on remote-authored ones. 
NULL for solo / 1:1 + /// (non-collab) sessions, all Assistant/System rows, and pre-collab + /// legacy history. + pub author_participant_id: Option, + /// Display name captured at submit time so the UI can render an author + /// chip without resolving the participant id at read time. + pub author_display_name: Option, } diff --git a/src/room.rs b/src/room.rs new file mode 100644 index 000000000..90d33d88d --- /dev/null +++ b/src/room.rs @@ -0,0 +1,642 @@ +//! Collaborative-session "rooms". +//! +//! A `Room` is the single source of truth for one collaboratively-shared chat +//! session: it owns the live participant set, a broadcast channel that +//! fans out agent-stream events to every connected client (the local Tauri UI +//! plus any remote WebSocket clients), a turn lock so only one user can drive +//! the agent at a time, and any in-flight plan-consensus vote. +//! +//! Solo / 1:1 legacy remote sessions never touch this: the registry lazily +//! creates a room only when collaborative mode is enabled for a session, and +//! call sites fall back to the existing direct-emit path when no room exists. + +use std::collections::{HashMap, HashSet}; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; +use tokio::sync::{Mutex, RwLock, broadcast}; + +/// A stable per-pairing identity. Derived from the session token (server side) +/// or fixed to [`ParticipantId::HOST`] for the host's own local UI. Strings +/// are opaque to callers. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct ParticipantId(pub String); + +impl ParticipantId { + /// Sentinel value used for the host process itself. Remote clients never + /// receive this id from auth; only the local Tauri layer constructs it. + pub const HOST: &'static str = "host"; + + pub fn host() -> Self { + Self(Self::HOST.to_string()) + } + + pub fn is_host(&self) -> bool { + self.0 == Self::HOST + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ParticipantInfo { + pub id: ParticipantId, + pub display_name: String, + pub is_host: bool, + /// Unix-millis timestamp of when the participant joined this room. + pub joined_at: i64, + /// When true, the server rejects this participant's `send_chat_message` + /// and `vote_plan_approval` RPCs. Mute is per-room, not per-pairing. + pub muted: bool, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "kind", rename_all = "snake_case")] +pub enum Vote { + Approve, + Deny { reason: String }, +} + +impl Vote { + pub fn is_deny(&self) -> bool { + matches!(self, Vote::Deny { .. }) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct QuestionVote { + pub answers: HashMap, +} + +/// Live state for a single open ExitPlanMode consensus vote. +/// +/// `required_voters` is snapshotted at vote-open time so late joiners are +/// observers, not unanticipated blockers. It is pruned on participant +/// disconnect so a ghosted voter doesn't deadlock the agent. 
+#[derive(Debug, Clone)]
+pub struct PendingVote {
+    pub tool_use_id: String,
+    pub required_voters: HashSet<ParticipantId>,
+    pub votes: HashMap<ParticipantId, Vote>,
+    pub original_input: serde_json::Value,
+}
+
+impl PendingVote {
+    pub fn new(
+        tool_use_id: String,
+        required_voters: HashSet<ParticipantId>,
+        original_input: serde_json::Value,
+    ) -> Self {
+        Self {
+            tool_use_id,
+            required_voters,
+            votes: HashMap::new(),
+            original_input,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct PendingVoteSnapshot {
+    pub tool_use_id: String,
+    pub required_voters: Vec<ParticipantInfo>,
+    pub votes: HashMap<String, Vote>,
+    pub input: serde_json::Value,
+    pub plan_file_path: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct PendingQuestionSnapshot {
+    pub tool_use_id: String,
+    pub required_voters: Vec<ParticipantInfo>,
+    pub votes: HashMap<String, QuestionVote>,
+    pub input: serde_json::Value,
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct TurnSettingsSnapshot {
+    pub model: Option<String>,
+    pub plan_mode: bool,
+}
+
+type PendingVoteSnapshotFuture =
+    Pin<Box<dyn Future<Output = Option<PendingVoteSnapshot>> + Send + 'static>>;
+type PendingVoteSnapshotProvider =
+    Box<dyn Fn(String) -> PendingVoteSnapshotFuture + Send + Sync + 'static>;
+type PendingQuestionSnapshotFuture =
+    Pin<Box<dyn Future<Output = Option<PendingQuestionSnapshot>> + Send + 'static>>;
+type PendingQuestionSnapshotProvider =
+    Box<dyn Fn(String) -> PendingQuestionSnapshotFuture + Send + Sync + 'static>;
+
+/// One broadcast envelope. The payload is already a fully-shaped JSON-RPC
+/// event object (`{"event": "...", "payload": {...}}`) so subscribers can
+/// forward it to their writer without re-shaping.
+#[derive(Debug, Clone)]
+pub struct RoomEvent(pub serde_json::Value);
+
+/// Bounded broadcast capacity. Tuned for live-token streaming: large enough
+/// that a normal client won't lag during a single turn, small enough that
+/// memory cost is bounded if a client truly stalls.
+const ROOM_BROADCAST_CAPACITY: usize = 256;
+
+pub struct Room {
+    pub chat_session_id: String,
+    /// Lossy fan-out of events. Slow subscribers receive `RecvError::Lagged`
+    /// and are expected to resync via `join_session`.
+    pub tx: broadcast::Sender<RoomEvent>,
+    pub participants: RwLock<HashMap<ParticipantId, ParticipantInfo>>,
+    /// `true` means ExitPlanMode requires unanimous approval (with host veto)
+    /// before the agent is allowed to leave plan mode. See
+    /// [`crate::room`] module docs.
+    pub consensus_required: RwLock<bool>,
+    /// `Some(holder)` while a turn is in flight; new `send_chat_message`
+    /// requests from any other participant must be rejected.
+    pub turn_holder: Mutex<Option<ParticipantId>>,
+    pub turn_started_at_ms: Mutex<Option<i64>>,
+    pub turn_settings: RwLock<Option<TurnSettingsSnapshot>>,
+    pub pending_vote: RwLock<Option<PendingVote>>,
+}
+
+impl Room {
+    pub fn new(chat_session_id: String, consensus_required: bool) -> Arc<Self> {
+        let (tx, _) = broadcast::channel(ROOM_BROADCAST_CAPACITY);
+        Arc::new(Self {
+            chat_session_id,
+            tx,
+            participants: RwLock::new(HashMap::new()),
+            consensus_required: RwLock::new(consensus_required),
+            turn_holder: Mutex::new(None),
+            turn_started_at_ms: Mutex::new(None),
+            turn_settings: RwLock::new(None),
+            pending_vote: RwLock::new(None),
+        })
+    }
+
+    /// Publish an event to every subscriber. A `SendError` (zero subscribers)
+    /// is silently ignored — that just means nobody is listening *yet*; the
+    /// event was still persisted upstream by the caller (DB writes happen
+    /// outside the broadcast).
+ pub fn publish(&self, event: serde_json::Value) { + let _ = self.tx.send(RoomEvent(event)); + } + + pub fn subscribe(&self) -> broadcast::Receiver { + self.tx.subscribe() + } + + pub async fn add_participant(&self, info: ParticipantInfo) { + self.participants + .write() + .await + .insert(info.id.clone(), info); + } + + pub async fn remove_participant(&self, id: &ParticipantId) -> Option { + self.participants.write().await.remove(id) + } + + pub async fn participant_list(&self) -> Vec { + self.participants.read().await.values().cloned().collect() + } + + pub async fn is_muted(&self, id: &ParticipantId) -> bool { + self.participants + .read() + .await + .get(id) + .map(|p| p.muted) + .unwrap_or(false) + } + + pub async fn set_muted(&self, id: &ParticipantId, muted: bool) -> bool { + let mut guard = self.participants.write().await; + if let Some(p) = guard.get_mut(id) { + p.muted = muted; + true + } else { + false + } + } + + /// Attempt to acquire the turn for `participant`. Returns `Ok(())` if the + /// caller now holds the turn, or `Err(current_holder)` if someone else + /// already does. Hard reject — never queues. + pub async fn try_acquire_turn(&self, participant: &ParticipantId) -> Result<(), ParticipantId> { + let mut holder = self.turn_holder.lock().await; + match holder.as_ref() { + Some(current) if current != participant => Err(current.clone()), + _ => { + *holder = Some(participant.clone()); + Ok(()) + } + } + } + + pub async fn release_turn(&self) { + *self.turn_holder.lock().await = None; + *self.turn_started_at_ms.lock().await = None; + *self.turn_settings.write().await = None; + } + + pub async fn current_turn_holder(&self) -> Option { + self.turn_holder.lock().await.clone() + } + + pub async fn pending_vote_snapshot(&self) -> Option { + let pending = self.pending_vote.read().await.clone()?; + let participants = self.participants.read().await; + let required_voters = pending + .required_voters + .iter() + .filter_map(|id| participants.get(id).cloned()) + .collect(); + let votes = pending + .votes + .into_iter() + .map(|(id, vote)| (id.0, vote)) + .collect(); + + Some(PendingVoteSnapshot { + tool_use_id: pending.tool_use_id, + required_voters, + votes, + input: pending.original_input, + plan_file_path: None, + }) + } +} + +/// Synchronous callback fired exactly once per newly-created room, before +/// the room becomes visible to any other caller. The Tauri host installs +/// this so it can capture a `broadcast::Receiver` (and spawn the local +/// event-mirror / vote-resolver tasks) *before* any handler — including +/// `handle_join_session` on the server side — publishes into the room. +/// +/// Without this, the host would miss the very first `participants-changed` +/// event, because `tokio::sync::broadcast` does not buffer for late +/// subscribers. +type OnCreateHook = Box) + Send + Sync>; + +/// Process-wide registry shared between the Tauri host and the embedded +/// `claudette-server`. Both sides hold the same `Arc` so a +/// publish from either side reaches subscribers on the other. +#[derive(Default)] +pub struct RoomRegistry { + rooms: RwLock>>, + /// Optional hook invoked synchronously during `get_or_create` whenever a + /// brand-new room is constructed. See [`OnCreateHook`] for the rationale. + on_create: std::sync::Mutex>, + pending_vote_snapshot_provider: std::sync::Mutex>, + pending_question_snapshot_provider: std::sync::Mutex>, +} + +impl RoomRegistry { + pub fn new() -> Arc { + Arc::new(Self::default()) + } + + /// Install the creation hook. 
The callback is fired with the new + /// `Arc` before the room is published into the registry's map, + /// so any `subscribe()` call inside the callback is guaranteed to + /// observe every subsequent publish. There is at most one hook — + /// later calls replace the previous one. + pub fn set_on_create(&self, callback: F) + where + F: Fn(Arc) + Send + Sync + 'static, + { + // `std::sync::Mutex` here (not tokio): the hook is set once at + // startup and read on the (sync) creation path. We never await + // while holding it. + if let Ok(mut guard) = self.on_create.lock() { + *guard = Some(Box::new(callback)); + } + } + + pub fn set_pending_vote_snapshot_provider(&self, callback: F) + where + F: Fn(String) -> Fut + Send + Sync + 'static, + Fut: Future> + Send + 'static, + { + if let Ok(mut guard) = self.pending_vote_snapshot_provider.lock() { + *guard = Some(Box::new(move |session_id| Box::pin(callback(session_id)))); + } + } + + pub fn set_pending_question_snapshot_provider(&self, callback: F) + where + F: Fn(String) -> Fut + Send + Sync + 'static, + Fut: Future> + Send + 'static, + { + if let Ok(mut guard) = self.pending_question_snapshot_provider.lock() { + *guard = Some(Box::new(move |session_id| Box::pin(callback(session_id)))); + } + } + + pub async fn pending_vote_snapshot( + &self, + chat_session_id: &str, + ) -> Option { + if let Some(room) = self.get(chat_session_id).await + && let Some(snapshot) = room.pending_vote_snapshot().await + { + return Some(snapshot); + } + + let provider_future = { + let guard = self.pending_vote_snapshot_provider.lock().ok()?; + guard + .as_ref() + .map(|provider| provider(chat_session_id.to_string())) + }?; + provider_future.await + } + + pub async fn pending_question_snapshot( + &self, + chat_session_id: &str, + ) -> Option { + let provider_future = { + let guard = self.pending_question_snapshot_provider.lock().ok()?; + guard + .as_ref() + .map(|provider| provider(chat_session_id.to_string())) + }?; + provider_future.await + } + + /// Look up an existing room. Returns `None` for solo / 1:1 sessions. + pub async fn get(&self, chat_session_id: &str) -> Option> { + self.rooms.read().await.get(chat_session_id).cloned() + } + + /// Get-or-create. Use when starting a collaborative share. + pub async fn get_or_create( + &self, + chat_session_id: &str, + consensus_required: bool, + ) -> Arc { + // Fast path: read-locked existence check. + if let Some(room) = self.rooms.read().await.get(chat_session_id).cloned() { + return room; + } + let mut guard = self.rooms.write().await; + // Double-check under write lock to handle the racing-creators case. + if let Some(room) = guard.get(chat_session_id).cloned() { + return room; + } + let room = Room::new(chat_session_id.to_string(), consensus_required); + // Fire the creation hook *before* publishing into the map. This is + // load-bearing: callers (e.g. the server's `handle_join_session`) + // call `room.publish(...)` shortly after `get_or_create` returns, + // and `tokio::sync::broadcast` does not deliver historical events + // to subscribers attached after a publish. By calling the hook + // here — synchronously, while we still hold the only `Arc` + // outside the function — we guarantee any subscribers it spawns + // see every subsequent event from turn one. + if let Ok(hook) = self.on_create.lock() + && let Some(cb) = hook.as_ref() + { + cb(room.clone()); + } + guard.insert(chat_session_id.to_string(), room.clone()); + room + } + + /// Tear down a room when its share ends. 
Subscribers will see their + /// receivers close on next `recv()`. + pub async fn remove(&self, chat_session_id: &str) -> Option> { + self.rooms.write().await.remove(chat_session_id) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + fn pid(s: &str) -> ParticipantId { + ParticipantId(s.to_string()) + } + + fn info(id: &str, host: bool) -> ParticipantInfo { + ParticipantInfo { + id: pid(id), + display_name: id.to_string(), + is_host: host, + joined_at: 0, + muted: false, + } + } + + #[tokio::test] + async fn registry_get_or_create_is_idempotent() { + let reg = RoomRegistry::new(); + let a = reg.get_or_create("s1", false).await; + let b = reg.get_or_create("s1", true).await; + // Same room — `consensus_required` flag from the second call is + // ignored because the room already exists. Callers must mutate the + // flag explicitly via `consensus_required.write()`. + assert!(Arc::ptr_eq(&a, &b)); + } + + #[tokio::test] + async fn publish_fans_out_to_all_subscribers() { + let room = Room::new("s1".into(), false); + let mut rx1 = room.subscribe(); + let mut rx2 = room.subscribe(); + room.publish(json!({"event": "hello"})); + let e1 = rx1.recv().await.unwrap(); + let e2 = rx2.recv().await.unwrap(); + assert_eq!(e1.0, json!({"event": "hello"})); + assert_eq!(e2.0, json!({"event": "hello"})); + } + + #[tokio::test] + async fn publish_with_zero_subscribers_does_not_panic() { + let room = Room::new("s1".into(), false); + room.publish(json!({"event": "into-the-void"})); + } + + #[tokio::test] + async fn turn_lock_rejects_concurrent_acquirers() { + let room = Room::new("s1".into(), false); + let alice = pid("alice"); + let bob = pid("bob"); + room.try_acquire_turn(&alice).await.unwrap(); + let err = room.try_acquire_turn(&bob).await.unwrap_err(); + assert_eq!(err, alice); + // Same participant re-acquiring (e.g. retry of same prompt) is OK — + // they already hold it. + room.try_acquire_turn(&alice).await.unwrap(); + room.release_turn().await; + // After release anyone can take it. + room.try_acquire_turn(&bob).await.unwrap(); + } + + #[tokio::test] + async fn participant_lifecycle() { + let room = Room::new("s1".into(), false); + room.add_participant(info("alice", false)).await; + room.add_participant(info("bob", false)).await; + assert_eq!(room.participant_list().await.len(), 2); + assert!(!room.is_muted(&pid("alice")).await); + assert!(room.set_muted(&pid("alice"), true).await); + assert!(room.is_muted(&pid("alice")).await); + // Muting a non-existent participant returns false rather than + // silently inserting one. + assert!(!room.set_muted(&pid("nobody"), true).await); + assert!(room.remove_participant(&pid("alice")).await.is_some()); + assert!(room.remove_participant(&pid("alice")).await.is_none()); + } + + #[tokio::test] + async fn on_create_hook_subscribes_before_first_publish() { + // Regression test for the publish-before-subscribe race: a hook + // installed on the registry must run synchronously when a brand-new + // room is created, *before* any caller can publish into it. Without + // this guarantee the host UI loses the very first + // `participants-changed` event of every collaborative session. + let reg = RoomRegistry::new(); + let captured: std::sync::Arc< + std::sync::Mutex>>, + > = std::sync::Arc::new(std::sync::Mutex::new(None)); + let captured_clone = captured.clone(); + reg.set_on_create(move |room| { + // Synchronous capture mirrors what the Tauri host does in + // `attach_host_room_subscribers`. 
+ let rx = room.subscribe(); + *captured_clone.lock().unwrap() = Some(rx); + }); + + let room = reg.get_or_create("s1", false).await; + // Simulate the publish that `handle_join_session` does immediately + // after `get_or_create` returns. + room.publish(json!({"event": "participants-changed"})); + + let mut rx = captured + .lock() + .unwrap() + .take() + .expect("hook should have run"); + let evt = rx + .recv() + .await + .expect("first publish must reach hook subscriber"); + assert_eq!(evt.0, json!({"event": "participants-changed"})); + + // Hook fires only on creation, not on subsequent get_or_creates. + let captured2: std::sync::Arc> = + std::sync::Arc::new(std::sync::Mutex::new(0)); + let counter = captured2.clone(); + reg.set_on_create(move |_| { + *counter.lock().unwrap() += 1; + }); + let _ = reg.get_or_create("s1", false).await; // existing → no fire + let _ = reg.get_or_create("s2", false).await; // new → fires once + assert_eq!(*captured2.lock().unwrap(), 1); + } + + #[tokio::test] + async fn pending_vote_carries_required_voters() { + let mut required = HashSet::new(); + required.insert(pid("host")); + required.insert(pid("alice")); + let mut vote = PendingVote::new("tool-1".into(), required, serde_json::json!({})); + vote.votes.insert(pid("host"), Vote::Approve); + assert_eq!(vote.votes.len(), 1); + assert_eq!(vote.required_voters.len(), 2); + } + + #[tokio::test] + async fn pending_vote_snapshot_uses_participant_details() { + let room = Room::new("s1".into(), true); + room.add_participant(info("host", true)).await; + room.add_participant(info("alice", false)).await; + + let mut required = HashSet::new(); + required.insert(pid("host")); + required.insert(pid("alice")); + let mut vote = PendingVote::new( + "tool-1".into(), + required, + serde_json::json!({"allowedPrompts": [{"tool": "Edit", "prompt": "ok"}]}), + ); + vote.votes.insert(pid("alice"), Vote::Approve); + *room.pending_vote.write().await = Some(vote); + + let snapshot = room.pending_vote_snapshot().await.expect("snapshot"); + assert_eq!(snapshot.tool_use_id, "tool-1"); + assert_eq!(snapshot.required_voters.len(), 2); + assert_eq!( + snapshot.votes.get("alice").expect("alice vote"), + &Vote::Approve + ); + assert_eq!( + snapshot.input["allowedPrompts"][0]["tool"], + serde_json::json!("Edit") + ); + } + + #[tokio::test] + async fn registry_pending_vote_snapshot_backfills_from_provider() { + let reg = RoomRegistry::new(); + reg.set_pending_vote_snapshot_provider(|session_id| async move { + Some(PendingVoteSnapshot { + tool_use_id: format!("{session_id}-tool"), + required_voters: vec![info("host", true)], + votes: HashMap::new(), + input: serde_json::json!({"allowedPrompts": []}), + plan_file_path: None, + }) + }); + + let snapshot = reg + .pending_vote_snapshot("s1") + .await + .expect("provider snapshot"); + assert_eq!(snapshot.tool_use_id, "s1-tool"); + } + + #[tokio::test] + async fn registry_pending_question_snapshot_backfills_from_provider() { + let reg = RoomRegistry::new(); + reg.set_pending_question_snapshot_provider(|session_id| async move { + Some(PendingQuestionSnapshot { + tool_use_id: format!("{session_id}-tool"), + required_voters: vec![info("host", true)], + votes: HashMap::new(), + input: serde_json::json!({ + "question": "Pick one", + "options": ["A", "B"], + }), + }) + }); + + let snapshot = reg + .pending_question_snapshot("s1") + .await + .expect("provider snapshot"); + assert_eq!(snapshot.tool_use_id, "s1-tool"); + assert_eq!(snapshot.input["question"], serde_json::json!("Pick one")); + } + + 
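For orientation, a minimal sketch of how a host process might install the creation hook exercised by the test above and mirror room events to its own UI. The `emit_to_frontend` sink and the free function are illustrative assumptions, not part of this patch:

// Sketch only: assumes a running tokio runtime and some host-side
// `emit_to_frontend` sink that forwards room events to the webview.
fn attach_host_room_subscriber<F>(registry: &RoomRegistry, emit_to_frontend: F)
where
    F: Fn(serde_json::Value) + Clone + Send + Sync + 'static,
{
    registry.set_on_create(move |room| {
        // Subscribe inside the hook, before the room is visible to any other
        // caller, so no publish can race ahead of this receiver.
        let mut rx = room.subscribe();
        let emit = emit_to_frontend.clone();
        tokio::spawn(async move {
            loop {
                match rx.recv().await {
                    Ok(RoomEvent(event)) => emit(event),
                    // A lagged receiver is expected to resync via `join_session`.
                    Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue,
                    // Closed: the room was removed from the registry.
                    Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
                }
            }
        });
    });
}

In the real host this presumably sits alongside `attach_host_room_subscribers`; the sketch only shows the subscribe-inside-the-hook ordering that the regression test above locks in.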
#[tokio::test] + async fn pending_vote_snapshot_allows_non_consensus_prompt() { + let room = Room::new("s1".into(), true); + let vote = PendingVote::new( + "tool-1".into(), + HashSet::new(), + serde_json::json!({"allowedPrompts": []}), + ); + *room.pending_vote.write().await = Some(vote); + + let snapshot = room.pending_vote_snapshot().await.expect("snapshot"); + assert_eq!(snapshot.tool_use_id, "tool-1"); + assert!(snapshot.required_voters.is_empty()); + assert!(snapshot.votes.is_empty()); + } +} diff --git a/src/ui/src/App.tsx b/src/ui/src/App.tsx index fe375f069..d80ad0142 100644 --- a/src/ui/src/App.tsx +++ b/src/ui/src/App.tsx @@ -2,10 +2,11 @@ import { useEffect, useRef } from "react"; import { listen } from "@tauri-apps/api/event"; import { getVersion } from "@tauri-apps/api/app"; import { useAppStore } from "./stores/useAppStore"; -import { loadInitialData, getAppSetting, getHostEnvFlags, listRemoteConnections, listDiscoveredServers, getLocalServerStatus, detectInstalledApps, listSystemFonts, deleteTerminalTab, listAppSettingsWithPrefix } from "./services/tauri"; +import { loadInitialData, getAppSetting, getHostEnvFlags, listRemoteConnections, listDiscoveredServers, getLocalServerStatus, listShares, detectInstalledApps, listSystemFonts, deleteTerminalTab, listAppSettingsWithPrefix } from "./services/tauri"; import { applyTheme, applyUserFonts, loadAllThemes, findTheme, cacheThemePreference, getThemeDataAttr } from "./utils/theme"; import { DEFAULT_THEME_ID, DEFAULT_LIGHT_THEME_ID } from "./styles/themes"; import type { ThemeDefinition } from "./types/theme"; +import type { Workspace } from "./types/workspace"; import { adjustUiFontSize, resetUiFontSize } from "./utils/fontSettings"; import { KEYBINDING_SETTING_PREFIX } from "./hotkeys/bindings"; import { useMcpStatus } from "./hooks/useMcpStatus"; @@ -196,6 +197,16 @@ function App() { }) .catch((err) => console.error("Failed to load local server status:", err)); + // Hydrate the active-shares count once at startup so the sidebar's + // ShareButton reflects persisted shares even before the user opens + // the share modal. ShareModal's own `refresh` keeps this in sync + // afterwards (mint, stop, periodic refresh). + listShares() + .then((shares) => useAppStore.getState().setActiveSharesCount(shares.length)) + .catch(() => { + // Server feature off / no shares — leave count at 0. + }); + detectInstalledApps() .then(setDetectedApps) .catch((err) => console.error("Failed to detect installed apps:", err)); @@ -254,6 +265,18 @@ function App() { setKeybindings(bindings); }) .catch(() => {}); + // Hydrate collaboration preferences. Both default to "off / blank" so a + // failed read leaves the rest of the app working unchanged. + getAppSetting("collab:display_name") + .then((val) => { if (val) useAppStore.getState().setCollabDisplayName(val); }) + .catch(() => {}); + getAppSetting("collab:default_consensus_required") + .then((val) => { + if (val === "true") { + useAppStore.getState().setCollabDefaultConsensusRequired(true); + } + }) + .catch(() => {}); getAppSetting("language") .then((lang) => { if (lang && isSupportedLanguage(lang) && lang !== i18n.language) { @@ -573,6 +596,52 @@ function App() { store.addToast(msg); }); + // Listen for cross-process workspace lifecycle events forwarded from a + // remote claudette-server (today: archive). The host publishes via + // `WorkspaceEventBus`, the WS connection forwards as `{event:"workspace-lifecycle", payload}`, + // `RemoteConnectionManager` re-emits it as a Tauri event, and we land here. 
+ // The remote-side response: drop the workspace from the sidebar, clear + // selection if it was active, and toast the user. The forwarder on the + // host side already filters by the connection's allowed-workspaces scope, + // so we trust the workspace id arrived legitimately. + const unlistenWorkspaceLifecycle = listen<{ + kind: string; + workspace_id?: string; + source_workspace_id?: string; + workspace?: Workspace; + }>( + "workspace-lifecycle", + (event) => { + const { kind, workspace_id, source_workspace_id, workspace } = event.payload; + const store = useAppStore.getState(); + if (kind === "forked" && workspace && source_workspace_id) { + const source = store.workspaces.find((w) => w.id === source_workspace_id); + const remoteWorkspace: Workspace = { + ...workspace, + remote_connection_id: source?.remote_connection_id ?? null, + }; + if (store.workspaces.some((w) => w.id === remoteWorkspace.id)) { + store.updateWorkspace(remoteWorkspace.id, remoteWorkspace); + } else { + store.addWorkspace(remoteWorkspace); + } + store.selectWorkspace(remoteWorkspace.id); + return; + } + if (kind !== "archived" || !workspace_id) return; + // Capture the name BEFORE removing the workspace so the toast can + // identify which one disappeared. The Workspace lookup falls back to + // an unnamed message for the (rare) race where the workspace was + // already gone client-side before the event arrived. + const ws = store.workspaces.find((w) => w.id === workspace_id); + store.removeWorkspace(workspace_id); + const msg = ws?.name + ? i18n.t("sidebar:remote_workspace_archived_named", { name: ws.name }) + : i18n.t("sidebar:remote_workspace_archived_unnamed"); + store.addToast(msg); + }, + ); + return () => { isActive = false; window.clearInterval(discoveredServersPollId); @@ -590,6 +659,7 @@ function App() { unlistenWorkspacesChanged.then((fn) => fn()); unlistenChatTurnSettings.then((fn) => fn()); unlistenChatTurnStarted.then((fn) => fn()); + unlistenWorkspaceLifecycle.then((fn) => fn()); unlistenMissingCli.then((fn) => fn()); }; }, [setRepositories, setWorkspaces, setWorktreeBaseDir, setDefaultBranches, setTerminalFontSize, setLastMessages, setRemoteConnections, setDiscoveredServers, setLocalServerRunning, setLocalServerConnectionString, setCurrentThemeId, setThemeMode, setThemeDark, setThemeLight, setUiFontSize, setFontFamilySans, setFontFamilyMono, setSystemFonts, setDetectedApps, setUsageInsightsEnabled, setClaudetteTerminalEnabled, setShowSidebarRunningCommands, setPluginManagementEnabled, setCommunityRegistryEnabled, setEditorGitGutterBase, setEditorMinimapEnabled, setDisable1mContext, setAppVersion, setVoiceToggleHotkey, setVoiceHoldHotkey, setKeybindings]); diff --git a/src/ui/src/components/chat/AgentQuestionCard.module.css b/src/ui/src/components/chat/AgentQuestionCard.module.css index 385b5fad7..ee6c1ed4a 100644 --- a/src/ui/src/components/chat/AgentQuestionCard.module.css +++ b/src/ui/src/components/chat/AgentQuestionCard.module.css @@ -118,6 +118,55 @@ font-weight: 500; } +.voteBadges { + display: flex; + flex-wrap: wrap; + gap: 4px; + margin-top: 4px; +} + +.voteBadge { + display: inline-flex; + align-items: center; + min-height: 18px; + max-width: 160px; + padding: 2px 7px; + border-radius: 999px; + background: rgba(var(--accent-primary-rgb), 0.12); + color: var(--accent-primary); + font-size: 11px; + font-weight: 650; + line-height: 1.2; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.freeformVotes { + display: flex; + flex-direction: column; + gap: 6px; + 
margin-top: 8px; +} + +.freeformVote { + display: flex; + align-items: flex-start; + gap: 8px; + padding: 8px 10px; + border: 1px solid var(--divider); + border-radius: 8px; + background: var(--hover-bg-subtle); +} + +.freeformVoteText { + min-width: 0; + color: var(--text-secondary); + font-size: 12px; + line-height: 1.4; + overflow-wrap: anywhere; +} + .optionDesc { font-size: 12px; color: var(--text-dim); diff --git a/src/ui/src/components/chat/AgentQuestionCard.tsx b/src/ui/src/components/chat/AgentQuestionCard.tsx index 9c4d813c5..5b289e74a 100644 --- a/src/ui/src/components/chat/AgentQuestionCard.tsx +++ b/src/ui/src/components/chat/AgentQuestionCard.tsx @@ -20,6 +20,78 @@ export function AgentQuestionCard({ const { t } = useTranslation("chat"); const total = question.questions.length; const isSingleQuestion = total === 1; + const participantNames = new Map( + (question.requiredVoters ?? []).map((p) => [p.id, p.display_name]), + ); + const votes = question.votes ?? {}; + + const voterName = (participantId: string) => + participantNames.get(participantId) ?? participantId; + + const votesForAnswer = (questionText: string, answer: string) => + Object.entries(votes) + .filter(([, vote]) => { + const value = vote.answers[questionText]; + if (!value) return false; + return value + .split(",") + .map((part) => part.trim()) + .includes(answer); + }) + .map(([participantId]) => ({ + participantId, + name: voterName(participantId), + })); + + const freeformVotes = ( + questionText: string, + options: Array<{ label: string }>, + ) => + Object.entries(votes) + .flatMap(([participantId, vote]) => { + const value = vote.answers[questionText]; + if (!value) return []; + const selectedOptions = value.split(",").map((part) => part.trim()); + const isOption = options.some((option) => + selectedOptions.includes(option.label), + ); + return isOption + ? [] + : [{ participantId, name: voterName(participantId), answer: value }]; + }); + + const renderBadges = ( + voters: Array<{ participantId: string; name: string }>, + ) => { + if (voters.length === 0) return null; + return ( + + {voters.map((voter) => ( + + {voter.name} + + ))} + + ); + }; + + const renderFreeformVotes = ( + questionText: string, + options: Array<{ label: string }>, + ) => { + const items = freeformVotes(questionText, options); + if (items.length === 0) return null; + return ( +
+ {items.map((item) => ( + + {item.name} + {item.answer} + + ))} +
+ ); + }; // All hooks declared unconditionally (React rules of hooks) const [selections, setSelections] = useState>>( @@ -76,6 +148,7 @@ export function AgentQuestionCard({ onClick={() => toggleSingle(optIdx)} > {opt.label} + {renderBadges(votesForAnswer(q.question, opt.label))} {opt.description && ( {opt.description} @@ -86,6 +159,7 @@ export function AgentQuestionCard({ })} )} + {renderFreeformVotes(q.question, q.options)} {isMulti && hasSelections && ( + )} + {showMenu && ( +
+ + +
+ )} + + ); + })} + + ); +} + +const moderationItemStyle: CSSProperties = { + display: "flex", + alignItems: "center", + gap: 8, + padding: "6px 10px", + background: "transparent", + border: "none", + textAlign: "left", + cursor: "pointer", + color: "inherit", + fontSize: 12, +}; diff --git a/src/ui/src/components/chat/PlanApprovalCard.module.css b/src/ui/src/components/chat/PlanApprovalCard.module.css index d69b6117d..83fd538b4 100644 --- a/src/ui/src/components/chat/PlanApprovalCard.module.css +++ b/src/ui/src/components/chat/PlanApprovalCard.module.css @@ -282,10 +282,15 @@ transition: background var(--transition-fast); } -.approveBtn:hover { +.approveBtn:hover:not(:disabled) { background: rgba(var(--accent-primary-rgb), 0.18); } +.approveBtn:disabled { + opacity: 0.45; + cursor: not-allowed; +} + .divider { display: flex; align-items: center; @@ -335,6 +340,11 @@ color: var(--text-faint); } +.freeformInput:disabled { + opacity: 0.6; + cursor: not-allowed; +} + .feedbackBtn { background: transparent; border: 1px solid rgba(var(--accent-primary-rgb), 0.25); diff --git a/src/ui/src/components/chat/PlanApprovalCard.tsx b/src/ui/src/components/chat/PlanApprovalCard.tsx index cf235467a..9148e66e5 100644 --- a/src/ui/src/components/chat/PlanApprovalCard.tsx +++ b/src/ui/src/components/chat/PlanApprovalCard.tsx @@ -3,6 +3,8 @@ import { useTranslation } from "react-i18next"; import { writeText as clipboardWriteText } from "@tauri-apps/plugin-clipboard-manager"; import { MessageMarkdown } from "./MessageMarkdown"; import type { PlanApproval } from "../../stores/useAppStore"; +import { useAppStore } from "../../stores/useAppStore"; +import { useSelfParticipantId } from "../../hooks/useSelfParticipantId"; import { readPlanFile, sendRemoteCommand } from "../../services/tauri"; import styles from "./PlanApprovalCard.module.css"; @@ -13,7 +15,7 @@ interface PlanApprovalCardProps { * ExitPlanMode tool's `call()` (which writes the plan file and emits the * real tool_result). `approved=false` sends a deny with the given reason. */ - onRespond: (approved: boolean, reason?: string) => void; + onRespond: (approved: boolean, reason?: string) => void | Promise; remoteConnectionId?: string; } @@ -30,10 +32,14 @@ export function PlanApprovalCard({ const [feedback, setFeedback] = useState(""); const [copied, setCopied] = useState(false); const [copying, setCopying] = useState(false); + const [submitting, setSubmitting] = useState(false); const copyTimeoutRef = useRef(null); + const mountedRef = useRef(true); + const submittingRef = useRef(false); useEffect(() => { return () => { + mountedRef.current = false; if (copyTimeoutRef.current !== null) { window.clearTimeout(copyTimeoutRef.current); } @@ -46,6 +52,7 @@ export function PlanApprovalCard({ let content: string; if (remoteConnectionId) { content = (await sendRemoteCommand(remoteConnectionId, "read_plan_file", { + chat_session_id: approval.sessionId, path: approval.planFilePath, })) as string; } else { @@ -95,6 +102,20 @@ export function PlanApprovalCard({ } }; + const submitResponse = async (approved: boolean, reason?: string) => { + if (submittingRef.current) return; + submittingRef.current = true; + setSubmitting(true); + try { + await onRespond(approved, reason); + } finally { + submittingRef.current = false; + if (mountedRef.current) { + setSubmitting(false); + } + } + }; + return (
{t("plan_approval_label")}
@@ -164,9 +185,14 @@ export function PlanApprovalCard({
)} + + @@ -178,11 +204,12 @@ export function PlanApprovalCard({ className={styles.freeformInput} value={feedback} onChange={(e) => setFeedback(e.target.value)} + disabled={submitting} onKeyDown={(e) => { if (e.key === "Enter" && !e.shiftKey) { e.preventDefault(); const text = feedback.trim(); - if (text) onRespond(false, text); + if (text) void submitResponse(false, text); } }} placeholder={t("plan_approval_feedback_placeholder")} @@ -192,9 +219,9 @@ export function PlanApprovalCard({ className={styles.feedbackBtn} onClick={() => { const text = feedback.trim(); - if (text) onRespond(false, text); + if (text) void submitResponse(false, text); }} - disabled={!feedback.trim()} + disabled={submitting || !feedback.trim()} > {t("plan_approval_send")} @@ -202,3 +229,58 @@ export function PlanApprovalCard({ ); } + + +/** + * Render the per-voter vote state for an open consensus round. No-op when + * the session has no open vote (solo or non-consensus collab) — the card + * then behaves identically to its pre-collab single-shot form. + * + * The local host's voting status is derived from the `votes` map keyed by + * `"host"`, matching what the Rust resolver records. + */ +function ConsensusProgress({ approval }: { approval: PlanApproval }) { + const { t } = useTranslation("chat"); + const vote = useAppStore((s) => s.consensusVotes[approval.sessionId]); + // Compare voter ids against the local participant's id (the workspace's + // self-pid), NOT the literal `"host"` — on a remote client the local + // user's pid is the remote-issued string, so hardcoding `"host"` would + // mark the host as "you" for every remote viewer of the same plan card. + const selectedWorkspaceId = useAppStore((s) => s.selectedWorkspaceId); + const selfParticipantId = useSelfParticipantId(selectedWorkspaceId); + if (!vote || vote.toolUseId !== approval.toolUseId) { + return null; + } + const totalRequired = vote.requiredVoters.length; + const totalVoted = Object.keys(vote.votes).length; + return ( +
+
+ + {t("plan_approval_consensus_required", { + voted: totalVoted, + required: totalRequired, + })} + +
+ {vote.requiredVoters.map((voter) => { + const cast = vote.votes[voter.id]; + const status = cast + ? cast.kind === "approve" + ? t("plan_approval_vote_approved") + : t("plan_approval_vote_denied", { reason: cast.reason }) + : t("plan_approval_vote_waiting"); + const isSelf = voter.id === selfParticipantId; + return ( +
+ {voter.display_name} + {isSelf ? ` ${t("plan_approval_you_marker")}` : ""} + {voter.is_host ? ` · ${t("plan_approval_host_marker")}` : ""} + {": "} + {status} +
+ ); + })} +
+ ); +} diff --git a/src/ui/src/components/chat/SessionTabs.tsx b/src/ui/src/components/chat/SessionTabs.tsx index 9339645f8..7394a01b2 100644 --- a/src/ui/src/components/chat/SessionTabs.tsx +++ b/src/ui/src/components/chat/SessionTabs.tsx @@ -19,6 +19,7 @@ import { renameChatSession, archiveChatSession, reorderChatSessions, + sendRemoteCommand, } from "../../services/tauri"; import { useTabDragReorder } from "../../hooks/useTabDragReorder"; import { TabDragGhost } from "../shared/TabDragGhost"; @@ -146,9 +147,22 @@ export function SessionTabs({ workspaceId }: Props) { const [creating, setCreating] = useState(false); // Load sessions for this workspace on mount / workspace change. + // For remote workspaces, route through the share's WebSocket — the + // local DB on the remote machine doesn't have the host's chat sessions. + // Without this, `activeSessionId` would never populate for remote + // workspaces, ChatPanel's join_session effect would never fire, and + // the collab session would be inert (no participants, no broadcast). useEffect(() => { const version = ++loadVersionRef.current; - listChatSessions(workspaceId, false) + const ws = useAppStore.getState().workspaces.find((w) => w.id === workspaceId); + const remoteConnId = ws?.remote_connection_id ?? null; + const promise = remoteConnId + ? (sendRemoteCommand(remoteConnId, "list_chat_sessions", { + workspace_id: workspaceId, + include_archived: false, + }) as Promise) + : listChatSessions(workspaceId, false); + promise .then((sessions) => { if (version === loadVersionRef.current) { setSessionsForWorkspace(workspaceId, sessions); diff --git a/src/ui/src/components/chat/chatConstants.ts b/src/ui/src/components/chat/chatConstants.ts index 6f93c796d..703e81f8e 100644 --- a/src/ui/src/components/chat/chatConstants.ts +++ b/src/ui/src/components/chat/chatConstants.ts @@ -1,4 +1,5 @@ import type { CompletedTurn, ToolActivity } from "../../stores/useAppStore"; +import type { Participant } from "../../stores/slices/collabSlice"; import type { ChatAttachment } from "../../types/chat"; import type { ConversationCheckpoint } from "../../types/checkpoint"; @@ -13,6 +14,7 @@ export const EMPTY_COMPLETED_TURNS: readonly CompletedTurn[] = Object.freeze([]) export const EMPTY_ACTIVITIES: readonly ToolActivity[] = Object.freeze([]); export const EMPTY_ATTACHMENTS: readonly ChatAttachment[] = Object.freeze([]); export const EMPTY_CHECKPOINTS: readonly ConversationCheckpoint[] = Object.freeze([]); +export const EMPTY_PARTICIPANTS: readonly Participant[] = Object.freeze([]); export type RollbackModalData = { workspaceId: string; diff --git a/src/ui/src/components/chat/planFilePath.test.ts b/src/ui/src/components/chat/planFilePath.test.ts index 9a44d9ccc..817c24cef 100644 --- a/src/ui/src/components/chat/planFilePath.test.ts +++ b/src/ui/src/components/chat/planFilePath.test.ts @@ -25,7 +25,7 @@ function msg( input_tokens: null, output_tokens: null, cache_read_tokens: null, - cache_creation_tokens: null, + cache_creation_tokens: null, author_participant_id: null, author_display_name: null, }; } diff --git a/src/ui/src/components/chat/remoteJoinSessionSnapshot.test.ts b/src/ui/src/components/chat/remoteJoinSessionSnapshot.test.ts new file mode 100644 index 000000000..bce7647c6 --- /dev/null +++ b/src/ui/src/components/chat/remoteJoinSessionSnapshot.test.ts @@ -0,0 +1,202 @@ +import { describe, expect, it, vi } from "vitest"; +import { applyRemoteJoinSessionSnapshot } from "./remoteJoinSessionSnapshot"; +import type { Participant } from 
"../../stores/slices/collabSlice"; + +const alice: Participant = { + id: "alice-id", + display_name: "Alice", + is_host: false, + joined_at: 1, + muted: false, +}; + +const host: Participant = { + id: "host", + display_name: "Host", + is_host: true, + joined_at: 0, + muted: false, +}; + +describe("applyRemoteJoinSessionSnapshot", () => { + it("hydrates participants from the join_session snapshot", () => { + const setParticipants = vi.fn(); + const setTurnHolder = vi.fn(); + + applyRemoteJoinSessionSnapshot( + "session-1", + { + participants: [host, alice], + turn_holder: null, + }, + { + setParticipants, + setTurnHolder, + }, + ); + + expect(setParticipants).toHaveBeenCalledWith("session-1", [host, alice]); + expect(setTurnHolder).toHaveBeenCalledWith("session-1", null); + }); + + it("hydrates the current turn holder display name when present", () => { + const setParticipants = vi.fn(); + const setTurnHolder = vi.fn(); + const setPromptStartTime = vi.fn(); + const setSelectedModel = vi.fn(); + const setPlanMode = vi.fn(); + + applyRemoteJoinSessionSnapshot( + "session-1", + { + participants: [host, alice], + turn_holder: "alice-id", + turn_started_at_ms: 1710000000000, + turn_settings: { + model: "opus", + plan_mode: true, + }, + }, + { + setParticipants, + setTurnHolder, + setPromptStartTime, + setSelectedModel, + setPlanMode, + }, + ); + + expect(setTurnHolder).toHaveBeenCalledWith("session-1", { + participant_id: "alice-id", + display_name: "Alice", + }); + expect(setPromptStartTime).toHaveBeenCalledWith(1710000000000); + expect(setSelectedModel).toHaveBeenCalledWith("session-1", "opus"); + expect(setPlanMode).toHaveBeenCalledWith("session-1", true); + }); + + it("ignores non-object 1:1 responses", () => { + const setParticipants = vi.fn(); + const setTurnHolder = vi.fn(); + + applyRemoteJoinSessionSnapshot("session-1", null, { + setParticipants, + setTurnHolder, + }); + + expect(setParticipants).not.toHaveBeenCalled(); + expect(setTurnHolder).not.toHaveBeenCalled(); + }); + + it("hydrates pending plan approval consensus after reconnect", () => { + const setParticipants = vi.fn(); + const setTurnHolder = vi.fn(); + const openConsensusVote = vi.fn(); + const recordConsensusVote = vi.fn(); + const setPlanApproval = vi.fn(); + const setPlanMode = vi.fn(); + + applyRemoteJoinSessionSnapshot( + "session-1", + { + participants: [host, alice], + pending_vote: { + tool_use_id: "tool-1", + required_voters: [host, alice], + votes: { "alice-id": { kind: "approve" } }, + plan_file_path: + "/repo/.claude/plans/testing-plan-mode-make-precious-umbrella.md", + input: { + allowedPrompts: [{ tool: "Edit", prompt: "Allowed edit" }], + }, + }, + }, + { + setParticipants, + setTurnHolder, + openConsensusVote, + recordConsensusVote, + setPlanApproval, + setPlanMode, + }, + ); + + expect(openConsensusVote).toHaveBeenCalledWith("session-1", "tool-1", [ + host, + alice, + ]); + expect(recordConsensusVote).toHaveBeenCalledWith( + "session-1", + "tool-1", + "alice-id", + { kind: "approve" }, + ); + expect(setPlanApproval).toHaveBeenCalledWith({ + sessionId: "session-1", + toolUseId: "tool-1", + planFilePath: + "/repo/.claude/plans/testing-plan-mode-make-precious-umbrella.md", + allowedPrompts: [{ tool: "Edit", prompt: "Allowed edit" }], + }); + expect(setPlanMode).toHaveBeenCalledWith("session-1", false); + }); + + it("hydrates pending AskUserQuestion after reconnect", () => { + const setParticipants = vi.fn(); + const setTurnHolder = vi.fn(); + const setAgentQuestion = vi.fn(); + + 
applyRemoteJoinSessionSnapshot( + "session-1", + { + participants: [host, alice], + pending_question: { + tool_use_id: "question-tool", + required_voters: [host, alice], + votes: { + "alice-id": { + answers: { "What should change?": "Simplify" }, + }, + }, + input: { + question: "What should change?", + options: [ + { + label: "Simplify", + description: "Cut it down", + }, + ], + }, + }, + }, + { + setParticipants, + setTurnHolder, + setAgentQuestion, + }, + ); + + expect(setAgentQuestion).toHaveBeenCalledWith({ + sessionId: "session-1", + toolUseId: "question-tool", + requiredVoters: [host, alice], + votes: { + "alice-id": { + answers: { "What should change?": "Simplify" }, + }, + }, + questions: [ + { + question: "What should change?", + options: [ + { + label: "Simplify", + description: "Cut it down", + }, + ], + multiSelect: false, + }, + ], + }); + }); +}); diff --git a/src/ui/src/components/chat/remoteJoinSessionSnapshot.ts b/src/ui/src/components/chat/remoteJoinSessionSnapshot.ts new file mode 100644 index 000000000..7070d232b --- /dev/null +++ b/src/ui/src/components/chat/remoteJoinSessionSnapshot.ts @@ -0,0 +1,186 @@ +import type { + Participant, + ParticipantVote, +} from "../../stores/slices/collabSlice"; +import { parseAskUserQuestion } from "../../hooks/parseAgentQuestion"; + +interface PendingVoteSnapshot { + tool_use_id?: string; + required_voters?: Participant[]; + votes?: Record; + input?: unknown; + plan_file_path?: string | null; +} + +interface PendingQuestionSnapshot { + tool_use_id?: string; + required_voters?: Participant[]; + votes?: Record }>; + input?: unknown; +} + +interface JoinSessionSnapshot { + participants?: Participant[]; + turn_holder?: string | null; + turn_started_at_ms?: number | null; + turn_settings?: { + model?: string | null; + plan_mode?: boolean; + } | null; + pending_vote?: PendingVoteSnapshot | null; + pending_question?: PendingQuestionSnapshot | null; +} + +interface SnapshotActions { + setParticipants: (sessionId: string, participants: Participant[]) => void; + setTurnHolder: ( + sessionId: string, + holder: { participant_id: string; display_name: string } | null, + ) => void; + setPromptStartTime?: (startedAtMs: number) => void; + setSelectedModel?: (sessionId: string, model: string) => void; + setPlanMode?: (sessionId: string, enabled: boolean) => void; + openConsensusVote?: ( + sessionId: string, + toolUseId: string, + requiredVoters: Participant[], + ) => void; + recordConsensusVote?: ( + sessionId: string, + toolUseId: string, + participantId: string, + vote: ParticipantVote, + ) => void; + setPlanApproval?: (approval: { + sessionId: string; + toolUseId: string; + planFilePath: string | null; + allowedPrompts: Array<{ tool: string; prompt: string }>; + }) => void; + setAgentQuestion?: (question: { + sessionId: string; + toolUseId: string; + questions: ReturnType; + requiredVoters?: Participant[]; + votes?: Record }>; + }) => void; +} + +function asSnapshot(value: unknown): JoinSessionSnapshot | null { + if (!value || typeof value !== "object") return null; + return value as JoinSessionSnapshot; +} + +export function applyRemoteJoinSessionSnapshot( + sessionId: string, + value: unknown, + actions: SnapshotActions, +): void { + const snapshot = asSnapshot(value); + if (!snapshot) return; + + const participants = Array.isArray(snapshot.participants) + ? 
snapshot.participants + : null; + if (participants) { + actions.setParticipants(sessionId, participants); + } + + if (typeof snapshot.turn_holder === "string") { + const holder = participants?.find((p) => p.id === snapshot.turn_holder); + actions.setTurnHolder(sessionId, { + participant_id: snapshot.turn_holder, + display_name: holder?.display_name ?? snapshot.turn_holder, + }); + } else if (snapshot.turn_holder === null) { + actions.setTurnHolder(sessionId, null); + } + + if ( + typeof snapshot.turn_started_at_ms === "number" && + snapshot.turn_started_at_ms > 0 + ) { + actions.setPromptStartTime?.(snapshot.turn_started_at_ms); + } + + const turnSettings = snapshot.turn_settings; + if (turnSettings && typeof turnSettings === "object") { + if (typeof turnSettings.model === "string" && turnSettings.model) { + actions.setSelectedModel?.(sessionId, turnSettings.model); + } + if (typeof turnSettings.plan_mode === "boolean") { + actions.setPlanMode?.(sessionId, turnSettings.plan_mode); + } + } + + const pendingVote = snapshot.pending_vote; + if ( + pendingVote && + typeof pendingVote.tool_use_id === "string" && + Array.isArray(pendingVote.required_voters) + ) { + actions.openConsensusVote?.( + sessionId, + pendingVote.tool_use_id, + pendingVote.required_voters, + ); + for (const [participantId, vote] of Object.entries(pendingVote.votes ?? {})) { + actions.recordConsensusVote?.( + sessionId, + pendingVote.tool_use_id, + participantId, + vote, + ); + } + actions.setPlanApproval?.({ + sessionId, + toolUseId: pendingVote.tool_use_id, + planFilePath: + typeof pendingVote.plan_file_path === "string" + ? pendingVote.plan_file_path + : null, + allowedPrompts: parseAllowedPrompts(pendingVote.input), + }); + actions.setPlanMode?.(sessionId, false); + } + + const pendingQuestion = snapshot.pending_question; + if ( + pendingQuestion && + typeof pendingQuestion.tool_use_id === "string" && + pendingQuestion.input && + typeof pendingQuestion.input === "object" + ) { + const questions = parseAskUserQuestion( + pendingQuestion.input as Record, + ); + if (questions.length > 0) { + actions.setAgentQuestion?.({ + sessionId, + toolUseId: pendingQuestion.tool_use_id, + questions, + requiredVoters: Array.isArray(pendingQuestion.required_voters) + ? 
pendingQuestion.required_voters + : undefined, + votes: pendingQuestion.votes, + }); + } + } +} + +function parseAllowedPrompts( + input: unknown, +): Array<{ tool: string; prompt: string }> { + if (!input || typeof input !== "object" || !("allowedPrompts" in input)) { + return []; + } + const allowedPrompts = (input as { allowedPrompts?: unknown }).allowedPrompts; + if (!Array.isArray(allowedPrompts)) return []; + return allowedPrompts.filter( + (item): item is { tool: string; prompt: string } => + !!item && + typeof item === "object" && + typeof (item as { tool?: unknown }).tool === "string" && + typeof (item as { prompt?: unknown }).prompt === "string", + ); +} diff --git a/src/ui/src/components/chat/submitAgentAnswerResponse.test.ts b/src/ui/src/components/chat/submitAgentAnswerResponse.test.ts new file mode 100644 index 000000000..b3e17a016 --- /dev/null +++ b/src/ui/src/components/chat/submitAgentAnswerResponse.test.ts @@ -0,0 +1,49 @@ +import { describe, expect, it, vi } from "vitest"; +import { submitAgentAnswerResponse } from "./submitAgentAnswerResponse"; + +describe("submitAgentAnswerResponse", () => { + it("submits local AskUserQuestion answers through the Tauri command", async () => { + const submitAgentAnswer = vi.fn().mockResolvedValue(undefined); + const sendRemoteCommand = vi.fn(); + + await submitAgentAnswerResponse({ + sessionId: "session-1", + toolUseId: "tool-1", + answers: { Choice: "A" }, + submitAgentAnswer, + sendRemoteCommand, + }); + + expect(submitAgentAnswer).toHaveBeenCalledWith("session-1", "tool-1", { + Choice: "A", + }, undefined); + expect(sendRemoteCommand).not.toHaveBeenCalled(); + }); + + it("submits remote AskUserQuestion answers through the collaboration RPC", async () => { + const submitAgentAnswer = vi.fn(); + const sendRemoteCommand = vi.fn().mockResolvedValue(null); + + await submitAgentAnswerResponse({ + sessionId: "session-1", + toolUseId: "tool-1", + answers: { Choice: "A" }, + annotations: { source: "button" }, + remoteConnectionId: "remote-1", + submitAgentAnswer, + sendRemoteCommand, + }); + + expect(sendRemoteCommand).toHaveBeenCalledWith( + "remote-1", + "submit_agent_answer", + { + chat_session_id: "session-1", + tool_use_id: "tool-1", + answers: { Choice: "A" }, + annotations: { source: "button" }, + }, + ); + expect(submitAgentAnswer).not.toHaveBeenCalled(); + }); +}); diff --git a/src/ui/src/components/chat/submitAgentAnswerResponse.ts b/src/ui/src/components/chat/submitAgentAnswerResponse.ts new file mode 100644 index 000000000..2bf8e87c0 --- /dev/null +++ b/src/ui/src/components/chat/submitAgentAnswerResponse.ts @@ -0,0 +1,40 @@ +export interface SubmitAgentAnswerResponseArgs { + sessionId: string; + toolUseId: string; + answers: Record; + annotations?: unknown; + remoteConnectionId?: string; + submitAgentAnswer: ( + sessionId: string, + toolUseId: string, + answers: Record, + annotations?: unknown, + ) => Promise; + sendRemoteCommand: ( + connectionId: string, + method: string, + params: Record, + ) => Promise; +} + +export async function submitAgentAnswerResponse({ + sessionId, + toolUseId, + answers, + annotations, + remoteConnectionId, + submitAgentAnswer, + sendRemoteCommand, +}: SubmitAgentAnswerResponseArgs): Promise { + if (remoteConnectionId) { + await sendRemoteCommand(remoteConnectionId, "submit_agent_answer", { + chat_session_id: sessionId, + tool_use_id: toolUseId, + answers, + annotations: annotations ?? 
null, + }); + return; + } + + await submitAgentAnswer(sessionId, toolUseId, answers, annotations); +} diff --git a/src/ui/src/components/chat/submitPlanApprovalResponse.test.ts b/src/ui/src/components/chat/submitPlanApprovalResponse.test.ts new file mode 100644 index 000000000..06d129d88 --- /dev/null +++ b/src/ui/src/components/chat/submitPlanApprovalResponse.test.ts @@ -0,0 +1,64 @@ +import { describe, expect, it, vi } from "vitest"; +import { + isStalePlanApprovalError, + submitPlanApprovalResponse, +} from "./submitPlanApprovalResponse"; + +describe("submitPlanApprovalResponse", () => { + it("routes local approvals through submitPlanApproval", async () => { + const submitPlanApproval = vi.fn().mockResolvedValue(undefined); + const sendRemoteCommand = vi.fn().mockResolvedValue(undefined); + + await submitPlanApprovalResponse({ + sessionId: "session-1", + toolUseId: "tool-1", + approved: true, + submitPlanApproval, + sendRemoteCommand, + }); + + expect(submitPlanApproval).toHaveBeenCalledWith( + "session-1", + "tool-1", + true, + undefined, + ); + expect(sendRemoteCommand).not.toHaveBeenCalled(); + }); + + it("routes remote approvals through vote_plan_approval", async () => { + const submitPlanApproval = vi.fn().mockResolvedValue(undefined); + const sendRemoteCommand = vi.fn().mockResolvedValue(undefined); + + await submitPlanApprovalResponse({ + sessionId: "session-1", + toolUseId: "tool-1", + approved: false, + reason: "Needs changes", + remoteConnectionId: "remote-1", + submitPlanApproval, + sendRemoteCommand, + }); + + expect(sendRemoteCommand).toHaveBeenCalledWith( + "remote-1", + "vote_plan_approval", + { + chat_session_id: "session-1", + tool_use_id: "tool-1", + approved: false, + reason: "Needs changes", + }, + ); + expect(submitPlanApproval).not.toHaveBeenCalled(); + }); + + it("identifies stale plan approval backend errors", () => { + expect( + isStalePlanApprovalError( + "No pending permission request for tool_use_id tool-1 (pending: [])", + ), + ).toBe(true); + expect(isStalePlanApprovalError("network unavailable")).toBe(false); + }); +}); diff --git a/src/ui/src/components/chat/submitPlanApprovalResponse.ts b/src/ui/src/components/chat/submitPlanApprovalResponse.ts new file mode 100644 index 000000000..cc5974fc8 --- /dev/null +++ b/src/ui/src/components/chat/submitPlanApprovalResponse.ts @@ -0,0 +1,47 @@ +interface SubmitPlanApprovalResponseArgs { + sessionId: string; + toolUseId: string; + approved: boolean; + reason?: string; + remoteConnectionId?: string | null; + submitPlanApproval: ( + sessionId: string, + toolUseId: string, + approved: boolean, + reason?: string, + ) => Promise; + sendRemoteCommand: ( + connectionId: string, + method: string, + params: Record, + ) => Promise; +} + +const STALE_PLAN_APPROVAL_ERROR = + "No pending permission request for tool_use_id"; + +export function isStalePlanApprovalError(error: unknown): boolean { + return String(error).includes(STALE_PLAN_APPROVAL_ERROR); +} + +export async function submitPlanApprovalResponse({ + sessionId, + toolUseId, + approved, + reason, + remoteConnectionId, + submitPlanApproval, + sendRemoteCommand, +}: SubmitPlanApprovalResponseArgs): Promise { + if (remoteConnectionId) { + await sendRemoteCommand(remoteConnectionId, "vote_plan_approval", { + chat_session_id: sessionId, + tool_use_id: toolUseId, + approved, + reason: reason ?? 
null, + }); + return; + } + + await submitPlanApproval(sessionId, toolUseId, approved, reason); +} diff --git a/src/ui/src/components/chat/userMessageAuthorLabel.test.ts b/src/ui/src/components/chat/userMessageAuthorLabel.test.ts new file mode 100644 index 000000000..b81196e5a --- /dev/null +++ b/src/ui/src/components/chat/userMessageAuthorLabel.test.ts @@ -0,0 +1,63 @@ +import { describe, expect, it } from "vitest"; +import { userMessageAuthorLabel } from "./userMessageAuthorLabel"; +import type { Participant } from "../../stores/slices/collabSlice"; + +const host: Participant = { + id: "host", + display_name: "halcyon", + is_host: true, + joined_at: 1, + muted: false, +}; + +const guest: Participant = { + id: "guest-pid", + display_name: "bender", + is_host: false, + joined_at: 2, + muted: false, +}; + +describe("userMessageAuthorLabel", () => { + it("labels the local participant as You", () => { + expect(userMessageAuthorLabel({ + author_participant_id: "guest-pid", + author_display_name: "bender", + }, "guest-pid", [host, guest], "You")).toBe("You"); + }); + + it("labels a stamped host message by host display name on a remote client", () => { + expect(userMessageAuthorLabel({ + author_participant_id: "host", + author_display_name: null, + }, "guest-pid", [host, guest], "You")).toBe("halcyon"); + }); + + it("uses a neutral fallback for an unknown stamped non-self participant", () => { + expect(userMessageAuthorLabel({ + author_participant_id: "missing-pid", + author_display_name: null, + }, "guest-pid", [host, guest], "You", "User")).toBe("User"); + }); + + it("labels an unstamped user message as host on a remote collaborative client", () => { + expect(userMessageAuthorLabel({ + author_participant_id: null, + author_display_name: null, + }, "guest-pid", [host, guest], "You")).toBe("halcyon"); + }); + + it("uses the remote host fallback before the participant roster hydrates", () => { + expect(userMessageAuthorLabel({ + author_participant_id: null, + author_display_name: null, + }, "guest-pid", [], "You", "User", "halcyon")).toBe("halcyon"); + }); + + it("keeps unstamped local host messages labeled as You", () => { + expect(userMessageAuthorLabel({ + author_participant_id: null, + author_display_name: null, + }, "host", [host, guest], "You")).toBe("You"); + }); +}); diff --git a/src/ui/src/components/chat/userMessageAuthorLabel.ts b/src/ui/src/components/chat/userMessageAuthorLabel.ts new file mode 100644 index 000000000..81b517c2c --- /dev/null +++ b/src/ui/src/components/chat/userMessageAuthorLabel.ts @@ -0,0 +1,36 @@ +import type { Participant } from "../../stores/slices/collabSlice"; +import type { ChatMessage } from "../../types/chat"; + +export function userMessageAuthorLabel( + msg: Pick, + selfParticipantId: string | null, + participants: Participant[], + youLabel: string, + userLabel = "User", + hostFallbackName?: string | null, +): string { + const participantNameById = new Map( + participants.map((p) => [p.id, p.display_name]), + ); + + if (msg.author_participant_id != null) { + if (msg.author_participant_id === selfParticipantId) { + return youLabel; + } + return ( + msg.author_display_name ?? + participantNameById.get(msg.author_participant_id) ?? 
+ userLabel + ); + } + + const host = participants.find((p) => p.is_host); + if (host && selfParticipantId !== host.id) { + return host.display_name; + } + if (hostFallbackName && selfParticipantId !== "host") { + return hostFallbackName; + } + + return youLabel; +} diff --git a/src/ui/src/components/command-palette/CommandPalette.tsx b/src/ui/src/components/command-palette/CommandPalette.tsx index cd9b0cbef..42caa8b1f 100644 --- a/src/ui/src/components/command-palette/CommandPalette.tsx +++ b/src/ui/src/components/command-palette/CommandPalette.tsx @@ -164,7 +164,7 @@ export function CommandPalette() { duration_ms: null, created_at: new Date().toISOString(), thinking: null, - input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, + input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, author_participant_id: null, author_display_name: null, }); } // Check for setup script and prompt for confirmation. @@ -189,7 +189,7 @@ export function CommandPalette() { cost_usd: null, duration_ms: null, created_at: new Date().toISOString(), thinking: null, - input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, + input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, author_participant_id: null, author_display_name: null, }); } }).catch((err) => { @@ -202,7 +202,7 @@ export function CommandPalette() { cost_usd: null, duration_ms: null, created_at: new Date().toISOString(), thinking: null, - input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, + input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, author_participant_id: null, author_display_name: null, }); }); } else { diff --git a/src/ui/src/components/modals/ConfirmSetupScriptModal.tsx b/src/ui/src/components/modals/ConfirmSetupScriptModal.tsx index 20cd8e04f..ba217b4d0 100644 --- a/src/ui/src/components/modals/ConfirmSetupScriptModal.tsx +++ b/src/ui/src/components/modals/ConfirmSetupScriptModal.tsx @@ -49,7 +49,7 @@ export function ConfirmSetupScriptModal() { input_tokens: null, output_tokens: null, cache_read_tokens: null, - cache_creation_tokens: null, + cache_creation_tokens: null, author_participant_id: null, author_display_name: null, }); } closeModal(); @@ -67,7 +67,7 @@ export function ConfirmSetupScriptModal() { input_tokens: null, output_tokens: null, cache_read_tokens: null, - cache_creation_tokens: null, + cache_creation_tokens: null, author_participant_id: null, author_display_name: null, }); closeModal(); } diff --git a/src/ui/src/components/modals/ShareModal.tsx b/src/ui/src/components/modals/ShareModal.tsx index 3eec4e518..1e3b2d650 100644 --- a/src/ui/src/components/modals/ShareModal.tsx +++ b/src/ui/src/components/modals/ShareModal.tsx @@ -1,61 +1,393 @@ +import { useEffect, useState } from "react"; import { useTranslation } from "react-i18next"; import { useAppStore } from "../../stores/useAppStore"; -import { stopLocalServer } from "../../services/tauri"; +import { + listShares, + startShare, + stopShare, + type ShareSummary, +} from "../../services/tauri"; import { Modal } from "./Modal"; import shared from "./shared.module.css"; +/** + * Share modal — the canonical entry point for any kind of network sharing + * from this machine. 
+ * + * Each share is a **workspace-scoped authorization grant**: it has its own + * pairing token, a list of workspace ids it permits access to, and a mode + * (1:1 remote control or collaborative). Multiple shares can be active at + * once — useful when the user wants to share work workspaces with one + * group and OSS workspaces with another, for example. + * + * The modal has two views: + * - **Active shares list** — see what's currently shared, copy a + * connection string, or revoke. + * - **New share form** — pick the workspaces, choose 1:1 or collab, name + * the share, mint a pairing token. + */ export function ShareModal() { const { t } = useTranslation("modals"); - const { t: tCommon } = useTranslation("common"); const closeModal = useAppStore((s) => s.closeModal); - const connectionString = useAppStore((s) => s.localServerConnectionString); - const setRunning = useAppStore((s) => s.setLocalServerRunning); - const setConnectionString = useAppStore((s) => s.setLocalServerConnectionString); + const workspaces = useAppStore((s) => s.workspaces); + const repositories = useAppStore((s) => s.repositories); + const collabDefaultConsensus = useAppStore( + (s) => s.collabDefaultConsensusRequired, + ); + // Mirror the active-shares count into the store so ShareButton (and + // any other consumer) can show "active vs idle" styling that reflects + // the real workspace-scoped share state, not the legacy + // `localServerRunning` flag. + const setActiveSharesCount = useAppStore((s) => s.setActiveSharesCount); + + const [shares, setShares] = useState([]); + const [view, setView] = useState<"list" | "new">("list"); + const [error, setError] = useState(null); + const [busy, setBusy] = useState(false); + + // New-share form state. + const [label, setLabel] = useState(""); + const [pickedWorkspaceIds, setPickedWorkspaceIds] = useState>( + new Set(), + ); + const [collaborative, setCollaborative] = useState(false); + const [consensusRequired, setConsensusRequired] = useState( + collabDefaultConsensus, + ); + + // Refresh the active shares list whenever the modal opens or after + // start/stop. The Rust side is authoritative — we never hold state + // optimistically that disagrees with the server. + const refresh = async () => { + try { + const next = await listShares(); + setShares(next); + setActiveSharesCount(next.length); + } catch (e) { + // listShares fails harmlessly when the server feature is off. + console.error("listShares:", e); + } + }; + + useEffect(() => { + void refresh(); + }, []); + + // Filter to local workspaces only — shares are minted on the host, so a + // remote workspace shouldn't appear here (it isn't on this machine). + const localWorkspaces = workspaces.filter((w) => !w.remote_connection_id); + // Group workspaces by repository for nicer presentation in the picker. 
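For reference, a minimal sketch of the share-service surface this modal leans on. It is not the authoritative definition — the real declarations live in `src/ui/src/services/tauri` — and the `ShareSummary` field types here are inferred from how the rows below read them; return types the modal never inspects are assumed to be `void`.

// Sketch only: shapes inferred from this component's usage, not the
// actual service definitions.
interface ShareSummary {
  id: string;
  label: string | null;
  collaborative: boolean;
  consensus_required: boolean;
  allowed_workspace_ids: string[];
  session_count: number;
}

// Lifecycle as the modal drives it: list on open, start from the form,
// stop from a row, then re-list so the Rust side stays authoritative.
declare function listShares(): Promise<ShareSummary[]>;
declare function startShare(args: {
  label: string | null;
  workspaceIds: string[];
  collaborative: boolean;
  consensusRequired: boolean;
}): Promise<void>;
declare function stopShare(shareId: string): Promise<void>;

A revoke from a row is then just `await stopShare(sh.id)` followed by `refresh()`, mirroring `handleStop` below.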
+ const reposById = new Map(repositories.map((r) => [r.id, r])); + + const togglePick = (workspaceId: string) => { + setPickedWorkspaceIds((prev) => { + const next = new Set(prev); + if (next.has(workspaceId)) next.delete(workspaceId); + else next.add(workspaceId); + return next; + }); + }; + + const handleStart = async () => { + if (pickedWorkspaceIds.size === 0) { + setError(t("share_form_validation_pick_workspace")); + return; + } + setError(null); + setBusy(true); + try { + await startShare({ + label: label.trim() || null, + workspaceIds: Array.from(pickedWorkspaceIds), + collaborative, + consensusRequired: collaborative && consensusRequired, + }); + // Reset the form, switch back to list, and pull fresh state. + setLabel(""); + setPickedWorkspaceIds(new Set()); + setCollaborative(false); + setConsensusRequired(collabDefaultConsensus); + setView("list"); + await refresh(); + } catch (e) { + setError(String(e)); + } finally { + setBusy(false); + } + }; - const handleStop = async () => { + const handleStop = async (shareId: string) => { + setError(null); + setBusy(true); try { - await stopLocalServer(); - setRunning(false); - setConnectionString(null); - closeModal(); + await stopShare(shareId); + await refresh(); } catch (e) { - console.error("Failed to stop server:", e); + setError(String(e)); + } finally { + setBusy(false); } }; - const handleCopy = () => { - if (connectionString) { - navigator.clipboard.writeText(connectionString); + const copy = async (s: string) => { + try { + await navigator.clipboard.writeText(s); + } catch (e) { + setError(String(e)); } }; return ( -
- -
- (e.target as HTMLInputElement).select()} - /> - -
-
- {t("share_conn_hint")} -
+ {view === "list" ? ( + <> +
+ {t("share_list_hint")} +
+ + {shares.length === 0 ? ( +
+ {t("share_no_active")} +
+ ) : ( +
+ {shares.map((sh) => ( + handleStop(sh.id)} + onCopy={copy} + busy={busy} + /> + ))} +
+ )} + + {error &&
{error}
} + +
+ + +
+ + ) : ( + <> +
+ {t("share_form_hint")} +
+ +
+ + setLabel(e.target.value)} + /> +
+ +
+ +
+ {localWorkspaces.length === 0 ? ( +
+ {t("share_form_no_local_workspaces")} +
+ ) : ( + localWorkspaces.map((ws) => { + const repo = reposById.get(ws.repository_id); + return ( + + ); + }) + )} +
+
+ +
+ + {collaborative && ( + + )} +
+ + {error &&
{error}
} + +
+ + +
+ + )} + + ); +} + +function ShareRow({ + share, + workspaces, + onStop, + onCopy, + busy, +}: { + share: ShareSummary; + workspaces: { id: string; name: string }[]; + onStop: () => void; + onCopy: (s: string) => void | Promise; + busy: boolean; +}) { + const { t } = useTranslation("modals"); + const wsNames = share.allowed_workspace_ids + .map((id) => workspaces.find((w) => w.id === id)?.name ?? id) + .join(", "); + return ( +
+
+ + {share.label ?? t("share_row_untitled")} + + + {share.collaborative + ? t("share_row_mode_collaborative") + : t("share_row_mode_one_to_one")} + + {share.collaborative && share.consensus_required && ( + + {t("share_row_consensus_badge")} + + )} + + {t("share_row_connected_count", { count: share.session_count })} +
-
- -
- +
); } diff --git a/src/ui/src/components/settings/SettingsPage.tsx b/src/ui/src/components/settings/SettingsPage.tsx index 6215653ad..62000bf0b 100644 --- a/src/ui/src/components/settings/SettingsPage.tsx +++ b/src/ui/src/components/settings/SettingsPage.tsx @@ -60,6 +60,11 @@ const KeyboardSettings = lazy(() => const CliSettings = lazy(() => import("./sections/CliSettings").then((m) => ({ default: m.CliSettings })), ); +const CollaborationSettings = lazy(() => + import("./sections/CollaborationSettings").then((m) => ({ + default: m.CollaborationSettings, + })), +); function SectionContent({ section }: { section: string | null }) { const pluginManagementEnabled = useAppStore((s) => s.pluginManagementEnabled); @@ -75,6 +80,7 @@ function SectionContent({ section }: { section: string | null }) { if (section === "git") return ; if (section === "keyboard") return ; if (section === "cli") return ; + if (section === "collaboration") return ; if (section === "pinned-prompts") return ; if (section === "plugins") return ; if (section === "claude-code-plugins") { diff --git a/src/ui/src/components/settings/SettingsSidebar.tsx b/src/ui/src/components/settings/SettingsSidebar.tsx index a6d1b74aa..47bfa459d 100644 --- a/src/ui/src/components/settings/SettingsSidebar.tsx +++ b/src/ui/src/components/settings/SettingsSidebar.tsx @@ -12,6 +12,7 @@ import { Globe, Keyboard, Terminal, + Users, } from "lucide-react"; import { useTranslation } from "react-i18next"; import { useAppStore } from "../../stores/useAppStore"; @@ -31,6 +32,7 @@ export function getAppSections( { id: "git", icon: GitBranch }, { id: "keyboard", icon: Keyboard }, { id: "cli", icon: Terminal }, + { id: "collaboration", icon: Users }, { id: "pinned-prompts", icon: Bookmark }, { id: "plugins", icon: Puzzle }, ...(communityRegistryEnabled @@ -63,6 +65,7 @@ export function SettingsSidebar() { if (id === "git") return t("settings:nav_git"); if (id === "keyboard") return t("settings:nav_keyboard"); if (id === "cli") return t("settings:nav_cli"); + if (id === "collaboration") return t("settings:nav_collaboration"); if (id === "plugins") return t("settings:nav_plugins"); if (id === "claude-code-plugins") return t("settings:nav_claude_code_plugins"); if (id === "community") return t("settings:nav_community"); diff --git a/src/ui/src/components/settings/sections/CollaborationSettings.tsx b/src/ui/src/components/settings/sections/CollaborationSettings.tsx new file mode 100644 index 000000000..412fea0d3 --- /dev/null +++ b/src/ui/src/components/settings/sections/CollaborationSettings.tsx @@ -0,0 +1,124 @@ +import { useEffect, useState } from "react"; +import { useTranslation } from "react-i18next"; +import { useAppStore } from "../../../stores/useAppStore"; +import { setAppSetting } from "../../../services/tauri"; +import styles from "../Settings.module.css"; + +/** + * Settings for collaborative shared sessions. + * + * Today this surface holds two values: + * + * - **Display name**: stamped onto your user messages and shown in the + * participants roster of every collaborative session you're in. Empty + * means "fall back to the OS hostname" (the same default the legacy + * 1:1 pairing flow used). Persisted as `collab:display_name`. + * - **Default plan-approval consensus**: pre-checks the "require unanimous + * plan approval" toggle in the share dialog so users who always want + * consensus don't have to flip it every time. Persisted as + * `collab:default_consensus_required`. 
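+ *
+ * As a sketch of how these keys round-trip (assuming a `getAppSetting`
+ * read counterpart to the `setAppSetting` helper used below — that read
+ * API is hypothetical here, not something this diff adds), a startup
+ * hydration could look like:
+ *
+ *   const name = (await getAppSetting("collab:display_name")) ?? "";
+ *   setCollabDisplayName(name);
+ *   const consensus = await getAppSetting("collab:default_consensus_required");
+ *   setCollabDefaultConsensusRequired(consensus === "true");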
+ * + * The section is a single page rather than a modal because the user is + * likely to also adjust it alongside other shared-session preferences as + * we add them (e.g. avatar color, default mute behavior). + */ +export function CollaborationSettings() { + const { t } = useTranslation("settings"); + const displayName = useAppStore((s) => s.collabDisplayName); + const setDisplayName = useAppStore((s) => s.setCollabDisplayName); + const defaultConsensus = useAppStore( + (s) => s.collabDefaultConsensusRequired, + ); + const setDefaultConsensus = useAppStore( + (s) => s.setCollabDefaultConsensusRequired, + ); + + // Local input state lets us defer persistence to blur, matching the + // pattern of other text fields in Settings (Appearance/font sizes). + const [draftName, setDraftName] = useState(displayName); + useEffect(() => { + setDraftName(displayName); + }, [displayName]); + + const [error, setError] = useState(null); + + const persistName = async () => { + const trimmed = draftName.trim(); + if (trimmed === displayName) return; + try { + setError(null); + await setAppSetting("collab:display_name", trimmed); + setDisplayName(trimmed); + } catch (e) { + setDraftName(displayName); + setError(String(e)); + } + }; + + const toggleDefaultConsensus = async () => { + const next = !defaultConsensus; + setDefaultConsensus(next); + try { + setError(null); + await setAppSetting( + "collab:default_consensus_required", + next ? "true" : "false", + ); + } catch (e) { + setDefaultConsensus(!next); + setError(String(e)); + } + }; + + return ( +
+

{t("collab_section_title")}

+ + {error &&
{error}
} + +
+
+
+ {t("collab_display_name_label")} +
+
+ {t("collab_display_name_description")} +
+
+
+ setDraftName(e.target.value)} + onBlur={persistName} + /> +
+
+ +
+
+
+ {t("collab_default_consensus_label")} +
+
+ {t("collab_default_consensus_description")} +
+
+
+ +
+
+
+ ); +} diff --git a/src/ui/src/components/shared/WorkspacePanelHeader.tsx b/src/ui/src/components/shared/WorkspacePanelHeader.tsx index 1d9f261b7..efb133530 100644 --- a/src/ui/src/components/shared/WorkspacePanelHeader.tsx +++ b/src/ui/src/components/shared/WorkspacePanelHeader.tsx @@ -1,15 +1,23 @@ import { GitBranch } from "lucide-react"; import { useAppStore } from "../../stores/useAppStore"; +import { useSelfParticipantId } from "../../hooks/useSelfParticipantId"; import { WorkspaceActions } from "../chat/WorkspaceActions"; +import { ParticipantsRoster } from "../chat/ParticipantsRoster"; import { PanelToggles } from "./PanelToggles"; import styles from "./WorkspacePanelHeader.module.css"; export function WorkspacePanelHeader() { const selectedWorkspaceId = useAppStore((s) => s.selectedWorkspaceId); + const activeSessionId = useAppStore((s) => + s.selectedWorkspaceId + ? s.selectedSessionIdByWorkspaceId[s.selectedWorkspaceId] ?? null + : null, + ); const workspaces = useAppStore((s) => s.workspaces); const repositories = useAppStore((s) => s.repositories); const defaultBranchesMap = useAppStore((s) => s.defaultBranches); const sidebarVisible = useAppStore((s) => s.sidebarVisible); + const selfParticipantId = useSelfParticipantId(selectedWorkspaceId); const ws = workspaces.find((w) => w.id === selectedWorkspaceId); const repo = repositories.find((r) => r.id === ws?.repository_id); @@ -38,6 +46,12 @@ export function WorkspacePanelHeader() { ))}
+ {activeSessionId && ( + + )} diff --git a/src/ui/src/components/sidebar/Sidebar.tsx b/src/ui/src/components/sidebar/Sidebar.tsx index d14e3ad48..4d3bbe697 100644 --- a/src/ui/src/components/sidebar/Sidebar.tsx +++ b/src/ui/src/components/sidebar/Sidebar.tsx @@ -16,13 +16,13 @@ import { removeRemoteConnection, sendRemoteCommand, pairWithServer, - startLocalServer, openUrl, } from "../../services/tauri"; -import { Settings, Link, X, Share2, Plus, Globe, Archive, Trash2, CircleCheck, CircleAlert, CircleQuestionMark, Cog, Filter, LayoutDashboard, CircleDashed, CircleStop, GitPullRequestArrow, GitPullRequestDraft, GitMerge, GitPullRequestClosed, ChevronRight, ChevronDown, CircleHelp } from "lucide-react"; +import { Settings, Link, X, Share2, Plus, Globe, Archive, Trash2, Cog, Filter, LayoutDashboard, ChevronRight, ChevronDown, CircleHelp } from "lucide-react"; import { RepoIcon } from "../shared/RepoIcon"; import { extractRemoteWorkspace } from "./remoteWorkspaceResponse"; import { UpdateBanner } from "../layout/UpdateBanner"; +import { WorkspaceStatusIcon } from "./WorkspaceStatusIcon"; import { getScmSortPriority } from "../../utils/scmSortPriority"; import { useTabDragReorder } from "../../hooks/useTabDragReorder"; import { TabDragGhost } from "../shared/TabDragGhost"; @@ -150,6 +150,7 @@ export const Sidebar = memo(function Sidebar() { created_at: new Date().toISOString(), thinking: null, input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, + author_participant_id: null, author_display_name: null, }); } // Check if a setup script exists and prompt user to review it. @@ -175,6 +176,7 @@ export const Sidebar = memo(function Sidebar() { created_at: new Date().toISOString(), thinking: null, input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, + author_participant_id: null, author_display_name: null, }); } }).catch((err) => { @@ -188,6 +190,7 @@ export const Sidebar = memo(function Sidebar() { created_at: new Date().toISOString(), thinking: null, input_tokens: null, output_tokens: null, cache_read_tokens: null, cache_creation_tokens: null, + author_participant_id: null, author_display_name: null, }); }); } else { @@ -435,21 +438,25 @@ export const Sidebar = memo(function Sidebar() { dragEnabled && workspaceDrag.dropTarget?.id === ws.id && workspaceDrag.dropTarget.placement === "after"; + // Compute the unread/attention badge here only because the row's + // `wsUnread` className depends on it. The actual icon rendering lives + // in `WorkspaceStatusIcon`, which re-derives the same state — keeping + // the duplication in sync isn't a concern because the inputs are the + // same store slices and a divergence would surface immediately. const wsSessions = sessionsByWorkspace[ws.id] ?? []; const hasQuestion = wsSessions.some((s) => agentQuestions[s.id]); const hasPlan = wsSessions.some((s) => planApprovals[s.id]); - const badge: "ask" | "plan" | "done" | null = - hasQuestion ? "ask" : - hasPlan ? "plan" : - unreadCompletions.has(ws.id) && !isAgentBusy(ws.agent_status) ? "done" : - null; + const hasUnreadBadge = + hasQuestion + || hasPlan + || (unreadCompletions.has(ws.id) && !isAgentBusy(ws.agent_status)); return (
{ if (dragEnabled && workspaceDrag.justEnded()) return; selectWorkspace(ws.id); @@ -459,65 +466,7 @@ export const Sidebar = memo(function Sidebar() { onPointerUp={dragHandlers?.onPointerUp} onPointerCancel={dragHandlers?.onPointerCancel} > - {badge === "done" ? ( - - - - ) : badge === "plan" ? ( - - - - ) : badge === "ask" ? ( - - - - ) : ws.agent_status === "Running" || ws.agent_status === "Compacting" ? ( -