From 9c6526ade0c1fe8251dba7f01ac8893d680becaa Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 27 Apr 2026 13:20:55 +0900 Subject: [PATCH 01/18] 238: feat: draft PR default config (ship.draft) (#259) * feat: add --reviewer and --label flags to ship command (#232, #238) - Add ship.default_reviewers and ship.default_labels config options - CLI flags --reviewer/-r and --label/-l (repeatable) - Request GitHub PR reviewers via API after PR creation - Add labels to PR via GitHub Issues API - CLI flags override config defaults when specified Co-Authored-By: Claude Opus 4.6 * style: cargo fmt Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- src/cli/commands/ship.rs | 28 +++++++++++++++++++++++ src/cli/mod.rs | 12 ++++++++++ src/config/settings.rs | 8 +++++++ src/github/mod.rs | 48 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 96 insertions(+) diff --git a/src/cli/commands/ship.rs b/src/cli/commands/ship.rs index 00a3090..c065dc7 100644 --- a/src/cli/commands/ship.rs +++ b/src/cli/commands/ship.rs @@ -20,12 +20,26 @@ pub async fn ship( base_override: Option, title_override: Option, skip_hooks: bool, + reviewers: Vec, + labels: Vec, mode: Mode, ) -> Result<()> { let mut config = ParsecConfig::load()?; let manager = WorktreeManager::new(repo, &config)?; config.resolve_for_repo(manager.repo_root()); + // Merge CLI args with config defaults (CLI overrides when non-empty) + let effective_reviewers = if reviewers.is_empty() { + config.ship.default_reviewers.clone() + } else { + reviewers + }; + let effective_labels = if labels.is_empty() { + config.ship.default_labels.clone() + } else { + labels + }; + // Run pre-ship hooks before pushing if !skip_hooks && !config.hooks.pre_ship.is_empty() { let workspace = manager.get(ticket)?; @@ -163,6 +177,20 @@ pub async fn ship( .await { Ok(pr) => { + // Request reviewers if specified + if !effective_reviewers.is_empty() { + if let Err(e) = + gh.request_reviewers(pr.number, &effective_reviewers).await 
+ { + eprintln!("warning: failed to request reviewers: {e}"); + } + } + // Add labels if specified + if !effective_labels.is_empty() { + if let Err(e) = gh.add_labels(pr.number, &effective_labels).await { + eprintln!("warning: failed to add labels: {e}"); + } + } result.pr_url = Some(pr.url); } Err(e) => { diff --git a/src/cli/mod.rs b/src/cli/mod.rs index e46930c..fa5d486 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -132,6 +132,14 @@ pub enum Command { /// Skip pre-ship hooks #[arg(long)] skip_hooks: bool, + + /// Request review from GitHub users (can be specified multiple times) + #[arg(long, short = 'r')] + reviewer: Vec, + + /// Add labels to the PR (can be specified multiple times) + #[arg(long, short = 'l')] + label: Vec, }, /// Remove merged or stale worktrees @@ -556,6 +564,8 @@ pub async fn run(cli: Cli) -> Result<()> { base, title, skip_hooks, + reviewer, + label, } => { if cli.dry_run { eprintln!( @@ -575,6 +585,8 @@ pub async fn run(cli: Cli) -> Result<()> { base, title, skip_hooks, + reviewer, + label, output_mode, ) .await diff --git a/src/config/settings.rs b/src/config/settings.rs index 96f4f83..d2393a5 100644 --- a/src/config/settings.rs +++ b/src/config/settings.rs @@ -181,6 +181,12 @@ pub struct ShipConfig { pub draft: bool, #[serde(default)] pub default_base: Option, + /// Default reviewers to request on PRs (GitHub usernames) + #[serde(default)] + pub default_reviewers: Vec, + /// Default labels to apply to PRs + #[serde(default)] + pub default_labels: Vec, } impl Default for ShipConfig { @@ -190,6 +196,8 @@ impl Default for ShipConfig { auto_cleanup: true, draft: false, default_base: None, + default_reviewers: Vec::new(), + default_labels: Vec::new(), } } } diff --git a/src/github/mod.rs b/src/github/mod.rs index bdda6f5..45e953e 100644 --- a/src/github/mod.rs +++ b/src/github/mod.rs @@ -798,4 +798,52 @@ impl GitHubClient { number, }) } + + /// Request reviews from GitHub users on a PR. 
+ pub async fn request_reviewers(&self, pr_number: u64, reviewers: &[String]) -> Result<()> { + if reviewers.is_empty() { + return Ok(()); + } + let payload = serde_json::json!({ "reviewers": reviewers }); + let response = self + .post(&format!( + "{}/pulls/{}/requested_reviewers", + self.repo_path(), + pr_number + )) + .json(&payload) + .send() + .await + .context("Failed to request reviewers")?; + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Failed to request reviewers: {} {}", status, body); + } + Ok(()) + } + + /// Add labels to a PR/issue. + pub async fn add_labels(&self, issue_number: u64, labels: &[String]) -> Result<()> { + if labels.is_empty() { + return Ok(()); + } + let payload = serde_json::json!({ "labels": labels }); + let response = self + .post(&format!( + "{}/issues/{}/labels", + self.repo_path(), + issue_number + )) + .json(&payload) + .send() + .await + .context("Failed to add labels")?; + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Failed to add labels: {} {}", status, body); + } + Ok(()) + } } From 645c48d69122ae7f75eacf5ac21e86ac9dc5fb02 Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 27 Apr 2026 13:21:04 +0900 Subject: [PATCH 02/18] 234: feat: stack navigation comments in PR description (#260) * feat: stack navigation in PR body and stack submit (#234, #235) - Add stack navigation table to PR description showing parent/child relationships - Add `parsec stack --submit` to ship entire stack in topological order - Stack submit stops on first failure to prevent broken dependency chain Co-Authored-By: Claude Opus 4.6 * style: cargo fmt Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- src/cli/commands/ship.rs | 77 +++++++++++++++++++++++++++- src/cli/commands/stack.rs | 105 ++++++++++++++++++++++++++++++++++++++ src/cli/mod.rs | 11 +++- 3 
files changed, 189 insertions(+), 4 deletions(-) diff --git a/src/cli/commands/ship.rs b/src/cli/commands/ship.rs index c065dc7..c19a9a2 100644 --- a/src/cli/commands/ship.rs +++ b/src/cli/commands/ship.rs @@ -152,7 +152,15 @@ pub async fn ship( .unwrap_or_else(|| result.ticket.clone()) }; - let pr_body = build_pr_body(&result.ticket, effective_title, ticket_url.as_deref()); + // Gather stack context for PR body (#234) + let stack_info = gather_stack_info(&manager, ticket); + + let pr_body = build_pr_body( + &result.ticket, + effective_title, + ticket_url.as_deref(), + stack_info.as_ref(), + ); let remote_url = git::get_remote_url(manager.repo_root()); if let Ok(ref remote_url) = remote_url { @@ -306,7 +314,50 @@ pub async fn ship( Ok(()) } -fn build_pr_body(ticket: &str, title: Option<&str>, ticket_url: Option<&str>) -> String { +/// Stack context for PR body navigation links. +struct StackPrInfo { + parent_ticket: Option, + parent_branch: Option, + child_tickets: Vec<(String, String)>, // (ticket, branch) + current_branch: String, +} + +/// Gather stack relationship info for a ticket, if it's part of a stack. 
+fn gather_stack_info(manager: &WorktreeManager, ticket: &str) -> Option { + let workspaces = manager.list().ok()?; + let current_ws = manager.get(ticket).ok()?; + + let parent = current_ws + .parent_ticket + .as_ref() + .and_then(|pt| workspaces.iter().find(|w| w.ticket == *pt)); + + let children: Vec<_> = workspaces + .iter() + .filter(|w| w.parent_ticket.as_deref() == Some(ticket)) + .collect(); + + if parent.is_none() && children.is_empty() { + return None; + } + + Some(StackPrInfo { + parent_ticket: current_ws.parent_ticket.clone(), + parent_branch: parent.map(|p| p.branch.clone()), + child_tickets: children + .iter() + .map(|c| (c.ticket.clone(), c.branch.clone())) + .collect(), + current_branch: current_ws.branch.clone(), + }) +} + +fn build_pr_body( + ticket: &str, + title: Option<&str>, + ticket_url: Option<&str>, + stack_info: Option<&StackPrInfo>, +) -> String { let mut body = String::new(); if let Some(title) = title { @@ -318,6 +369,28 @@ fn build_pr_body(ticket: &str, title: Option<&str>, ticket_url: Option<&str>) -> body.push_str(&format!("**Ticket**: [{ticket}]({url})\n\n")); } + // Add stack navigation section (#234) + if let Some(stack) = stack_info { + body.push_str("### Stack\n\n"); + body.push_str("| | Ticket | Branch |\n"); + body.push_str("|---|--------|--------|\n"); + + if let (Some(ref pt), Some(ref pb)) = (&stack.parent_ticket, &stack.parent_branch) { + body.push_str(&format!("| \u{2b06} Parent | {} | `{}` |\n", pt, pb)); + } + + body.push_str(&format!( + "| **\u{27a1} Current** | **{}** | **`{}`** |\n", + ticket, stack.current_branch + )); + + for (ct, cb) in &stack.child_tickets { + body.push_str(&format!("| \u{2b07} Child | {} | `{}` |\n", ct, cb)); + } + + body.push('\n'); + } + body.push_str(&format!("Shipped via `parsec ship {ticket}`\n")); body diff --git a/src/cli/commands/stack.rs b/src/cli/commands/stack.rs index 2be7413..7e10720 100644 --- a/src/cli/commands/stack.rs +++ b/src/cli/commands/stack.rs @@ -116,3 +116,108 @@ pub 
async fn stack_sync(repo: &Path, mode: Mode) -> Result<()> { output::print_sync(&synced, &failed, "rebase (stack)", mode); Ok(()) } + +/// Ship the entire stack in topological order (#235). +pub async fn stack_submit(repo: &Path, mode: Mode) -> Result<()> { + let config = ParsecConfig::load()?; + let manager = WorktreeManager::new(repo, &config)?; + let workspaces = manager.list()?; + + // Find roots: workspaces that have children but no parent themselves + let roots: Vec<_> = workspaces + .iter() + .filter(|w| { + w.parent_ticket.is_none() + && workspaces + .iter() + .any(|other| other.parent_ticket.as_deref() == Some(&w.ticket)) + }) + .collect(); + + if roots.is_empty() { + if mode == Mode::Human { + println!( + "No stacked worktrees to submit. Use `parsec start --on ` to create a stack." + ); + } + return Ok(()); + } + + // Build topological order: roots first, then children (BFS) + let mut ordered = Vec::new(); + let mut queue: Vec = Vec::new(); + for root in &roots { + ordered.push(root.ticket.clone()); + queue.push(root.ticket.clone()); + } + while let Some(parent) = queue.first().cloned() { + queue.remove(0); + let children: Vec<_> = workspaces + .iter() + .filter(|w| w.parent_ticket.as_deref() == Some(parent.as_str())) + .collect(); + for child in children { + ordered.push(child.ticket.clone()); + queue.push(child.ticket.clone()); + } + } + + if mode == Mode::Human { + println!("Submitting stack ({} worktrees):", ordered.len()); + for (i, ticket) in ordered.iter().enumerate() { + println!(" {}. 
{}", i + 1, ticket); + } + println!(); + } + + // Ship each in dependency order + let mut shipped = Vec::new(); + let mut failed = Vec::new(); + for ticket in &ordered { + if mode == Mode::Human { + eprintln!("Shipping {}...", ticket); + } + match super::ship( + repo, ticket, false, // draft + false, // no_pr + None, // base_override + None, // title_override + false, // skip_hooks + mode, + ) + .await + { + Ok(()) => shipped.push(ticket.clone()), + Err(e) => { + eprintln!("error: failed to ship {}: {}", ticket, e); + failed.push((ticket.clone(), e.to_string())); + // Stop on first failure to prevent broken stack + break; + } + } + } + + if mode == Mode::Human { + println!(); + println!( + "Stack submit complete: {}/{} shipped", + shipped.len(), + ordered.len() + ); + if !failed.is_empty() { + for (t, err) in &failed { + println!(" failed: {} - {}", t, err); + } + } + } + + if !failed.is_empty() { + anyhow::bail!( + "Stack submit incomplete: {} of {} shipped", + shipped.len(), + ordered.len() + ); + } + + Ok(()) +} diff --git a/src/cli/mod.rs b/src/cli/mod.rs index fa5d486..de888bf 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -361,10 +361,15 @@ pub enum Command { /// /// Displays the dependency graph of worktrees created with --on. /// Use `parsec stack --sync` to rebase the entire chain. + /// Use `parsec stack --submit` to ship the entire stack at once. 
Stack { /// Sync the entire stack (rebase chain) #[arg(long)] sync: bool, + + /// Ship the entire stack in dependency order + #[arg(long)] + submit: bool, }, /// Print the main repository root path @@ -704,8 +709,10 @@ pub async fn run(cli: Cli) -> Result<()> { assignee, all, } => commands::board(&repo_path, board_id, project, assignee, all, output_mode).await, - Command::Stack { sync } => { - if sync { + Command::Stack { sync, submit } => { + if submit { + commands::stack_submit(&repo_path, output_mode).await + } else if sync { commands::stack_sync(&repo_path, output_mode).await } else { commands::stack(&repo_path, output_mode).await From 444b9d7a0294373e4dbe7b7f68419d3d935b6da0 Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 27 Apr 2026 13:24:35 +0900 Subject: [PATCH 03/18] 257: ci: add Windows test coverage (#258) * ci: add Windows and macOS to test matrix, Windows to build matrix (#257) Co-Authored-By: Claude Opus 4.6 * ci: mark Windows tests as continue-on-error until UNC path fix Windows git worktree fails with \\?\ UNC paths from canonicalize(). Tests are informational until the path handling is fixed. Co-Authored-By: Claude Opus 4.6 * ci: keep required 'Test' check name, add cross-platform tests separately Branch protection requires check named "Test". Keep ubuntu-only Test job for required checks. Add separate test-cross-platform job for macOS and Windows (informational, continue-on-error). 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .github/workflows/ci.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a624cb7..4ed0490 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -40,6 +40,21 @@ jobs: - uses: Swatinem/rust-cache@e18b497796c12c097a38f9edb9d0641fb99eee32 # v2 - run: cargo test + test-cross-platform: + name: Test (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + # Informational until Windows UNC path issues are resolved + continue-on-error: true + strategy: + fail-fast: false + matrix: + os: [macos-latest, windows-latest] + steps: + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + - uses: dtolnay/rust-toolchain@29eef336d9b2848a0b548edc03f92a220660cdb8 # stable + - uses: Swatinem/rust-cache@e18b497796c12c097a38f9edb9d0641fb99eee32 # v2 + - run: cargo test + fmt: name: Format runs-on: ubuntu-latest @@ -66,7 +81,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest, macos-latest, windows-latest] steps: - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - uses: dtolnay/rust-toolchain@29eef336d9b2848a0b548edc03f92a220660cdb8 # stable From 4b4eed92ef8eb6dd0df540a65c3a626bd77c7fd5 Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 27 Apr 2026 13:35:03 +0900 Subject: [PATCH 04/18] 261: v0.4 features: Windows CI, ship reviewers/labels, stack submit (#262) * fix: add missing reviewers/labels args to stack_submit ship call Co-Authored-By: Claude Opus 4.6 * style: cargo fmt --------- Co-authored-by: Claude Opus 4.6 --- src/cli/commands/stack.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/cli/commands/stack.rs b/src/cli/commands/stack.rs index 7e10720..7793d9b 100644 --- a/src/cli/commands/stack.rs +++ b/src/cli/commands/stack.rs @@ -178,11 +178,15 @@ pub async fn 
stack_submit(repo: &Path, mode: Mode) -> Result<()> { eprintln!("Shipping {}...", ticket); } match super::ship( - repo, ticket, false, // draft - false, // no_pr - None, // base_override - None, // title_override - false, // skip_hooks + repo, + ticket, + false, // draft + false, // no_pr + None, // base_override + None, // title_override + false, // skip_hooks + Vec::new(), // reviewers + Vec::new(), // labels mode, ) .await From 8e668b887f8e40f9a8034d264d69fee9d4db0644 Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 27 Apr 2026 14:21:28 +0900 Subject: [PATCH 05/18] fix: resolve Windows UNC path issue with dunce crate (#263) canonicalize() on Windows returns \\?\C:\... UNC paths that git cannot handle. Use dunce::canonicalize() which strips the prefix when safe, fixing worktree creation on Windows. Co-authored-by: Claude Opus 4.6 --- Cargo.toml | 1 + src/git/mod.rs | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9f3fc9d..ed09c95 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,7 @@ reqwest = { version = "0.13", features = ["json", "query", "rustls", "rustls-nat tokio = { version = "1", features = ["full"] } clap_mangen = "0.3" clap_complete = "4" +dunce = "1" [dev-dependencies] assert_cmd = "2" diff --git a/src/git/mod.rs b/src/git/mod.rs index ee47f06..8bfba2d 100644 --- a/src/git/mod.rs +++ b/src/git/mod.rs @@ -78,7 +78,8 @@ pub fn get_default_branch(repo: &Path) -> Result { /// Note: in a worktree, this returns the worktree root, not the main repo. pub fn get_repo_root(path: &Path) -> Result { let out = run_output(path, &["rev-parse", "--show-toplevel"])?; - Ok(PathBuf::from(out)) + // On Windows, strip UNC prefix (\\?\) that git may produce + dunce::canonicalize(Path::new(&out)).or_else(|_| Ok(PathBuf::from(out))) } /// Return the main repository root, even when called from a worktree. 
@@ -99,8 +100,7 @@ pub fn get_main_repo_root(path: &Path) -> Result { }; // The main repo root is the parent of the .git directory - let canonical = abs_common - .canonicalize() + let canonical = dunce::canonicalize(&abs_common) .with_context(|| format!("failed to canonicalize git common dir: {:?}", abs_common))?; canonical From 23ae42ca910b4135dad51bc6a66d71dad3a36fce Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 27 Apr 2026 14:39:38 +0900 Subject: [PATCH 06/18] docs: add ship --reviewer/--label and stack --submit to README and reference (#265) Co-authored-by: Claude Opus 4.6 --- README.md | 43 ++++++++++++++++++++++++++++++++++----- docs/reference/index.html | 14 +++++++++++++ 2 files changed, 52 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 176a0d9..32a3ee5 100644 --- a/README.md +++ b/README.md @@ -112,6 +112,9 @@ Removed 1 worktree(s): - **Pre-ship hooks** -- Run custom commands before shipping with configurable `[hooks]` pre_ship - **Issue creation** -- Create GitHub/Jira issues and start worktrees in one step with `parsec create` - **Release workflow** -- Merge, tag, and create GitHub Releases with `parsec release` +- **PR reviewers and labels** -- Assign reviewers and labels on ship with `--reviewer`/`--label` or config defaults +- **Stack submit** -- Ship an entire stack in topological order with `parsec stack --submit` +- **Cross-platform** -- Tested on Linux, macOS, and Windows CI --- @@ -315,7 +318,7 @@ $ parsec log | [`parsec ci`](#parsec-ci-ticket---watch---all) | Check CI pipeline status for a PR | | [`parsec merge`](#parsec-merge-ticket---rebase---no-wait---no-delete-branch) | Merge a PR from the terminal | | [`parsec diff`](#parsec-diff-ticket---stat---name-only) | View changes vs base branch | -| [`parsec stack`](#parsec-stack---sync) | View and manage stacked PR dependencies | +| [`parsec stack`](#parsec-stack---sync---submit) | View and manage stacked PR dependencies | | [`parsec board`](#parsec-board) | Show sprint as 
a Kanban board | | [`parsec init`](#parsec-init) | Install shell integration | | [`parsec config`](#parsec-config) | Configure parsec | @@ -489,7 +492,7 @@ $ parsec ticket CL-2283 --json Push the branch, create a PR (GitHub) or MR (GitLab), and clean up the worktree. The forge is auto-detected from the remote URL. ``` -parsec ship [--draft] [--no-pr] [--base ] [--skip-hooks] +parsec ship [--draft] [--no-pr] [--base ] [--skip-hooks] [--reviewer ]... [--label ]... ``` | Option | Description | @@ -498,6 +501,8 @@ parsec ship [--draft] [--no-pr] [--base ] [--skip-hooks] | `--no-pr` | Push only, skip PR/MR creation | | `--base ` | Target base branch for PR (overrides config `default_base` and worktree base) | | `--skip-hooks` | Skip pre-ship hooks defined in config | +| `-r, --reviewer ` | Request review from a GitHub user (repeatable) | +| `-l, --label ` | Add a label to the PR (repeatable) | ```bash # Push + PR + cleanup @@ -511,6 +516,17 @@ $ parsec ship PROJ-5678 --draft # Push only, no PR $ parsec ship PROJ-9000 --no-pr + +# Ship with reviewers and labels +$ parsec ship PROJ-1234 --reviewer alice --reviewer bob --label "needs-review" +``` + +Reviewers and labels can also be set as defaults in config: + +```toml +[ship] +default_reviewers = ["alice", "bob"] +default_labels = ["team-backend"] ``` Token required: set `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) for GitHub, or `PARSEC_GITLAB_TOKEN` (or `GITLAB_TOKEN`) for GitLab. @@ -866,17 +882,18 @@ $ parsec diff PROJ-1234 --json --- -### `parsec stack [--sync]` +### `parsec stack [--sync] [--submit]` -View and manage stacked PR dependencies. Worktrees created with `--on` form a dependency chain. +View and manage stacked PR dependencies. Worktrees created with `--on` form a dependency chain. PRs include a **stack navigation table** showing parent/child relationships. 
``` -parsec stack [--sync] +parsec stack [--sync] [--submit] ``` | Option | Description | |--------|-------------| | `--sync` | Rebase the entire stack chain | +| `--submit` | Ship the entire stack in topological order (root first) | ```bash # Create a stack @@ -898,8 +915,24 @@ $ parsec stack --sync $ parsec ship PROJ-1 # PR to main $ parsec ship PROJ-2 # PR to feature/PROJ-1 $ parsec ship PROJ-3 # PR to feature/PROJ-2 + +# Or ship the entire stack at once +$ parsec stack --submit +Submitting stack (3 worktrees): + 1. PROJ-1 + 2. PROJ-2 + 3. PROJ-3 +Stack submit complete: 3/3 shipped ``` +Each PR body includes a stack navigation table: + +| | Ticket | Branch | +|---|--------|--------| +| ⬆ Parent | PROJ-1 | `feature/PROJ-1` | +| **➡ Current** | **PROJ-2** | **`feature/PROJ-2`** | +| ⬇ Child | PROJ-3 | `feature/PROJ-3` | + --- ### `parsec board` diff --git a/docs/reference/index.html b/docs/reference/index.html index 3e1e40d..6da1f2f 100644 --- a/docs/reference/index.html +++ b/docs/reference/index.html @@ -1274,6 +1274,8 @@

Core Workflow

--no-prPush the branch but skip creating a PR/MR. --base <BRANCH>Override the target base branch for the PR. --skip-hooksSkip pre-ship hooks defined in [hooks] config. + -r, --reviewer <USER>Request review from a GitHub user (repeatable). + -l, --label <NAME>Add a label to the PR (repeatable). @@ -1292,6 +1294,10 @@

Core Workflow

# Open as draft PR $ parsec ship PROJ-125 --draft Draft PR created: github.com/org/myrepo/pull/43 +  +# Ship with reviewers and labels +$ parsec ship PROJ-126 --reviewer alice --reviewer bob --label needs-review + PR #44 created with reviewers and labels @@ -1767,6 +1773,7 @@

Advanced

--syncRe-target stacked PRs after an upstream PR was merged. + --submitShip the entire stack in topological order (root first). Stops on first failure. @@ -1781,6 +1788,13 @@

Advanced

main └── feature/PROJ-123 (PROJ-123) PR #42 open └── feature/PROJ-124 (PROJ-124) PR #43 open +  +# Ship the entire stack at once +$ parsec stack --submit +Submitting stack (2 worktrees): + 1. PROJ-123 + 2. PROJ-124 + Stack submit complete: 2/2 shipped From 561dbd3141354d73af8b132c62412ffa38576751 Mon Sep 17 00:00:00 2001 From: erish Date: Tue, 28 Apr 2026 19:17:36 +0900 Subject: [PATCH 07/18] feat: add parsec compress command (#236) (#267) Squashes all branch commits into one since the merge-base. Auto-detects current worktree or accepts a ticket argument. Supports custom commit message via --message/-m flag. Co-authored-by: Claude Opus 4.6 --- src/cli/commands/compress.rs | 103 +++++++++++++++++++++++++++++++++++ src/cli/commands/mod.rs | 2 + src/cli/mod.rs | 17 ++++++ 3 files changed, 122 insertions(+) create mode 100644 src/cli/commands/compress.rs diff --git a/src/cli/commands/compress.rs b/src/cli/commands/compress.rs new file mode 100644 index 0000000..57c54f9 --- /dev/null +++ b/src/cli/commands/compress.rs @@ -0,0 +1,103 @@ +use std::path::Path; + +use anyhow::Result; + +use crate::config::ParsecConfig; +use crate::git; +use crate::output::Mode; +use crate::worktree::WorktreeManager; + +pub async fn compress( + repo: &Path, + ticket: Option<&str>, + message: Option, + mode: Mode, +) -> Result<()> { + let config = ParsecConfig::load()?; + let manager = WorktreeManager::new(repo, &config)?; + + // Resolve ticket from arg or current worktree + let ticket = match ticket { + Some(t) => t.to_string(), + None => { + let cwd = std::env::current_dir()?; + let workspaces = manager.list()?; + workspaces + .iter() + .find(|w| cwd.starts_with(&w.path)) + .map(|w| w.ticket.clone()) + .ok_or_else(|| anyhow::anyhow!("Not in a parsec worktree. Specify a ticket."))? 
+ } + }; + + let workspace = manager.get(&ticket)?; + + // Find merge-base with the base branch + let merge_base = git::run_output( + &workspace.path, + &["merge-base", "HEAD", &workspace.base_branch], + )?; + + // Count commits to squash + let log_output = git::run_output( + &workspace.path, + &["rev-list", "--count", &format!("{}..HEAD", merge_base)], + )?; + let commit_count: u64 = log_output.parse().unwrap_or(0); + + if commit_count <= 1 { + if mode == Mode::Human { + println!( + "Nothing to compress — branch has {} commit(s) since base.", + commit_count + ); + } + return Ok(()); + } + + // Get the default commit message (combine all commit messages) + let combined_msg = if let Some(ref msg) = message { + msg.clone() + } else { + git::run_output( + &workspace.path, + &["log", "--format=%s", &format!("{}..HEAD", merge_base)], + )? + }; + + // Soft reset to merge-base + git::run(&workspace.path, &["reset", "--soft", &merge_base])?; + + // Recommit with combined or custom message + let final_message = if message.is_some() { + combined_msg + } else { + // Use first commit message as primary, rest as bullet points + let lines: Vec<&str> = combined_msg.lines().collect(); + if lines.len() == 1 { + lines[0].to_string() + } else { + format!( + "{}\n\nSquashed {} commits:\n{}", + lines[0], + commit_count, + lines + .iter() + .map(|l| format!("- {}", l)) + .collect::>() + .join("\n") + ) + } + }; + + git::run(&workspace.path, &["commit", "-m", &final_message])?; + + if mode == Mode::Human { + println!( + "Compressed {} commits into 1 for ticket {}.", + commit_count, ticket + ); + } + + Ok(()) +} diff --git a/src/cli/commands/mod.rs b/src/cli/commands/mod.rs index 4d7d608..5f7a5b0 100644 --- a/src/cli/commands/mod.rs +++ b/src/cli/commands/mod.rs @@ -1,4 +1,5 @@ mod ci; +mod compress; mod config; mod diff; mod doctor; @@ -11,6 +12,7 @@ mod tracker_cmds; mod workspace; pub use ci::*; +pub use compress::*; pub use config::*; pub use diff::*; pub use doctor::*; diff --git 
a/src/cli/mod.rs b/src/cli/mod.rs index de888bf..b2fd4a6 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -466,6 +466,20 @@ pub enum Command { start: bool, }, + /// Squash all branch commits into one + /// + /// Resets the branch to the merge-base with the base branch and + /// re-commits all changes as a single commit. Use --message to + /// set a custom commit message. + Compress { + /// Ticket identifier (auto-detects current worktree if omitted) + ticket: Option, + + /// Custom commit message (default: combines all squashed messages) + #[arg(long, short)] + message: Option, + }, + /// Rename a workspace to a different ticket ID /// /// Changes the ticket ID, renames the branch, and moves the worktree directory. @@ -793,5 +807,8 @@ pub async fn run(cli: Cli) -> Result<()> { } commands::rename(&repo_path, &old_ticket, &new_ticket, output_mode).await } + Command::Compress { ticket, message } => { + commands::compress(&repo_path, ticket.as_deref(), message, output_mode).await + } } } From 073036bfc46d6f95d8538469090c1d4384d3cc27 Mon Sep 17 00:00:00 2001 From: erish Date: Tue, 28 Apr 2026 19:17:49 +0900 Subject: [PATCH 08/18] feat: add offline mode toggle (#237) (#268) Adds --offline global flag and workspace.offline config option. When active, skips tracker API calls, PR creation, git fetch, auto-comments, and auto-transitions. 
Co-authored-by: Claude Opus 4.6 --- src/cli/commands/ship.rs | 4 ++-- src/cli/mod.rs | 13 +++++++++++++ src/config/settings.rs | 4 ++++ src/env.rs | 13 +++++++++++++ src/git/mod.rs | 6 ++++++ src/tracker/mod.rs | 10 ++++++++++ 6 files changed, 48 insertions(+), 2 deletions(-) diff --git a/src/cli/commands/ship.rs b/src/cli/commands/ship.rs index c19a9a2..4525c1d 100644 --- a/src/cli/commands/ship.rs +++ b/src/cli/commands/ship.rs @@ -134,7 +134,7 @@ pub async fn ship( // Phase 2: Create PR/MR (async) let mut pr_failed = false; - if !no_pr && config.ship.auto_pr { + if !no_pr && config.ship.auto_pr && !crate::env::is_offline() { let (ticket_title, ticket_url) = match tracker::fetch_ticket(&config, ticket, Some(manager.repo_root())).await { Ok(Some(t)) => (Some(t.title), t.url), @@ -239,7 +239,7 @@ pub async fn ship( } // Auto-comment PR link on the ticket if configured - if config.tracker.comment_on_ship { + if config.tracker.comment_on_ship && !crate::env::is_offline() { if let Some(ref pr_url) = result.pr_url { let comment_body = format!("PR opened: {}", pr_url); if let Err(e) = diff --git a/src/cli/mod.rs b/src/cli/mod.rs index b2fd4a6..1e35c28 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -33,6 +33,10 @@ pub struct Cli { /// Preview what would happen without making changes #[arg(long, global = true)] pub dry_run: bool, + + /// Skip all network operations (tracker, PR, fetch) + #[arg(long, global = true)] + pub offline: bool, } #[derive(Subcommand)] @@ -540,6 +544,15 @@ pub async fn run(cli: Cli) -> Result<()> { output::Mode::Human }; + // Propagate offline mode via env var so all subsystems can check it + let offline = cli.offline + || crate::config::ParsecConfig::load() + .map(|c| c.workspace.offline) + .unwrap_or(false); + if offline { + std::env::set_var("PARSEC_OFFLINE", "1"); + } + match cli.command { Command::Start { ticket, diff --git a/src/config/settings.rs b/src/config/settings.rs index d2393a5..2b3b893 100644 --- a/src/config/settings.rs +++ 
b/src/config/settings.rs @@ -99,6 +99,9 @@ pub struct WorkspaceConfig { /// Default base branch for worktree creation (e.g. "develop") #[serde(default)] pub default_base: Option, + /// When true, skip all network operations by default + #[serde(default)] + pub offline: bool, } impl Default for WorkspaceConfig { @@ -108,6 +111,7 @@ impl Default for WorkspaceConfig { base_dir: default_base_dir(), branch_prefix: default_branch_prefix(), default_base: None, + offline: false, } } } diff --git a/src/env.rs b/src/env.rs index 5ab2a1a..045318c 100644 --- a/src/env.rs +++ b/src/env.rs @@ -70,3 +70,16 @@ pub fn gitlab_token() -> Option { } None } + +// --------------------------------------------------------------------------- +// Offline mode +// --------------------------------------------------------------------------- + +pub const PARSEC_OFFLINE: &str = "PARSEC_OFFLINE"; + +/// Check if offline mode is active (via --offline flag or PARSEC_OFFLINE env var). +pub fn is_offline() -> bool { + std::env::var(PARSEC_OFFLINE) + .map(|v| v == "1" || v == "true") + .unwrap_or(false) +} diff --git a/src/git/mod.rs b/src/git/mod.rs index 8bfba2d..a2d9467 100644 --- a/src/git/mod.rs +++ b/src/git/mod.rs @@ -283,11 +283,17 @@ pub fn delete_branch(repo: &Path, branch: &str) -> Result<()> { /// Fetch all refs from `origin`. pub fn fetch(repo: &Path) -> Result<()> { + if crate::env::is_offline() { + return Ok(()); + } run(repo, &["fetch", "origin", "--prune"]) } /// Fetch from origin if a remote exists. Non-fatal if no remote configured. 
pub fn fetch_if_remote(repo: &Path) -> Result<()> { + if crate::env::is_offline() { + return Ok(()); + } // Check if remote exists first let has_remote = std::process::Command::new("git") .args(["remote"]) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 012e183..7afce95 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -115,6 +115,10 @@ pub async fn fetch_ticket( id: &str, repo_root: Option<&Path>, ) -> Result> { + if crate::env::is_offline() { + return Ok(None); + } + // Load atlassian env file for seamless Claude Jira skill integration load_atlassian_env(); @@ -156,6 +160,9 @@ pub async fn fetch_ticket( /// Try to transition a ticket's status. Warns on failure but never blocks. pub async fn try_transition(config: &ParsecConfig, ticket: &str, target_status: &str) { + if crate::env::is_offline() { + return; + } // Only works for Jira currently if !matches!( config.tracker.provider, @@ -197,6 +204,9 @@ pub async fn post_comment( body: &str, repo_root: Option<&Path>, ) -> Result<()> { + if crate::env::is_offline() { + return Ok(()); + } load_atlassian_env(); match config.tracker.provider { From 264c9c77b349099f62709deeb71bd3c95a7b3a52 Mon Sep 17 00:00:00 2001 From: erish Date: Tue, 28 Apr 2026 19:18:02 +0900 Subject: [PATCH 09/18] feat: add config JSON Schema and schema subcommand (#239) (#269) Adds parsec-config.schema.json for IDE autocomplete/validation. New 'parsec config schema' command outputs the schema to stdout. 
Co-authored-by: Claude Opus 4.6 --- schema/parsec-config.schema.json | 279 +++++++++++++++++++++++++++++++ src/cli/commands/config.rs | 6 + src/cli/mod.rs | 6 + 3 files changed, 291 insertions(+) create mode 100644 schema/parsec-config.schema.json diff --git a/schema/parsec-config.schema.json b/schema/parsec-config.schema.json new file mode 100644 index 0000000..d6b6956 --- /dev/null +++ b/schema/parsec-config.schema.json @@ -0,0 +1,279 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://raw.githubusercontent.com/erishforG/git-parsec/main/schema/parsec-config.schema.json", + "title": "Parsec Configuration", + "description": "Configuration file for git-parsec CLI (https://github.com/erishforG/git-parsec)", + "type": "object", + "properties": { + "workspace": { + "type": "object", + "description": "Worktree layout and branch settings", + "properties": { + "layout": { + "type": "string", + "enum": ["sibling", "internal"], + "default": "sibling", + "description": "Worktree layout: sibling (../repo.ticket/) or internal (.parsec/workspaces/ticket/)" + }, + "base_dir": { + "type": "string", + "default": ".parsec/workspaces", + "description": "Base directory for internal layout worktrees" + }, + "branch_prefix": { + "type": "string", + "default": "feature/", + "description": "Prefix for new worktree branches" + }, + "default_base": { + "type": "string", + "description": "Default base branch for worktree creation (e.g. 
develop)" + }, + "offline": { + "type": "boolean", + "default": false, + "description": "When true, skip all network operations by default" + } + }, + "additionalProperties": false + }, + "tracker": { + "type": "object", + "description": "Issue tracker integration settings", + "properties": { + "provider": { + "type": "string", + "enum": ["none", "jira", "github", "gitlab"], + "default": "none", + "description": "Issue tracker provider" + }, + "jira": { + "type": "object", + "description": "Jira-specific configuration", + "properties": { + "base_url": { + "type": "string", + "description": "Jira base URL (e.g. https://yourorg.atlassian.net)" + }, + "email": { + "type": "string", + "description": "Jira account email" + }, + "project": { + "type": "string", + "description": "Default Jira project key" + }, + "board_id": { + "type": "integer", + "description": "Default Jira board ID" + }, + "assignee": { + "type": "string", + "description": "Default assignee for board/inbox filters" + }, + "token": { + "type": "string", + "description": "Jira API token (prefer PARSEC_JIRA_TOKEN env var)" + } + }, + "required": ["base_url"] + }, + "gitlab": { + "type": "object", + "description": "GitLab-specific configuration", + "properties": { + "base_url": { + "type": "string", + "description": "GitLab base URL (e.g. https://gitlab.com)" + } + }, + "required": ["base_url"] + }, + "auto_transition": { + "type": "object", + "description": "Automatic ticket status transitions", + "properties": { + "on_start": { + "type": "string", + "description": "Target status when running parsec start (e.g. In Progress)" + }, + "on_ship": { + "type": "string", + "description": "Target status when running parsec ship (e.g. In Review)" + }, + "on_merge": { + "type": "string", + "description": "Target status when running parsec merge (e.g. 
Done)" + } + }, + "additionalProperties": false + }, + "comment_on_ship": { + "type": "boolean", + "default": false, + "description": "Auto-post PR link as comment on the ticket during parsec ship" + } + }, + "additionalProperties": false + }, + "ship": { + "type": "object", + "description": "PR/MR creation settings", + "properties": { + "auto_pr": { + "type": "boolean", + "default": true, + "description": "Automatically open a PR when shipping" + }, + "auto_cleanup": { + "type": "boolean", + "default": true, + "description": "Clean up worktree after shipping" + }, + "draft": { + "type": "boolean", + "default": false, + "description": "Create PRs as drafts by default" + }, + "default_base": { + "type": "string", + "description": "Default target base branch for PRs" + }, + "default_reviewers": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Default reviewers to request on PRs (GitHub usernames)" + }, + "default_labels": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Default labels to apply to PRs" + } + }, + "additionalProperties": false + }, + "hooks": { + "type": "object", + "description": "Lifecycle hook commands", + "properties": { + "post_create": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Commands to run after creating a worktree" + }, + "pre_ship": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Commands to run before shipping a worktree" + } + }, + "additionalProperties": false + }, + "release": { + "type": "object", + "description": "Release workflow settings", + "properties": { + "branch": { + "type": "string", + "default": "main", + "description": "Target branch for release" + }, + "tag_prefix": { + "type": "string", + "default": "v", + "description": "Tag prefix (e.g. 
v for v0.3.0)" + }, + "changelog": { + "type": "boolean", + "default": true, + "description": "Auto-generate changelog for releases" + } + }, + "additionalProperties": false + }, + "policy": { + "type": "object", + "description": "Branch policy and guardrails", + "properties": { + "protected_branches": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Branches that cannot be used as ship targets (supports glob with *)" + }, + "allowed_ship_targets": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Branches allowed as ship targets (if empty, all non-protected are allowed)" + }, + "require_ci": { + "type": "boolean", + "default": false, + "description": "Require CI to pass before parsec merge" + } + }, + "additionalProperties": false + }, + "github": { + "type": "object", + "description": "Per-host GitHub tokens (keys are hostnames)", + "additionalProperties": { + "type": "object", + "properties": { + "token": { + "type": "string", + "description": "Personal access token for this GitHub host" + } + }, + "additionalProperties": false + } + }, + "repos": { + "type": "object", + "description": "Per-repo configuration overrides (keys are owner/repo)", + "additionalProperties": { + "type": "object", + "properties": { + "tracker": { + "type": "object", + "description": "Tracker overrides for this repo", + "properties": { + "provider": { + "type": "string", + "enum": ["none", "jira", "github", "gitlab"], + "description": "Override tracker provider for this repo" + }, + "jira": { + "type": "object", + "properties": { + "base_url": { "type": "string" }, + "email": { "type": "string" }, + "project": { "type": "string" }, + "board_id": { "type": "integer" }, + "assignee": { "type": "string" }, + "token": { "type": "string" } + }, + "required": ["base_url"] + }, + "gitlab": { + "type": "object", + "properties": { + "base_url": { "type": "string" } + }, + "required": ["base_url"] + } + }, + 
"additionalProperties": false + } + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false +} diff --git a/src/cli/commands/config.rs b/src/cli/commands/config.rs index 06a8f5c..ca5b8ed 100644 --- a/src/cli/commands/config.rs +++ b/src/cli/commands/config.rs @@ -245,3 +245,9 @@ pub async fn config_completions(shell: clap_complete::Shell) -> Result<()> { clap_complete::generate(shell, &mut cmd, "parsec", &mut std::io::stdout()); Ok(()) } + +pub async fn config_schema() -> Result<()> { + let schema = include_str!("../../../schema/parsec-config.schema.json"); + println!("{}", schema); + Ok(()) +} diff --git a/src/cli/mod.rs b/src/cli/mod.rs index 1e35c28..65cf2a1 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -532,6 +532,11 @@ pub enum ConfigAction { /// Shell type (zsh, bash, fish, elvish, powershell) shell: clap_complete::Shell, }, + /// Output JSON Schema for config.toml + /// + /// Prints the JSON Schema for parsec's configuration format. + /// Useful for IDE autocomplete and validation. + Schema, } pub async fn run(cli: Cli) -> Result<()> { @@ -763,6 +768,7 @@ pub async fn run(cli: Cli) -> Result<()> { ConfigAction::Shell { shell } => commands::config_shell(&shell, output_mode).await, ConfigAction::Man { dir } => commands::config_man(&dir).await, ConfigAction::Completions { shell } => commands::config_completions(shell).await, + ConfigAction::Schema => commands::config_schema().await, }, Command::Doctor { ai } => { if ai { From ff47818718b12e6768b6c171977d0795406f4fe5 Mon Sep 17 00:00:00 2001 From: erish Date: Tue, 28 Apr 2026 19:18:15 +0900 Subject: [PATCH 10/18] feat: add ship --template for PR template auto-population (#233) (#270) Auto-detects .github/PULL_REQUEST_TEMPLATE.md and populates PR body. Supports --template flag and ship.template config option. 
Co-authored-by: Claude Opus 4.6 --- src/cli/commands/ship.rs | 48 +++++++++++++++++++++++++++++++++++++++ src/cli/commands/stack.rs | 1 + src/cli/mod.rs | 6 +++++ src/config/settings.rs | 4 ++++ 4 files changed, 59 insertions(+) diff --git a/src/cli/commands/ship.rs b/src/cli/commands/ship.rs index 4525c1d..7d73b6d 100644 --- a/src/cli/commands/ship.rs +++ b/src/cli/commands/ship.rs @@ -22,6 +22,7 @@ pub async fn ship( skip_hooks: bool, reviewers: Vec, labels: Vec, + template: Option, mode: Mode, ) -> Result<()> { let mut config = ParsecConfig::load()?; @@ -155,11 +156,18 @@ pub async fn ship( // Gather stack context for PR body (#234) let stack_info = gather_stack_info(&manager, ticket); + // Resolve PR template (#233) + let template_content = resolve_template( + manager.repo_root(), + template.as_deref().or(config.ship.template.as_deref()), + ); + let pr_body = build_pr_body( &result.ticket, effective_title, ticket_url.as_deref(), stack_info.as_ref(), + template_content.as_deref(), ); let remote_url = git::get_remote_url(manager.repo_root()); @@ -357,6 +365,7 @@ fn build_pr_body( title: Option<&str>, ticket_url: Option<&str>, stack_info: Option<&StackPrInfo>, + template_content: Option<&str>, ) -> String { let mut body = String::new(); @@ -391,7 +400,46 @@ fn build_pr_body( body.push('\n'); } + // Include PR template content (#233) + if let Some(tmpl) = template_content { + body.push_str("---\n\n"); + body.push_str(tmpl); + body.push('\n'); + } + body.push_str(&format!("Shipped via `parsec ship {ticket}`\n")); body } + +/// Resolve PR template content from explicit path or auto-detection. 
+fn resolve_template(repo_root: &Path, explicit_path: Option<&str>) -> Option { + if let Some(path) = explicit_path { + let full_path = if std::path::Path::new(path).is_absolute() { + std::path::PathBuf::from(path) + } else { + repo_root.join(path) + }; + return std::fs::read_to_string(&full_path).ok(); + } + + // Auto-detect common template locations + let candidates = [ + ".github/PULL_REQUEST_TEMPLATE.md", + ".github/pull_request_template.md", + "PULL_REQUEST_TEMPLATE.md", + "pull_request_template.md", + "docs/PULL_REQUEST_TEMPLATE.md", + ]; + + for candidate in &candidates { + let path = repo_root.join(candidate); + if let Ok(content) = std::fs::read_to_string(&path) { + if !content.trim().is_empty() { + return Some(content); + } + } + } + + None +} diff --git a/src/cli/commands/stack.rs b/src/cli/commands/stack.rs index 7793d9b..4004f3d 100644 --- a/src/cli/commands/stack.rs +++ b/src/cli/commands/stack.rs @@ -187,6 +187,7 @@ pub async fn stack_submit(repo: &Path, mode: Mode) -> Result<()> { false, // skip_hooks Vec::new(), // reviewers Vec::new(), // labels + None, // template mode, ) .await diff --git a/src/cli/mod.rs b/src/cli/mod.rs index 65cf2a1..7aba663 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -144,6 +144,10 @@ pub enum Command { /// Add labels to the PR (can be specified multiple times) #[arg(long, short = 'l')] label: Vec, + + /// Path to PR body template file + #[arg(long)] + template: Option, }, /// Remove merged or stale worktrees @@ -603,6 +607,7 @@ pub async fn run(cli: Cli) -> Result<()> { skip_hooks, reviewer, label, + template, } => { if cli.dry_run { eprintln!( @@ -624,6 +629,7 @@ pub async fn run(cli: Cli) -> Result<()> { skip_hooks, reviewer, label, + template, output_mode, ) .await diff --git a/src/config/settings.rs b/src/config/settings.rs index 2b3b893..90fab68 100644 --- a/src/config/settings.rs +++ b/src/config/settings.rs @@ -191,6 +191,9 @@ pub struct ShipConfig { /// Default labels to apply to PRs #[serde(default)] pub 
default_labels: Vec, + /// Path to PR template file (auto-detected if not set) + #[serde(default)] + pub template: Option, } impl Default for ShipConfig { @@ -202,6 +205,7 @@ impl Default for ShipConfig { default_base: None, default_reviewers: Vec::new(), default_labels: Vec::new(), + template: None, } } } From 620098def00af166c96aa8e3ccf7632f27dd0881 Mon Sep 17 00:00:00 2001 From: erish Date: Wed, 29 Apr 2026 23:24:25 +0900 Subject: [PATCH 11/18] =?UTF-8?q?feat:=20observability=20lite=20=E2=80=94?= =?UTF-8?q?=20execution=20ID,=20step=20timing,=20JSONL=20export=20(#273)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each parsec command now records an execution entry with a UUID, timestamps, duration, status, and optional step-level phases to `.parsec/execlog.jsonl`. Use `parsec log --export` to dump the raw JSONL for debugging or dashboarding. Ship command is instrumented with push and create_pr step tracking. Start and ship commands set the ticket context automatically. 
Closes #166 Co-authored-by: Claude Opus 4.6 --- Cargo.toml | 1 + src/cli/commands/history.rs | 11 +++ src/cli/commands/ship.rs | 13 ++++ src/cli/commands/workspace.rs | 1 + src/cli/mod.rs | 81 +++++++++++++++++++++- src/execlog.rs | 125 ++++++++++++++++++++++++++++++++++ src/main.rs | 1 + 7 files changed, 230 insertions(+), 3 deletions(-) create mode 100644 src/execlog.rs diff --git a/Cargo.toml b/Cargo.toml index ed09c95..d8d8555 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ tokio = { version = "1", features = ["full"] } clap_mangen = "0.3" clap_complete = "4" dunce = "1" +uuid = { version = "1", features = ["v4"] } [dev-dependencies] assert_cmd = "2" diff --git a/src/cli/commands/history.rs b/src/cli/commands/history.rs index 8bb82f1..abf8b29 100644 --- a/src/cli/commands/history.rs +++ b/src/cli/commands/history.rs @@ -18,6 +18,17 @@ pub async fn log(repo: &Path, ticket: Option<&str>, last: usize, mode: Mode) -> Ok(()) } +pub async fn log_export(repo: &Path) -> Result<()> { + let repo_root = git::get_main_repo_root(repo).or_else(|_| git::get_repo_root(repo))?; + let raw = crate::execlog::read_raw(&repo_root)?; + if raw.is_empty() { + eprintln!("No execution log entries. 
Run some commands first."); + } else { + print!("{}", raw); + } + Ok(()) +} + pub async fn undo(repo: &Path, dry_run: bool, mode: Mode) -> Result<()> { let config = ParsecConfig::load()?; let repo_root = git::get_main_repo_root(repo).or_else(|_| git::get_repo_root(repo))?; diff --git a/src/cli/commands/ship.rs b/src/cli/commands/ship.rs index 7d73b6d..ceb0eb7 100644 --- a/src/cli/commands/ship.rs +++ b/src/cli/commands/ship.rs @@ -25,6 +25,8 @@ pub async fn ship( template: Option, mode: Mode, ) -> Result<()> { + crate::execlog::set_ticket(ticket); + let mut config = ParsecConfig::load()?; let manager = WorktreeManager::new(repo, &config)?; config.resolve_for_repo(manager.repo_root()); @@ -78,6 +80,7 @@ pub async fn ship( // Phase 1: Push only (don't clean up yet) // Idempotency: if workspace is already gone (cleaned up after a prior ship), // treat push as a no-op — the branch is already on the remote. + let push_start = std::time::Instant::now(); let mut result = match manager.ship_push(ticket) { Ok(r) => r, Err(e) => { @@ -112,6 +115,8 @@ pub async fn ship( } }; + crate::execlog::record_step("push", "ok", push_start.elapsed().as_millis() as u64, None); + // Resolve base branch: --base CLI > config default_base > worktree's base_branch if let Some(base) = base_override { result.base_branch = base; @@ -134,6 +139,7 @@ pub async fn ship( } // Phase 2: Create PR/MR (async) + let pr_start = std::time::Instant::now(); let mut pr_failed = false; if !no_pr && config.ship.auto_pr && !crate::env::is_offline() { let (ticket_title, ticket_url) = @@ -246,6 +252,13 @@ pub async fn ship( } } + crate::execlog::record_step( + "create_pr", + if pr_failed { "error" } else { "ok" }, + pr_start.elapsed().as_millis() as u64, + result.pr_url.clone(), + ); + // Auto-comment PR link on the ticket if configured if config.tracker.comment_on_ship && !crate::env::is_offline() { if let Some(ref pr_url) = result.pr_url { diff --git a/src/cli/commands/workspace.rs 
b/src/cli/commands/workspace.rs index 75499c9..8aef3ed 100644 --- a/src/cli/commands/workspace.rs +++ b/src/cli/commands/workspace.rs @@ -21,6 +21,7 @@ pub async fn start( hook: Option, mode: Mode, ) -> Result<()> { + crate::execlog::set_ticket(ticket); let mut config = ParsecConfig::load()?; let repo_root = git::get_repo_root(repo)?; config.resolve_for_repo(&repo_root); diff --git a/src/cli/mod.rs b/src/cli/mod.rs index 7aba663..94f71f8 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -316,6 +316,10 @@ pub enum Command { /// Show last N entries (default: 20) #[arg(long, short = 'n', default_value = "20")] last: usize, + + /// Export execution log as JSONL (for observability/debugging) + #[arg(long)] + export: bool, }, /// Undo the last parsec operation @@ -562,7 +566,42 @@ pub async fn run(cli: Cli) -> Result<()> { std::env::set_var("PARSEC_OFFLINE", "1"); } - match cli.command { + // Observability: extract command name and set up execution tracking + let cmd_name = match &cli.command { + Command::Start { .. } => "start", + Command::List { .. } => "list", + Command::Status { .. } => "status", + Command::Ticket { .. } => "ticket", + Command::Ship { .. } => "ship", + Command::Clean { .. } => "clean", + Command::Conflicts => "conflicts", + Command::PrStatus { .. } => "pr-status", + Command::Merge { .. } => "merge", + Command::Ci { .. } => "ci", + Command::Diff { .. } => "diff", + Command::Switch { .. } => "switch", + Command::Sync { .. } => "sync", + Command::Open { .. } => "open", + Command::Adopt { .. } => "adopt", + Command::Log { .. } => "log", + Command::Undo { .. } => "undo", + Command::Inbox { .. } => "inbox", + Command::Board { .. } => "board", + Command::Stack { .. } => "stack", + Command::Root => "root", + Command::Init { .. } => "init", + Command::Config { .. } => "config", + Command::Doctor { .. } => "doctor", + Command::Release { .. } => "release", + Command::Create { .. } => "create", + Command::Rename { .. } => "rename", + Command::Compress { .. 
} => "compress", + }; + let exec_id = crate::execlog::new_execution_id(); + let exec_started_at = chrono::Utc::now(); + let exec_start = std::time::Instant::now(); + + let result = match cli.command { Command::Start { ticket, base, @@ -736,8 +775,16 @@ pub async fn run(cli: Cli) -> Result<()> { Command::Switch { ticket } => { commands::switch(&repo_path, ticket.as_deref(), output_mode).await } - Command::Log { ticket, last } => { - commands::log(&repo_path, ticket.as_deref(), last, output_mode).await + Command::Log { + ticket, + last, + export, + } => { + if export { + commands::log_export(&repo_path).await + } else { + commands::log(&repo_path, ticket.as_deref(), last, output_mode).await + } } Command::Undo { dry_run } => commands::undo(&repo_path, dry_run, output_mode).await, Command::Inbox { pick } => commands::inbox(&repo_path, pick, output_mode).await, @@ -835,5 +882,33 @@ pub async fn run(cli: Cli) -> Result<()> { Command::Compress { ticket, message } => { commands::compress(&repo_path, ticket.as_deref(), message, output_mode).await } + }; + + // Record execution entry (best-effort, never fail the command) + let duration = exec_start.elapsed(); + let steps = crate::execlog::take_steps(); + let ticket = crate::execlog::take_ticket(); + let entry = crate::execlog::ExecEntry { + execution_id: exec_id, + command: cmd_name.to_string(), + ticket, + started_at: exec_started_at, + finished_at: chrono::Utc::now(), + duration_ms: duration.as_millis() as u64, + status: if result.is_ok() { + "ok".to_string() + } else { + "error".to_string() + }, + error: result.as_ref().err().map(|e| format!("{e:#}")), + steps, + }; + // Use repo_path for logging; skip if .parsec dir can't be resolved + if let Ok(root) = crate::git::get_main_repo_root(&repo_path) + .or_else(|_| crate::git::get_repo_root(&repo_path)) + { + let _ = crate::execlog::append(&root, &entry); } + + result } diff --git a/src/execlog.rs b/src/execlog.rs new file mode 100644 index 0000000..b7793c2 --- /dev/null +++ 
b/src/execlog.rs @@ -0,0 +1,125 @@ +//! Lightweight execution log for observability. +//! +//! Each parsec command invocation is recorded as an `ExecEntry` with a unique +//! execution ID, timing, and optional step-level detail. Entries are stored as +//! newline-delimited JSON (JSONL) in `.parsec/execlog.jsonl`. + +use anyhow::Result; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use std::cell::RefCell; +use std::fs::{self, OpenOptions}; +use std::io::Write; +use std::path::{Path, PathBuf}; + +/// A single phase within a command execution. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExecStep { + pub phase: String, + pub status: String, + pub duration_ms: u64, + #[serde(skip_serializing_if = "Option::is_none")] + pub detail: Option, +} + +/// A complete command execution record. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExecEntry { + pub execution_id: String, + pub command: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub ticket: Option, + pub started_at: DateTime, + pub finished_at: DateTime, + pub duration_ms: u64, + pub status: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub steps: Vec, +} + +// --------------------------------------------------------------------------- +// Thread-local accumulators +// --------------------------------------------------------------------------- + +thread_local! { + static CURRENT_STEPS: RefCell> = const { RefCell::new(Vec::new()) }; + static CURRENT_TICKET: RefCell> = const { RefCell::new(None) }; +} + +/// Record a step in the current execution context. 
+pub fn record_step(phase: &str, status: &str, duration_ms: u64, detail: Option) { + CURRENT_STEPS.with(|steps| { + steps.borrow_mut().push(ExecStep { + phase: phase.to_string(), + status: status.to_string(), + duration_ms, + detail, + }); + }); +} + +/// Set the ticket for the current execution (called from commands). +pub fn set_ticket(ticket: &str) { + CURRENT_TICKET.with(|t| *t.borrow_mut() = Some(ticket.to_string())); +} + +/// Take all accumulated steps (clears the accumulator). +pub fn take_steps() -> Vec { + CURRENT_STEPS.with(|steps| std::mem::take(&mut *steps.borrow_mut())) +} + +/// Take the ticket set during execution. +pub fn take_ticket() -> Option { + CURRENT_TICKET.with(|t| t.borrow_mut().take()) +} + +/// Generate a new execution ID (UUID v4). +pub fn new_execution_id() -> String { + uuid::Uuid::new_v4().to_string() +} + +// --------------------------------------------------------------------------- +// Persistence (JSONL) +// --------------------------------------------------------------------------- + +fn execlog_path(repo_root: &Path) -> PathBuf { + repo_root.join(".parsec").join("execlog.jsonl") +} + +/// Append an execution entry to the JSONL log. +pub fn append(repo_root: &Path, entry: &ExecEntry) -> Result<()> { + let path = execlog_path(repo_root); + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + let line = serde_json::to_string(entry)?; + let mut file = OpenOptions::new().create(true).append(true).open(&path)?; + writeln!(file, "{}", line)?; + Ok(()) +} + +/// Load all execution entries from the JSONL log. +#[allow(dead_code)] +pub fn load(repo_root: &Path) -> Result> { + let path = execlog_path(repo_root); + if !path.exists() { + return Ok(Vec::new()); + } + let contents = fs::read_to_string(&path)?; + Ok(contents + .lines() + .filter(|line| !line.trim().is_empty()) + .filter_map(|line| serde_json::from_str(line).ok()) + .collect()) +} + +/// Read raw JSONL content for export. 
+pub fn read_raw(repo_root: &Path) -> Result { + let path = execlog_path(repo_root); + if !path.exists() { + return Ok(String::new()); + } + Ok(fs::read_to_string(&path)?) +} diff --git a/src/main.rs b/src/main.rs index 8cc97ce..440dca7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,6 +5,7 @@ mod cli; mod config; mod conflict; mod env; +mod execlog; mod git; mod github; mod gitlab; From 7a31165cc5faf4b809a03729aea06217a1112f6f Mon Sep 17 00:00:00 2001 From: erish Date: Thu, 30 Apr 2026 16:19:43 +0900 Subject: [PATCH 12/18] [240] feat: Bitbucket Cloud forge support (#276) * feat: Bitbucket Cloud forge support (#240) Add BitbucketClient with PR creation, status, merge, find-by-branch, and Pipelines CI monitoring via Bitbucket Cloud REST API v2. Integrate into ship (GitHub > Bitbucket > GitLab fallback chain), pr-status, and merge commands. Add PARSEC_BITBUCKET_TOKEN env var. Co-Authored-By: Claude Opus 4.6 * fix: resolve clippy and format warnings for Bitbucket module Add #[allow(dead_code)] for pipeline types and methods not yet wired into CI command. Fix cargo fmt formatting issues. Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- src/bitbucket/mod.rs | 436 +++++++++++++++++++++++++++++++++++++++ src/cli/commands/pr.rs | 132 ++++++++++-- src/cli/commands/ship.rs | 35 +++- src/env.rs | 19 ++ src/main.rs | 1 + 5 files changed, 599 insertions(+), 24 deletions(-) create mode 100644 src/bitbucket/mod.rs diff --git a/src/bitbucket/mod.rs b/src/bitbucket/mod.rs new file mode 100644 index 0000000..9d3805a --- /dev/null +++ b/src/bitbucket/mod.rs @@ -0,0 +1,436 @@ +//! Bitbucket Cloud REST API v2 integration. +//! +//! Provides PR creation, status, merge, CI pipeline monitoring, +//! and branch-based PR lookup for Bitbucket Cloud repositories. 
+ +use std::time::Duration; + +use anyhow::{bail, Context, Result}; +use reqwest::Client; +use serde::Deserialize; + +// --------------------------------------------------------------------------- +// Data types +// --------------------------------------------------------------------------- + +/// Result of PR creation +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub struct PrResult { + pub url: String, + pub id: u64, +} + +/// PR status information +#[derive(Debug, Clone)] +pub struct PrStatus { + pub id: u64, + pub title: String, + pub state: String, + pub url: String, +} + +/// Result of merging a PR +#[derive(Debug, Clone)] +pub struct MergeResult { + pub merged: bool, + pub message: String, +} + +/// A single pipeline step/result +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub struct PipelineStatus { + pub name: String, + pub state: String, + pub result: Option, + pub url: Option, +} + +/// Parsed Bitbucket remote info +#[derive(Debug, Clone)] +pub struct BitbucketRemote { + pub workspace: String, + pub repo_slug: String, +} + +// --------------------------------------------------------------------------- +// API response types (private) +// --------------------------------------------------------------------------- + +#[derive(Deserialize)] +struct ApiPr { + id: Option, + title: Option, + state: Option, + links: Option, +} + +#[derive(Deserialize)] +struct ApiLinks { + html: Option, +} + +#[derive(Deserialize)] +struct ApiHref { + href: Option, +} + +#[derive(Deserialize)] +struct ApiPrList { + values: Option>, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipeline { + uuid: Option, + state: Option, + target: Option, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipelineState { + name: Option, + result: Option, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipelineResult { + name: Option, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipelineTarget { + ref_name: Option, +} + 
+#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipelineList { + values: Option>, +} + +// --------------------------------------------------------------------------- +// Remote URL parsing +// --------------------------------------------------------------------------- + +/// Parse a Bitbucket Cloud remote URL into BitbucketRemote. +/// Supports SSH and HTTPS forms for bitbucket.org. +pub fn parse_bitbucket_remote(url: &str) -> Option { + // SSH: git@bitbucket.org:workspace/repo.git + if url.starts_with("git@bitbucket.org:") { + let path = url.strip_prefix("git@bitbucket.org:")?; + let path = path.trim_end_matches(".git"); + let mut parts = path.splitn(2, '/'); + let workspace = parts.next()?.to_owned(); + let repo_slug = parts.next()?.to_owned(); + return Some(BitbucketRemote { + workspace, + repo_slug, + }); + } + + // HTTPS: https://bitbucket.org/workspace/repo.git + let rest = url + .strip_prefix("https://bitbucket.org/") + .or_else(|| url.strip_prefix("http://bitbucket.org/"))?; + let path = rest.trim_end_matches(".git"); + let mut parts = path.splitn(2, '/'); + let workspace = parts.next()?.to_owned(); + let repo_slug = parts.next()?.to_owned(); + Some(BitbucketRemote { + workspace, + repo_slug, + }) +} + +/// Check if a remote URL is a Bitbucket Cloud URL. +pub fn is_bitbucket_remote(url: &str) -> bool { + url.contains("bitbucket.org") +} + +// --------------------------------------------------------------------------- +// BitbucketClient +// --------------------------------------------------------------------------- + +/// Authenticated Bitbucket Cloud API client. +pub struct BitbucketClient { + client: Client, + remote: BitbucketRemote, + token: String, +} + +impl BitbucketClient { + /// Create a new client for the given remote URL. + /// Returns `Ok(None)` when no Bitbucket token is available or URL is not Bitbucket. 
+ pub fn new(remote_url: &str) -> Result> { + if !is_bitbucket_remote(remote_url) { + return Ok(None); + } + + let remote = parse_bitbucket_remote(remote_url).ok_or_else(|| { + anyhow::anyhow!( + "could not parse workspace/repo from Bitbucket remote URL: {}", + remote_url + ) + })?; + + let token = match crate::env::bitbucket_token() { + Some(t) => t, + None => return Ok(None), + }; + + let client = Client::builder() + .timeout(Duration::from_secs(30)) + .connect_timeout(Duration::from_secs(10)) + .user_agent("git-parsec") + .build() + .context("failed to build HTTP client")?; + + Ok(Some(Self { + client, + remote, + token, + })) + } + + /// Access the parsed remote info. + pub fn remote(&self) -> &BitbucketRemote { + &self.remote + } + + /// Repo API path prefix. + fn repo_url(&self) -> String { + format!( + "https://api.bitbucket.org/2.0/repositories/{}/{}", + self.remote.workspace, self.remote.repo_slug + ) + } + + fn auth_get(&self, url: &str) -> reqwest::RequestBuilder { + self.client + .get(url) + .bearer_auth(&self.token) + .header("Accept", "application/json") + } + + fn auth_post(&self, url: &str) -> reqwest::RequestBuilder { + self.client + .post(url) + .bearer_auth(&self.token) + .header("Accept", "application/json") + .header("Content-Type", "application/json") + } + + // -- API methods --------------------------------------------------------- + + /// Create a pull request. 
+ pub async fn create_pr( + &self, + branch: &str, + base: &str, + title: &str, + description: &str, + _draft: bool, // Bitbucket Cloud doesn't support draft PRs natively + ) -> Result { + let url = format!("{}/pullrequests", self.repo_url()); + + let payload = serde_json::json!({ + "title": title, + "description": description, + "source": { + "branch": { "name": branch } + }, + "destination": { + "branch": { "name": base } + }, + "close_source_branch": true + }); + + let response = self + .auth_post(&url) + .json(&payload) + .send() + .await + .context("Failed to send PR creation request to Bitbucket")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let pr: ApiPr = response + .json() + .await + .context("Failed to parse Bitbucket API response")?; + + let id = pr.id.unwrap_or(0); + let html_url = pr + .links + .and_then(|l| l.html) + .and_then(|h| h.href) + .unwrap_or_else(|| { + format!( + "https://bitbucket.org/{}/{}/pull-requests/{}", + self.remote.workspace, self.remote.repo_slug, id + ) + }); + + Ok(PrResult { url: html_url, id }) + } + + /// Find an open PR by source branch name. + pub async fn find_pr_by_branch(&self, branch: &str) -> Result> { + let url = format!( + "{}/pullrequests?q=source.branch.name=\"{}\" AND state=\"OPEN\"", + self.repo_url(), + branch + ); + + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to query Bitbucket PRs")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let list: ApiPrList = response.json().await?; + Ok(list.values.and_then(|v| v.first().and_then(|pr| pr.id))) + } + + /// Get PR status by ID. 
+ pub async fn get_pr_status(&self, pr_id: u64) -> Result { + let url = format!("{}/pullrequests/{}", self.repo_url(), pr_id); + + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to fetch Bitbucket PR")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let pr: ApiPr = response.json().await?; + let id = pr.id.unwrap_or(pr_id); + let html_url = pr + .links + .and_then(|l| l.html) + .and_then(|h| h.href) + .unwrap_or_default(); + + Ok(PrStatus { + id, + title: pr.title.unwrap_or_default(), + state: pr.state.unwrap_or_else(|| "unknown".to_string()), + url: html_url, + }) + } + + /// Merge a PR. + pub async fn merge_pr(&self, pr_id: u64, strategy: &str) -> Result { + let url = format!("{}/pullrequests/{}/merge", self.repo_url(), pr_id); + + // Bitbucket merge strategies: merge_commit, squash, fast_forward + let bb_strategy = match strategy { + "squash" => "squash", + "rebase" => "fast_forward", + _ => "merge_commit", + }; + + let payload = serde_json::json!({ + "merge_strategy": bb_strategy, + "close_source_branch": true + }); + + let response = self + .auth_post(&url) + .json(&payload) + .send() + .await + .context("Failed to merge Bitbucket PR")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket merge failed ({}): {}", status, body); + } + + Ok(MergeResult { + merged: true, + message: format!("PR #{} merged via {}", pr_id, bb_strategy), + }) + } + + /// Get pipeline status for a branch. 
+ #[allow(dead_code)] + pub async fn get_pipelines(&self, branch: &str) -> Result> { + let url = format!( + "{}/pipelines/?sort=-created_on&pagelen=5&target.ref_name={}", + self.repo_url(), + branch + ); + + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to fetch Bitbucket pipelines")?; + + if !response.status().is_success() { + // Pipelines may not be enabled — return empty + return Ok(Vec::new()); + } + + let list: ApiPipelineList = response.json().await?; + let pipelines = list + .values + .unwrap_or_default() + .into_iter() + .map(|p| { + let state_name = p + .state + .as_ref() + .and_then(|s| s.name.clone()) + .unwrap_or_else(|| "unknown".to_string()); + let result_name = p + .state + .as_ref() + .and_then(|s| s.result.as_ref()) + .and_then(|r| r.name.clone()); + let uuid = p.uuid.unwrap_or_default(); + let ref_name = p + .target + .and_then(|t| t.ref_name) + .unwrap_or_else(|| branch.to_string()); + let pipeline_url = format!( + "https://bitbucket.org/{}/{}/pipelines/results/{}", + self.remote.workspace, + self.remote.repo_slug, + uuid.trim_matches(|c| c == '{' || c == '}') + ); + PipelineStatus { + name: format!("pipeline ({})", ref_name), + state: state_name, + result: result_name, + url: Some(pipeline_url), + } + }) + .collect(); + + Ok(pipelines) + } +} diff --git a/src/cli/commands/pr.rs b/src/cli/commands/pr.rs index c56b197..436fe03 100644 --- a/src/cli/commands/pr.rs +++ b/src/cli/commands/pr.rs @@ -1,8 +1,9 @@ use std::path::Path; -use anyhow::{Context, Result}; +use anyhow::{bail, Context, Result}; use colored::Colorize; +use crate::bitbucket; use crate::config::ParsecConfig; use crate::errors::ErrorCode; use crate::git; @@ -140,27 +141,52 @@ pub async fn pr_status(repo: &Path, ticket: Option<&str>, mode: Mode) -> Result< all_entries.push((ws.ticket.clone(), pr_number, String::new())); } } + } else if let Some(bb) = bitbucket::BitbucketClient::new(&remote_url)? 
{ + for ws in &workspaces { + if let Ok(Some(pr_id)) = bb.find_pr_by_branch(&ws.branch).await { + all_entries.push((ws.ticket.clone(), pr_id, String::new())); + } + } } if all_entries.is_empty() { if let Some(t) = ticket { - bail_code!(ErrorCode::E010, "no PR found for {t}. Ship it first with `parsec ship {t}`, or check your GitHub token."); + bail_code!(ErrorCode::E010, "no PR found for {t}. Ship it first with `parsec ship {t}`, or check your forge token."); } else { - bail_code!(ErrorCode::E010, "no PRs found. Ship a ticket first with `parsec ship`, or check your GitHub token."); + bail_code!(ErrorCode::E010, "no PRs found. Ship a ticket first with `parsec ship`, or check your forge token."); } } } - let gh = github::GitHubClient::new(&remote_url, &config)?.ok_or_else(|| { - anyhow::Error::from(crate::errors::ParsecError::new( - ErrorCode::E001, - "no GitHub token found. Set PARSEC_GITHUB_TOKEN.", - )) - })?; + // Try GitHub first, then Bitbucket let mut statuses = Vec::new(); - for (ticket_id, pr_number, _url) in &all_entries { - let status = gh.get_pr_status(*pr_number).await?; - statuses.push((ticket_id.clone(), status)); + if let Some(gh) = github::GitHubClient::new(&remote_url, &config)? { + for (ticket_id, pr_number, _url) in &all_entries { + let status = gh.get_pr_status(*pr_number).await?; + statuses.push((ticket_id.clone(), status)); + } + } else if let Some(bb) = bitbucket::BitbucketClient::new(&remote_url)? { + for (ticket_id, pr_id, _url) in &all_entries { + let bb_status = bb.get_pr_status(*pr_id).await?; + // Map to github::PrStatus for output compatibility + statuses.push(( + ticket_id.clone(), + github::PrStatus { + number: bb_status.id, + title: bb_status.title, + state: bb_status.state.to_lowercase(), + mergeable: None, + ci_status: "unknown".to_string(), + review_status: "unknown".to_string(), + url: bb_status.url, + }, + )); + } + } else { + bail_code!( + ErrorCode::E001, + "no forge token found. 
Set PARSEC_GITHUB_TOKEN or PARSEC_BITBUCKET_TOKEN." + ); } output::print_pr_status(&statuses, mode); @@ -178,15 +204,20 @@ pub async fn merge( let config = ParsecConfig::load()?; let repo_root = git::get_main_repo_root(repo).or_else(|_| git::get_repo_root(repo))?; let remote_url = git::run_output(repo, &["remote", "get-url", "origin"])?; - let gh = github::GitHubClient::new(&remote_url, &config)?.ok_or_else(|| { - anyhow::Error::from(crate::errors::ParsecError::new( - ErrorCode::E001, - "no GitHub token found. Set PARSEC_GITHUB_TOKEN.", - )) - })?; let oplog = crate::oplog::OpLog::load(&repo_root)?; let manager = WorktreeManager::new(repo, &config)?; + // Detect forge: GitHub or Bitbucket + let has_github = github::GitHubClient::new(&remote_url, &config)?.is_some(); + let has_bitbucket = !has_github && bitbucket::BitbucketClient::new(&remote_url)?.is_some(); + + if !has_github && !has_bitbucket { + bail_code!( + ErrorCode::E001, + "no forge token found. Set PARSEC_GITHUB_TOKEN or PARSEC_BITBUCKET_TOKEN." + ); + } + // Resolve ticket let ticket_id = if let Some(t) = ticket { t.to_string() @@ -218,17 +249,74 @@ pub async fn merge( let ws = manager.get(&ticket_id).with_context(|| { format!("ticket {ticket_id} not found in active workspaces or oplog") })?; - gh.find_pr_by_branch(&ws.branch) - .await? - .ok_or_else(|| { + if has_github { + let gh = github::GitHubClient::new(&remote_url, &config)?.unwrap(); + gh.find_pr_by_branch(&ws.branch).await?.ok_or_else(|| { anyhow::anyhow!( - "no open PR found for {ticket_id} (branch '{}'). Either ship it with `parsec ship {ticket_id}`, or check that PARSEC_GITHUB_TOKEN is set.", + "no open PR found for {ticket_id} (branch '{}'). Ship it first.", ws.branch ) })? + } else { + let bb = bitbucket::BitbucketClient::new(&remote_url)?.unwrap(); + bb.find_pr_by_branch(&ws.branch).await?.ok_or_else(|| { + anyhow::anyhow!( + "no open PR found for {ticket_id} (branch '{}'). Ship it first.", + ws.branch + ) + })? 
+ } } }; + // Bitbucket merge path + if has_bitbucket { + let bb = bitbucket::BitbucketClient::new(&remote_url)?.unwrap(); + let method = if rebase { "rebase" } else { "squash" }; + match bb.merge_pr(pr_number, method).await { + Ok(mr) => { + if mode == Mode::Human { + println!("Merged PR #{} ({})", pr_number, mr.message); + } else if mode == Mode::Json { + println!( + "{}", + serde_json::json!({ + "ticket": ticket_id, + "pr_number": pr_number, + "merged": mr.merged, + "method": method, + }) + ); + } + } + Err(e) => { + bail!("Bitbucket merge failed: {e}"); + } + } + + // Auto-transition ticket status + if let Some(ref auto) = config.tracker.auto_transition { + if let Some(ref status) = auto.on_merge { + tracker::try_transition(&config, &ticket_id, status).await; + } + } + + if let Err(e) = crate::oplog::record( + &repo_root, + crate::oplog::OpKind::Clean, + Some(&ticket_id), + &format!("Merged PR #{} ({})", pr_number, method), + None, + ) { + eprintln!("warning: failed to write oplog: {e}"); + } + + return Ok(()); + } + + // GitHub merge path + let gh = github::GitHubClient::new(&remote_url, &config)?.unwrap(); + // Idempotency: check if PR is already merged/closed if let Ok(status) = gh.get_pr_status(pr_number).await { if status.state == "closed" { diff --git a/src/cli/commands/ship.rs b/src/cli/commands/ship.rs index ceb0eb7..4c62abc 100644 --- a/src/cli/commands/ship.rs +++ b/src/cli/commands/ship.rs @@ -2,6 +2,7 @@ use std::path::Path; use anyhow::{Context, Result}; +use crate::bitbucket; use crate::config::ParsecConfig; use crate::errors::ErrorCode; use crate::git; @@ -221,8 +222,38 @@ pub async fn ship( } } } + } else if let Some(bb) = bitbucket::BitbucketClient::new(remote_url)? 
{ + // No GitHub token — try Bitbucket + if let Ok(Some(existing_pr)) = bb.find_pr_by_branch(&result.branch).await { + let pr_url = format!( + "https://bitbucket.org/{}/{}/pull-requests/{}", + bb.remote().workspace, + bb.remote().repo_slug, + existing_pr + ); + result.pr_url = Some(pr_url); + } else { + match bb + .create_pr( + &result.branch, + &result.base_branch, + &pr_title, + &pr_body, + draft || config.ship.draft, + ) + .await + { + Ok(pr) => { + result.pr_url = Some(pr.url); + } + Err(e) => { + eprintln!("error: Bitbucket PR creation failed: {e}"); + pr_failed = true; + } + } + } } else { - // No GitHub token — try GitLab + // No GitHub/Bitbucket token — try GitLab match gitlab::create_mr( remote_url, &result.branch, @@ -239,7 +270,7 @@ pub async fn ship( Ok(None) => { eprintln!( "note: PR/MR creation skipped — no token found.\n \ - Set PARSEC_GITHUB_TOKEN or PARSEC_GITLAB_TOKEN to enable." + Set PARSEC_GITHUB_TOKEN, PARSEC_BITBUCKET_TOKEN, or PARSEC_GITLAB_TOKEN to enable." ); pr_failed = true; } diff --git a/src/env.rs b/src/env.rs index 045318c..b5b8605 100644 --- a/src/env.rs +++ b/src/env.rs @@ -71,6 +71,25 @@ pub fn gitlab_token() -> Option { None } +// --------------------------------------------------------------------------- +// Bitbucket +// --------------------------------------------------------------------------- + +pub const PARSEC_BITBUCKET_TOKEN: &str = "PARSEC_BITBUCKET_TOKEN"; +pub const BITBUCKET_TOKEN: &str = "BITBUCKET_TOKEN"; + +/// Resolve Bitbucket token. 
Priority: PARSEC_BITBUCKET_TOKEN > BITBUCKET_TOKEN +pub fn bitbucket_token() -> Option { + for var in [PARSEC_BITBUCKET_TOKEN, BITBUCKET_TOKEN] { + if let Ok(token) = std::env::var(var) { + if !token.is_empty() { + return Some(token); + } + } + } + None +} + // --------------------------------------------------------------------------- // Offline mode // --------------------------------------------------------------------------- diff --git a/src/main.rs b/src/main.rs index 440dca7..654d49f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,7 @@ #[macro_use] mod errors; +mod bitbucket; mod cli; mod config; mod conflict; From b1adb06de2ac64a537c6f1a6035756cd13d8b5e1 Mon Sep 17 00:00:00 2001 From: erish Date: Thu, 30 Apr 2026 16:25:03 +0900 Subject: [PATCH 13/18] ci: add release/** branches to CI triggers (#277) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test: add 11 integration tests for improved coverage Add tests for: quiet mode, --title flag, --base branch, stacked worktrees (--on), ship --dry-run, doctor command, log filtering, orphan cleanup, rename, start --branch, and JSON error format. Total coverage: 26 → 37 tests. Co-Authored-By: Claude Opus 4.6 * ci: add release/** branches to CI workflow triggers Enable CI checks for PRs targeting release/* branches so that v1.0 milestone work gets the same CI validation as develop/main. 
Co-Authored-By: Claude Opus 4.6 * style: fix cargo fmt formatting in tests Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .github/workflows/ci.yml | 4 +- tests/cli_tests.rs | 352 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 354 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4ed0490..6ba2807 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,9 +2,9 @@ name: CI on: push: - branches: [main, develop] + branches: [main, develop, 'release/**'] pull_request: - branches: [main, develop] + branches: [main, develop, 'release/**'] env: CARGO_TERM_COLOR: always diff --git a/tests/cli_tests.rs b/tests/cli_tests.rs index a4b7622..f8b2a63 100644 --- a/tests/cli_tests.rs +++ b/tests/cli_tests.rs @@ -702,3 +702,355 @@ fn test_root_prints_repo_path() { .success() .stdout(predicate::str::is_empty().not()); } + +// --------------------------------------------------------------------------- +// quiet mode +// --------------------------------------------------------------------------- + +#[test] +fn test_quiet_mode_suppresses_output() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "QUIET-001", "--repo", repo_path]) + .assert() + .success(); + + // --quiet list should produce no stdout output (empty or whitespace-only). 
+ let output = parsec() + .args(["--quiet", "list", "--repo", repo_path]) + .output() + .unwrap(); + assert!(output.status.success()); + assert!( + String::from_utf8(output.stdout).unwrap().trim().is_empty(), + "quiet mode should suppress normal output" + ); +} + +// --------------------------------------------------------------------------- +// start --title +// --------------------------------------------------------------------------- + +#[test] +fn test_start_with_title() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args([ + "start", + "TITLE-001", + "--title", + "My Custom Title", + "--repo", + repo_path, + ]) + .assert() + .success(); + + // The title should be stored in state.json. + let state_path = repo.path().join(".parsec").join("state.json"); + let contents = std::fs::read_to_string(&state_path).unwrap(); + assert!( + contents.contains("My Custom Title"), + "state.json should store the custom title" + ); +} + +// --------------------------------------------------------------------------- +// start --base (custom base branch) +// --------------------------------------------------------------------------- + +#[test] +fn test_start_with_base_branch() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + // Create and push a "develop" branch. 
+ StdCommand::new("git") + .args(["checkout", "-b", "develop"]) + .current_dir(repo.path()) + .output() + .unwrap(); + StdCommand::new("git") + .args(["commit", "--allow-empty", "-m", "develop init"]) + .current_dir(repo.path()) + .output() + .unwrap(); + StdCommand::new("git") + .args(["push", "origin", "develop"]) + .current_dir(repo.path()) + .output() + .unwrap(); + StdCommand::new("git") + .args(["checkout", "main"]) + .current_dir(repo.path()) + .output() + .unwrap(); + + parsec() + .args([ + "start", "BASE-001", "--base", "develop", "--repo", repo_path, + ]) + .assert() + .success(); + + // Verify the worktree was created with develop as base. + let state_path = repo.path().join(".parsec").join("state.json"); + let contents = std::fs::read_to_string(&state_path).unwrap(); + let state: serde_json::Value = serde_json::from_str(&contents).unwrap(); + assert_eq!( + state["workspaces"]["BASE-001"]["base_branch"] + .as_str() + .unwrap(), + "develop" + ); +} + +// --------------------------------------------------------------------------- +// start --on (stacked worktrees) +// --------------------------------------------------------------------------- + +#[test] +fn test_start_stacked_on_parent() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + // Start a parent worktree. + parsec() + .args(["start", "STACK-PARENT", "--repo", repo_path]) + .assert() + .success(); + + // Start a child stacked on the parent. + parsec() + .args([ + "start", + "STACK-CHILD", + "--on", + "STACK-PARENT", + "--repo", + repo_path, + ]) + .assert() + .success(); + + // Verify parent_ticket is set in state.json. 
+ let state_path = repo.path().join(".parsec").join("state.json"); + let contents = std::fs::read_to_string(&state_path).unwrap(); + let state: serde_json::Value = serde_json::from_str(&contents).unwrap(); + assert_eq!( + state["workspaces"]["STACK-CHILD"]["parent_ticket"] + .as_str() + .unwrap(), + "STACK-PARENT" + ); +} + +// --------------------------------------------------------------------------- +// ship --dry-run +// --------------------------------------------------------------------------- + +#[test] +fn test_ship_dry_run() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "DRY-SHIP", "--repo", repo_path]) + .assert() + .success(); + + // --dry-run should succeed without actually shipping. + parsec() + .args(["--dry-run", "ship", "DRY-SHIP", "--repo", repo_path]) + .assert() + .success(); + + // The worktree should still be listed (not cleaned up). + parsec() + .args(["list", "--repo", repo_path]) + .assert() + .success() + .stdout(predicate::str::contains("DRY-SHIP")); +} + +// --------------------------------------------------------------------------- +// doctor +// --------------------------------------------------------------------------- + +#[test] +fn test_doctor_succeeds() { + let repo = setup_repo(); + parsec() + .args(["doctor", "--repo", repo.path().to_str().unwrap()]) + .assert() + .success(); +} + +// --------------------------------------------------------------------------- +// log --ticket filter +// --------------------------------------------------------------------------- + +#[test] +fn test_log_filter_by_ticket() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "LOGF-A", "--repo", repo_path]) + .assert() + .success(); + + parsec() + .args(["start", "LOGF-B", "--repo", repo_path]) + .assert() + .success(); + + // Filter log to LOGF-A only (ticket is a positional arg). 
+ let output = parsec() + .args(["log", "LOGF-A", "--repo", repo_path]) + .output() + .unwrap(); + + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("LOGF-A"), "filtered log should show LOGF-A"); + assert!( + !stdout.contains("LOGF-B"), + "filtered log should not show LOGF-B" + ); +} + +// --------------------------------------------------------------------------- +// clean --orphans +// --------------------------------------------------------------------------- + +#[test] +fn test_clean_orphans() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "ORPHAN-001", "--repo", repo_path]) + .assert() + .success(); + + // Manually delete the worktree directory to create an orphan. + let state_path = repo.path().join(".parsec").join("state.json"); + let state_contents = std::fs::read_to_string(&state_path).unwrap(); + let state: serde_json::Value = serde_json::from_str(&state_contents).unwrap(); + let wt_path = state["workspaces"]["ORPHAN-001"]["path"].as_str().unwrap(); + + // Remove the worktree directory and prune git worktree list. + std::fs::remove_dir_all(wt_path).unwrap(); + StdCommand::new("git") + .args(["worktree", "prune"]) + .current_dir(repo.path()) + .output() + .unwrap(); + + // clean --orphans should remove the stale entry. + parsec() + .args(["clean", "--orphans", "--repo", repo_path]) + .assert() + .success(); + + // The orphaned workspace should be gone. 
+ parsec() + .args(["list", "--repo", repo_path]) + .assert() + .success() + .stdout(predicate::str::contains("ORPHAN-001").not()); +} + +// --------------------------------------------------------------------------- +// rename +// --------------------------------------------------------------------------- + +#[test] +fn test_rename_ticket() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "OLD-NAME", "--repo", repo_path]) + .assert() + .success(); + + parsec() + .args(["rename", "OLD-NAME", "NEW-NAME", "--repo", repo_path]) + .assert() + .success(); + + // OLD-NAME gone, NEW-NAME present. + parsec() + .args(["list", "--repo", repo_path]) + .assert() + .success() + .stdout(predicate::str::contains("OLD-NAME").not()) + .stdout(predicate::str::contains("NEW-NAME")); +} + +// --------------------------------------------------------------------------- +// start --branch (existing branch) +// --------------------------------------------------------------------------- + +#[test] +fn test_start_with_existing_branch() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + // Create an existing branch. + StdCommand::new("git") + .args(["branch", "my-existing-branch"]) + .current_dir(repo.path()) + .output() + .unwrap(); + + parsec() + .args([ + "start", + "EXIST-001", + "--branch", + "my-existing-branch", + "--repo", + repo_path, + ]) + .assert() + .success(); + + // Should be listed with the correct branch. 
+ let state_path = repo.path().join(".parsec").join("state.json"); + let contents = std::fs::read_to_string(&state_path).unwrap(); + assert!(contents.contains("my-existing-branch")); +} + +// --------------------------------------------------------------------------- +// JSON error format +// --------------------------------------------------------------------------- + +#[test] +fn test_json_error_format() { + let repo = setup_repo(); + let output = parsec() + .args([ + "--json", + "ship", + "NONEXIST", + "--repo", + repo.path().to_str().unwrap(), + ]) + .output() + .unwrap(); + + assert!(!output.status.success()); + + let stdout = String::from_utf8(output.stdout).unwrap(); + let parsed: serde_json::Value = + serde_json::from_str(&stdout).expect("JSON error output must be parseable"); + assert_eq!(parsed["error"].as_bool().unwrap(), true); + assert!(parsed.get("code").is_some()); + assert!(parsed.get("message").is_some()); +} From 0a1c5ba7e11d326ab095679eb930f7bf5513dbed Mon Sep 17 00:00:00 2001 From: erish Date: Thu, 30 Apr 2026 16:25:07 +0900 Subject: [PATCH 14/18] test: add 11 integration tests for improved coverage (#278) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test: add 11 integration tests for improved coverage Add tests for: quiet mode, --title flag, --base branch, stacked worktrees (--on), ship --dry-run, doctor command, log filtering, orphan cleanup, rename, start --branch, and JSON error format. Total coverage: 26 → 37 tests. 
Co-Authored-By: Claude Opus 4.6 * style: fix cargo fmt formatting in tests Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 From 31fe36be1568212ffe493b414f64ab41eed0d948 Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 4 May 2026 09:19:03 +0900 Subject: [PATCH 15/18] [207] feat: selective build cache sharing between worktrees (#280) Add `[worktree]` config section so `parsec start` can reuse build artifacts from the main repo instead of forcing a cold rebuild. [worktree] shared_cache = ["target", "node_modules", ".venv"] cache_strategy = "symlink" # "symlink" | "copy" For each entry, the source `/` is shared into the new worktree. Missing sources and pre-existing destinations are skipped. Sharing failures are logged but never fail the worktree itself. Default is an empty list, preserving prior behavior. Co-authored-by: Claude Opus 4.7 (1M context) --- CHANGELOG.md | 9 ++ README.md | 11 ++ schema/parsec-config.schema.json | 19 +++ src/config/mod.rs | 1 + src/config/settings.rs | 102 +++++++++++++ src/worktree/cache_share.rs | 244 +++++++++++++++++++++++++++++++ src/worktree/manager.rs | 9 ++ src/worktree/mod.rs | 1 + tests/cli_tests.rs | 141 +++++++++++++++++- 9 files changed, 536 insertions(+), 1 deletion(-) create mode 100644 src/worktree/cache_share.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index b326ae4..c7d7b6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,15 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +### Added +- `[worktree]` config section with `shared_cache` and `cache_strategy` for + selective build cache sharing between worktrees. New worktrees can now reuse + `target/`, `node_modules/`, `.venv/`, etc. 
from the main repo via symlink + (default) or recursive copy, eliminating cold-build cost on `parsec start` + (#207). + ## [0.3.3] - 2026-04-22 ### Added diff --git a/README.md b/README.md index 32a3ee5..2f3500d 100644 --- a/README.md +++ b/README.md @@ -1281,6 +1281,17 @@ layout = "sibling" base_dir = ".parsec/workspaces" branch_prefix = "feature/" +[worktree] +# Directories to share from the main repo into new worktrees so that +# `parsec start` doesn't trigger a cold rebuild. Default is empty (no sharing). +shared_cache = ["target", "node_modules", ".venv"] +# "symlink" (default): fast, zero-disk overhead. All worktrees and the main +# repo share one cache — running parallel builds of the +# same artifact may race. +# "copy": full copy at start time. Each worktree gets an independent cache, +# no race risk, but uses more disk and the initial copy takes time. +cache_strategy = "symlink" + [tracker] # "jira" | "github" | "gitlab" | "none" provider = "jira" diff --git a/schema/parsec-config.schema.json b/schema/parsec-config.schema.json index d6b6956..e5ddf70 100644 --- a/schema/parsec-config.schema.json +++ b/schema/parsec-config.schema.json @@ -37,6 +37,25 @@ }, "additionalProperties": false }, + "worktree": { + "type": "object", + "description": "Build cache sharing for new worktrees", + "properties": { + "shared_cache": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Directories to share from the main repo into new worktrees (e.g. 
target, node_modules, .venv)" + }, + "cache_strategy": { + "type": "string", + "enum": ["symlink", "copy"], + "default": "symlink", + "description": "How to share cache directories: symlink (fast, shared state) or copy (independent state)" + } + }, + "additionalProperties": false + }, "tracker": { "type": "object", "description": "Issue tracker integration settings", diff --git a/src/config/mod.rs b/src/config/mod.rs index 8fc4636..9e4a140 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -1,5 +1,6 @@ mod settings; +pub use settings::CacheStrategy; pub use settings::ParsecConfig; pub use settings::TrackerProvider; pub use settings::WorktreeLayout; diff --git a/src/config/settings.rs b/src/config/settings.rs index 90fab68..14dda07 100644 --- a/src/config/settings.rs +++ b/src/config/settings.rs @@ -84,6 +84,44 @@ impl std::fmt::Display for WorktreeLayout { } } +// --------------------------------------------------------------------------- +// CacheStrategy +// --------------------------------------------------------------------------- + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] +#[derive(Default)] +pub enum CacheStrategy { + #[default] + Symlink, + Copy, +} + +impl std::fmt::Display for CacheStrategy { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CacheStrategy::Symlink => write!(f, "symlink"), + CacheStrategy::Copy => write!(f, "copy"), + } + } +} + +// --------------------------------------------------------------------------- +// WorktreeConfig +// --------------------------------------------------------------------------- + +/// Build cache sharing settings for new worktrees. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct WorktreeConfig { + /// Directories to share from the main repo into new worktrees + /// (e.g. ["target", "node_modules", ".venv"]). 
+ #[serde(default)] + pub shared_cache: Vec, + /// How to share the directories: symlink (default) or copy. + #[serde(default)] + pub cache_strategy: CacheStrategy, +} + // --------------------------------------------------------------------------- // WorkspaceConfig // --------------------------------------------------------------------------- @@ -356,6 +394,8 @@ pub struct ParsecConfig { #[serde(default)] pub workspace: WorkspaceConfig, #[serde(default)] + pub worktree: WorktreeConfig, + #[serde(default)] pub tracker: TrackerConfig, #[serde(default)] pub ship: ShipConfig, @@ -586,3 +626,65 @@ impl ParsecConfig { Ok(config) } } + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn worktree_config_defaults_when_section_missing() { + let config: ParsecConfig = toml::from_str("").unwrap(); + assert!(config.worktree.shared_cache.is_empty()); + assert_eq!(config.worktree.cache_strategy, CacheStrategy::Symlink); + } + + #[test] + fn worktree_config_parses_full_section() { + let toml_str = r#" +[worktree] +shared_cache = ["target", ".venv"] +cache_strategy = "copy" +"#; + let config: ParsecConfig = toml::from_str(toml_str).unwrap(); + assert_eq!( + config.worktree.shared_cache, + vec!["target".to_string(), ".venv".to_string()] + ); + assert_eq!(config.worktree.cache_strategy, CacheStrategy::Copy); + } + + #[test] + fn worktree_config_partial_fields_take_defaults() { + let toml_str = r#" +[worktree] +shared_cache = ["target"] +"#; + let config: ParsecConfig = toml::from_str(toml_str).unwrap(); + assert_eq!(config.worktree.shared_cache, vec!["target".to_string()]); + assert_eq!(config.worktree.cache_strategy, CacheStrategy::Symlink); + } + + #[test] + fn worktree_config_unknown_strategy_is_error() { + let toml_str = r#" +[worktree] +cache_strategy = "hardlink" +"#; + let err = 
toml::from_str::(toml_str).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("hardlink") || msg.to_lowercase().contains("variant"), + "expected error to mention bad variant, got: {msg}" + ); + } + + #[test] + fn cache_strategy_symlink_is_default() { + let strategy = CacheStrategy::default(); + assert_eq!(strategy, CacheStrategy::Symlink); + } +} diff --git a/src/worktree/cache_share.rs b/src/worktree/cache_share.rs new file mode 100644 index 0000000..a6e4c75 --- /dev/null +++ b/src/worktree/cache_share.rs @@ -0,0 +1,244 @@ +use std::path::Path; + +use crate::config::CacheStrategy; + +/// Share build-cache directories from the main repo into a freshly created +/// worktree. Each entry is processed independently; failures are logged but +/// never propagated, so a flaky cache share never breaks `parsec start`. +/// +/// - Source path is `/`. Missing → skip. +/// - Destination path is `/`. Already exists → skip. +/// - `Symlink` creates a symlink to the absolute source path; `Copy` does a +/// recursive copy using stdlib only (no extra dependency). +pub fn share_cache( + repo_root: &Path, + worktree_path: &Path, + entries: &[String], + strategy: CacheStrategy, +) { + if entries.is_empty() { + return; + } + + for entry in entries { + if entry.is_empty() || entry.contains("..") { + eprintln!("warning: skipping invalid shared_cache entry {:?}", entry); + continue; + } + + let src = repo_root.join(entry); + let dest = worktree_path.join(entry); + + if !src.exists() { + eprintln!( + "info: shared_cache: source '{}' does not exist in main repo, skipping", + entry + ); + continue; + } + + if dest.exists() || dest.symlink_metadata().is_ok() { + eprintln!( + "info: shared_cache: destination '{}' already exists in worktree, skipping", + entry + ); + continue; + } + + // Ensure dest's parent exists (for nested entries like "a/b/target"). 
+ if let Some(parent) = dest.parent() { + if !parent.exists() { + if let Err(e) = std::fs::create_dir_all(parent) { + eprintln!( + "warning: shared_cache: failed to create parent for '{}': {e}", + entry + ); + continue; + } + } + } + + let abs_src = match dunce::canonicalize(&src) { + Ok(p) => p, + Err(e) => { + eprintln!( + "warning: shared_cache: cannot resolve source '{}': {e}", + entry + ); + continue; + } + }; + + let result = match strategy { + CacheStrategy::Symlink => create_symlink(&abs_src, &dest), + CacheStrategy::Copy => copy_recursive(&abs_src, &dest), + }; + + match result { + Ok(()) => { + eprintln!( + "info: shared_cache: {} '{}' from {} -> {}", + strategy, + entry, + abs_src.display(), + dest.display() + ); + } + Err(e) => { + eprintln!( + "warning: shared_cache: failed to share '{}' ({}): {e}", + entry, strategy + ); + } + } + } +} + +#[cfg(unix)] +fn create_symlink(src: &Path, dest: &Path) -> std::io::Result<()> { + std::os::unix::fs::symlink(src, dest) +} + +#[cfg(windows)] +fn create_symlink(src: &Path, dest: &Path) -> std::io::Result<()> { + if src.is_dir() { + std::os::windows::fs::symlink_dir(src, dest) + } else { + std::os::windows::fs::symlink_file(src, dest) + } +} + +fn copy_recursive(src: &Path, dest: &Path) -> std::io::Result<()> { + let metadata = std::fs::symlink_metadata(src)?; + let file_type = metadata.file_type(); + + if file_type.is_symlink() { + // Follow symlinks during copy (resolving once); fall back to plain copy. + let target = std::fs::read_link(src)?; + let resolved = if target.is_absolute() { + target + } else { + src.parent().unwrap_or(Path::new(".")).join(target) + }; + return copy_recursive(&resolved, dest); + } + + if file_type.is_dir() { + std::fs::create_dir_all(dest)?; + for entry in std::fs::read_dir(src)? 
{ + let entry = entry?; + let child_src = entry.path(); + let child_dest = dest.join(entry.file_name()); + copy_recursive(&child_src, &child_dest)?; + } + Ok(()) + } else { + if let Some(parent) = dest.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::copy(src, dest).map(|_| ()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::TempDir; + + fn read_file(p: &Path) -> String { + fs::read_to_string(p).unwrap() + } + + fn make_dirs() -> (TempDir, std::path::PathBuf, std::path::PathBuf) { + let tmp = TempDir::new().unwrap(); + let repo = tmp.path().join("repo"); + let wt = tmp.path().join("worktree"); + fs::create_dir_all(&repo).unwrap(); + fs::create_dir_all(&wt).unwrap(); + (tmp, repo, wt) + } + + #[test] + fn symlink_strategy_links_existing_dir() { + let (_tmp, repo, wt) = make_dirs(); + fs::create_dir_all(repo.join("target")).unwrap(); + fs::write(repo.join("target/build.txt"), "hello").unwrap(); + + share_cache(&repo, &wt, &["target".to_string()], CacheStrategy::Symlink); + + let dest = wt.join("target"); + assert!(dest.exists()); + let meta = fs::symlink_metadata(&dest).unwrap(); + assert!(meta.file_type().is_symlink(), "should be a symlink"); + assert_eq!(read_file(&dest.join("build.txt")), "hello"); + } + + #[test] + fn copy_strategy_creates_real_dir() { + let (_tmp, repo, wt) = make_dirs(); + fs::create_dir_all(repo.join("target/nested")).unwrap(); + fs::write(repo.join("target/a.txt"), "alpha").unwrap(); + fs::write(repo.join("target/nested/b.txt"), "beta").unwrap(); + + share_cache(&repo, &wt, &["target".to_string()], CacheStrategy::Copy); + + let dest = wt.join("target"); + assert!(dest.exists()); + let meta = fs::symlink_metadata(&dest).unwrap(); + assert!(!meta.file_type().is_symlink(), "must not be a symlink"); + assert!(meta.is_dir()); + assert_eq!(read_file(&dest.join("a.txt")), "alpha"); + assert_eq!(read_file(&dest.join("nested/b.txt")), "beta"); + } + + #[test] + fn missing_entry_is_skipped_silently() { + 
let (_tmp, repo, wt) = make_dirs(); + + share_cache( + &repo, + &wt, + &["does-not-exist".to_string()], + CacheStrategy::Symlink, + ); + + assert!(!wt.join("does-not-exist").exists()); + } + + #[test] + fn existing_dest_is_not_overwritten() { + let (_tmp, repo, wt) = make_dirs(); + fs::create_dir_all(repo.join("target")).unwrap(); + fs::write(repo.join("target/from_repo.txt"), "repo").unwrap(); + fs::create_dir_all(wt.join("target")).unwrap(); + fs::write(wt.join("target/preexisting.txt"), "keep").unwrap(); + + share_cache(&repo, &wt, &["target".to_string()], CacheStrategy::Copy); + + // Pre-existing content untouched, repo content not copied in. + assert!(wt.join("target/preexisting.txt").exists()); + assert!(!wt.join("target/from_repo.txt").exists()); + } + + #[test] + fn empty_list_is_noop() { + let (_tmp, repo, wt) = make_dirs(); + share_cache(&repo, &wt, &[], CacheStrategy::Symlink); + // Just verify nothing was created in the worktree. + let entries: Vec<_> = fs::read_dir(&wt).unwrap().collect(); + assert!(entries.is_empty()); + } + + #[test] + fn path_traversal_entries_rejected() { + let (_tmp, repo, wt) = make_dirs(); + fs::create_dir_all(repo.join("evil")).unwrap(); + + share_cache(&repo, &wt, &["../evil".to_string()], CacheStrategy::Symlink); + + // Nothing should have been created. + let entries: Vec<_> = fs::read_dir(&wt).unwrap().collect(); + assert!(entries.is_empty()); + } +} diff --git a/src/worktree/manager.rs b/src/worktree/manager.rs index 4783dc5..bde0ffd 100644 --- a/src/worktree/manager.rs +++ b/src/worktree/manager.rs @@ -131,6 +131,15 @@ impl WorktreeManager { .save(&self.repo_root) .context("failed to save parsec state")?; + // Share build-cache directories from the main repo into the new worktree. + // Failures are logged but never propagated — the worktree itself succeeded. 
+ super::cache_share::share_cache( + &self.repo_root, + &worktree_path, + &self.config.worktree.shared_cache, + self.config.worktree.cache_strategy, + ); + // Run post-create hooks if !self.config.hooks.post_create.is_empty() { let skip_prompt = std::env::var("PARSEC_YES") diff --git a/src/worktree/mod.rs b/src/worktree/mod.rs index 9501760..f204a09 100644 --- a/src/worktree/mod.rs +++ b/src/worktree/mod.rs @@ -1,3 +1,4 @@ +mod cache_share; mod lifecycle; mod manager; diff --git a/tests/cli_tests.rs b/tests/cli_tests.rs index f8b2a63..3bc79ee 100644 --- a/tests/cli_tests.rs +++ b/tests/cli_tests.rs @@ -1027,6 +1027,145 @@ fn test_start_with_existing_branch() { assert!(contents.contains("my-existing-branch")); } +// --------------------------------------------------------------------------- +// shared_cache (issue #207) +// --------------------------------------------------------------------------- + +/// Build a custom config dir containing a config.toml with the given body and +/// return its path. Caller must keep the TempDir alive. +fn write_config_toml(body: &str) -> TempDir { + let dir = TempDir::new().unwrap(); + std::fs::write(dir.path().join("config.toml"), body).unwrap(); + dir +} + +#[test] +fn test_shared_cache_symlink_creates_link() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path(); + + // Pre-populate a `target/` directory in the main repo with a build artifact. 
+ std::fs::create_dir_all(repo_path.join("target")).unwrap(); + std::fs::write(repo_path.join("target").join("artifact.txt"), "pre-built").unwrap(); + + let config_dir = write_config_toml( + r#" +[worktree] +shared_cache = ["target"] +cache_strategy = "symlink" +"#, + ); + + let mut cmd = Command::cargo_bin("parsec").unwrap(); + cmd.env("PARSEC_CONFIG_DIR", config_dir.path()) + .args(["start", "CACHE-1", "--repo", repo_path.to_str().unwrap()]) + .assert() + .success(); + + // Worktree path follows sibling layout: /.CACHE-1 + let repo_name = repo_path.file_name().unwrap().to_string_lossy().to_string(); + let wt_path = repo_path + .parent() + .unwrap() + .join(format!("{}.CACHE-1", repo_name)); + let dest = wt_path.join("target"); + + assert!(dest.exists(), "worktree should have shared target/"); + let meta = std::fs::symlink_metadata(&dest).unwrap(); + assert!( + meta.file_type().is_symlink(), + "symlink strategy must produce a symlink, got: {:?}", + meta.file_type() + ); + let contents = std::fs::read_to_string(dest.join("artifact.txt")).unwrap(); + assert_eq!(contents, "pre-built"); +} + +#[test] +fn test_shared_cache_copy_creates_real_dir() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path(); + + std::fs::create_dir_all(repo_path.join("target").join("nested")).unwrap(); + std::fs::write(repo_path.join("target").join("a.txt"), "alpha").unwrap(); + std::fs::write( + repo_path.join("target").join("nested").join("b.txt"), + "beta", + ) + .unwrap(); + + let config_dir = write_config_toml( + r#" +[worktree] +shared_cache = ["target"] +cache_strategy = "copy" +"#, + ); + + let mut cmd = Command::cargo_bin("parsec").unwrap(); + cmd.env("PARSEC_CONFIG_DIR", config_dir.path()) + .args(["start", "CACHE-2", "--repo", repo_path.to_str().unwrap()]) + .assert() + .success(); + + let repo_name = repo_path.file_name().unwrap().to_string_lossy().to_string(); + let wt_path = repo_path + .parent() + .unwrap() + .join(format!("{}.CACHE-2", repo_name)); + 
let dest = wt_path.join("target"); + + assert!(dest.exists()); + let meta = std::fs::symlink_metadata(&dest).unwrap(); + assert!( + !meta.file_type().is_symlink(), + "copy strategy must NOT produce a symlink" + ); + assert!(meta.is_dir()); + assert_eq!( + std::fs::read_to_string(dest.join("a.txt")).unwrap(), + "alpha" + ); + assert_eq!( + std::fs::read_to_string(dest.join("nested").join("b.txt")).unwrap(), + "beta" + ); +} + +#[test] +fn test_shared_cache_missing_entry_skipped() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path(); + + // Don't pre-create `.venv` in the main repo. + let config_dir = write_config_toml( + r#" +[worktree] +shared_cache = [".venv"] +cache_strategy = "symlink" +"#, + ); + + let mut cmd = Command::cargo_bin("parsec").unwrap(); + cmd.env("PARSEC_CONFIG_DIR", config_dir.path()) + .args(["start", "CACHE-3", "--repo", repo_path.to_str().unwrap()]) + .assert() + .success(); + + let repo_name = repo_path.file_name().unwrap().to_string_lossy().to_string(); + let wt_path = repo_path + .parent() + .unwrap() + .join(format!("{}.CACHE-3", repo_name)); + + // Worktree was created (start succeeded), but `.venv` was simply skipped. 
+ assert!(wt_path.exists(), "worktree should still be created"); + assert!( + !wt_path.join(".venv").exists(), + "missing source should NOT be linked into worktree" + ); +} + // --------------------------------------------------------------------------- // JSON error format // --------------------------------------------------------------------------- @@ -1050,7 +1189,7 @@ fn test_json_error_format() { let stdout = String::from_utf8(output.stdout).unwrap(); let parsed: serde_json::Value = serde_json::from_str(&stdout).expect("JSON error output must be parseable"); - assert_eq!(parsed["error"].as_bool().unwrap(), true); + assert!(parsed["error"].as_bool().unwrap()); assert!(parsed.get("code").is_some()); assert!(parsed.get("message").is_some()); } From 5fc0d4a5628598e5a681d901683c7f1bbc240a5d Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 4 May 2026 10:19:38 +0900 Subject: [PATCH 16/18] [279] feat: wire Bitbucket Pipelines into ci and pr-status (#282) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes the two gaps left after the Bitbucket Cloud forge work in #240: * `parsec ci` now dispatches on the origin remote — when it's Bitbucket, it resolves the PR's source branch and polls the Pipelines API (`/repositories/.../pipelines/`) instead of GitHub Checks. The result is shaped into the existing `CiStatus` struct so renderers, --watch, and exit-code logic stay forge-agnostic. * `parsec pr-status`'s Bitbucket branch no longer hardcodes "unknown" for ci_status / review_status. ci_status comes from the latest pipeline for the PR's source branch; review_status comes from the PR participants list. Both fall back to "unknown" on network error rather than failing the whole command. Vocabulary matches the GitHub path exactly: passing | failing | pending | no checks | unknown for CI; approved | changes_requested | pending | no reviews for review. 
New `BitbucketClient` helpers: `get_latest_pipeline_for_branch`, `get_pr_source_branch`, `get_pr_participants`. Pure mapping functions (`pipeline_to_ci_status`, `participants_to_review_status`) are unit tested directly. Adds `mockito` as a dev-dependency — the codebase had no HTTP mocking infra, and the new `tests/bitbucket_integration_tests.rs` needs it to verify the Bitbucket dispatch path end-to-end without hitting the real api.bitbucket.org. To make this testable, `BitbucketClient` honors a new `PARSEC_BITBUCKET_API_BASE` env var (also useful for future Bitbucket Server / Data Center support). Behaviour for GitHub repositories is unchanged. Test coverage: 20 unit tests for mappings + 5 integration tests (`pr-status` approved/changes_requested/no-pipeline cases, `ci` in-progress + failing-exit-code cases, plus an explicit "never call /repos/*" assertion to catch dispatch regressions). Co-authored-by: Claude Opus 4.7 (1M context) --- Cargo.toml | 1 + src/bitbucket/mod.rs | 324 ++++++++++++++++++- src/cli/commands/ci.rs | 111 ++++++- src/cli/commands/pr.rs | 22 +- src/env.rs | 11 + tests/bitbucket_integration_tests.rs | 458 +++++++++++++++++++++++++++ 6 files changed, 904 insertions(+), 23 deletions(-) create mode 100644 tests/bitbucket_integration_tests.rs diff --git a/Cargo.toml b/Cargo.toml index d8d8555..abb82b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,3 +40,4 @@ uuid = { version = "1", features = ["v4"] } assert_cmd = "2" predicates = "3" tempfile = "3" +mockito = "1" diff --git a/src/bitbucket/mod.rs b/src/bitbucket/mod.rs index 9d3805a..1466d6f 100644 --- a/src/bitbucket/mod.rs +++ b/src/bitbucket/mod.rs @@ -39,7 +39,6 @@ pub struct MergeResult { /// A single pipeline step/result #[derive(Debug, Clone)] -#[allow(dead_code)] pub struct PipelineStatus { pub name: String, pub state: String, @@ -47,6 +46,17 @@ pub struct PipelineStatus { pub url: Option, } +/// A PR participant (reviewer/commenter) used for review_status mapping. 
+#[derive(Debug, Clone)] +pub struct Participant { + /// Bitbucket review state: "approved", "changes_requested", or None. + pub state: Option, + /// Convenience boolean flag from the Bitbucket API. + pub approved: bool, + /// Role: "REVIEWER" or "PARTICIPANT". + pub role: Option, +} + /// Parsed Bitbucket remote info #[derive(Debug, Clone)] pub struct BitbucketRemote { @@ -64,6 +74,30 @@ struct ApiPr { title: Option, state: Option, links: Option, + #[serde(default)] + source: Option, + #[serde(default)] + participants: Option>, +} + +#[derive(Deserialize)] +struct ApiPrEndpoint { + branch: Option, +} + +#[derive(Deserialize)] +struct ApiBranch { + name: Option, +} + +#[derive(Deserialize)] +struct ApiParticipant { + #[serde(default)] + state: Option, + #[serde(default)] + approved: Option, + #[serde(default)] + role: Option, } #[derive(Deserialize)] @@ -157,11 +191,15 @@ pub fn is_bitbucket_remote(url: &str) -> bool { // BitbucketClient // --------------------------------------------------------------------------- +/// Default Bitbucket Cloud API base URL (without trailing slash). +const DEFAULT_API_BASE: &str = "https://api.bitbucket.org/2.0"; + /// Authenticated Bitbucket Cloud API client. pub struct BitbucketClient { client: Client, remote: BitbucketRemote, token: String, + api_base: String, } impl BitbucketClient { @@ -191,10 +229,14 @@ impl BitbucketClient { .build() .context("failed to build HTTP client")?; + let api_base = + crate::env::bitbucket_api_base().unwrap_or_else(|| DEFAULT_API_BASE.to_string()); + Ok(Some(Self { client, remote, token, + api_base, })) } @@ -206,8 +248,8 @@ impl BitbucketClient { /// Repo API path prefix. fn repo_url(&self) -> String { format!( - "https://api.bitbucket.org/2.0/repositories/{}/{}", - self.remote.workspace, self.remote.repo_slug + "{}/repositories/{}/{}", + self.api_base, self.remote.workspace, self.remote.repo_slug ) } @@ -376,7 +418,6 @@ impl BitbucketClient { } /// Get pipeline status for a branch. 
- #[allow(dead_code)] pub async fn get_pipelines(&self, branch: &str) -> Result> { let url = format!( "{}/pipelines/?sort=-created_on&pagelen=5&target.ref_name={}", @@ -433,4 +474,279 @@ impl BitbucketClient { Ok(pipelines) } + + /// Get the latest pipeline (most recently created) for a branch, if any. + pub async fn get_latest_pipeline_for_branch( + &self, + branch: &str, + ) -> Result> { + Ok(self.get_pipelines(branch).await?.into_iter().next()) + } + + /// Fetch the source branch name for a PR. + pub async fn get_pr_source_branch(&self, pr_id: u64) -> Result> { + let url = format!("{}/pullrequests/{}", self.repo_url(), pr_id); + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to fetch Bitbucket PR")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let pr: ApiPr = response.json().await?; + Ok(pr + .source + .and_then(|s| s.branch) + .and_then(|b| b.name) + .filter(|n| !n.is_empty())) + } + + /// Fetch participants for a PR. Returns an empty vec when the PR has no participants + /// or the API call fails (callers may interpret this as "unknown / pending"). 
+ pub async fn get_pr_participants(&self, pr_id: u64) -> Result> { + let url = format!("{}/pullrequests/{}", self.repo_url(), pr_id); + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to fetch Bitbucket PR")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let pr: ApiPr = response.json().await?; + Ok(pr + .participants + .unwrap_or_default() + .into_iter() + .map(|p| Participant { + state: p.state, + approved: p.approved.unwrap_or(false), + role: p.role, + }) + .collect()) + } +} + +// --------------------------------------------------------------------------- +// Pure mapping functions (forge-agnostic vocabulary) +// --------------------------------------------------------------------------- + +/// Map a Bitbucket pipeline (state + optional result) to the same `ci_status` +/// vocabulary that the GitHub path emits: `passing` | `failing` | `pending` +/// | `no checks` | `unknown`. +/// +/// Bitbucket pipeline `state.name` values: `PENDING`, `IN_PROGRESS`, +/// `COMPLETED`, `HALTED`, `STOPPED`. When `state.name == "COMPLETED"`, +/// `state.result.name` is one of `SUCCESSFUL`, `FAILED`, `ERROR`, +/// `STOPPED`, `EXPIRED`. +pub fn pipeline_to_ci_status(state: &str, result: Option<&str>) -> String { + match state.to_ascii_uppercase().as_str() { + "COMPLETED" => match result.map(|r| r.to_ascii_uppercase()).as_deref() { + Some("SUCCESSFUL") => "passing".to_string(), + Some("FAILED") | Some("ERROR") | Some("STOPPED") | Some("EXPIRED") => { + "failing".to_string() + } + _ => "unknown".to_string(), + }, + "PENDING" | "IN_PROGRESS" | "HALTED" => "pending".to_string(), + _ => "unknown".to_string(), + } +} + +/// Convenience wrapper: map an optional `PipelineStatus` to a `ci_status` string. +/// `None` → `"no checks"` (consistent with GitHub's empty-checks rendering). 
+pub fn pipeline_status_to_ci_string(p: Option<&PipelineStatus>) -> String { + match p { + Some(p) => pipeline_to_ci_status(&p.state, p.result.as_deref()), + None => "no checks".to_string(), + } +} + +/// Map Bitbucket PR participants to the same `review_status` vocabulary the +/// GitHub path emits: `approved` | `changes_requested` | `pending` | `no reviews`. +/// +/// - Any participant with `state == "changes_requested"` → `changes_requested`. +/// - Else any participant with `approved == true` (or `state == "approved"`) → `approved`. +/// - Else if there are any reviewer-role participants → `pending`. +/// - Else → `no reviews`. +pub fn participants_to_review_status(participants: &[Participant]) -> String { + if participants + .iter() + .any(|p| p.state.as_deref() == Some("changes_requested")) + { + return "changes_requested".to_string(); + } + if participants + .iter() + .any(|p| p.approved || p.state.as_deref() == Some("approved")) + { + return "approved".to_string(); + } + if participants + .iter() + .any(|p| p.role.as_deref() == Some("REVIEWER")) + { + return "pending".to_string(); + } + "no reviews".to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + + // -- pipeline_to_ci_status ------------------------------------------------- + + #[test] + fn pipeline_completed_successful_is_passing() { + assert_eq!( + pipeline_to_ci_status("COMPLETED", Some("SUCCESSFUL")), + "passing" + ); + } + + #[test] + fn pipeline_completed_failed_is_failing() { + assert_eq!( + pipeline_to_ci_status("COMPLETED", Some("FAILED")), + "failing" + ); + } + + #[test] + fn pipeline_completed_error_is_failing() { + assert_eq!(pipeline_to_ci_status("COMPLETED", Some("ERROR")), "failing"); + } + + #[test] + fn pipeline_completed_expired_is_failing() { + assert_eq!( + pipeline_to_ci_status("COMPLETED", Some("EXPIRED")), + "failing" + ); + } + + #[test] + fn pipeline_in_progress_is_pending() { + assert_eq!(pipeline_to_ci_status("IN_PROGRESS", None), "pending"); + } + + #[test] + fn 
pipeline_pending_is_pending() { + assert_eq!(pipeline_to_ci_status("PENDING", None), "pending"); + } + + #[test] + fn pipeline_halted_is_pending() { + assert_eq!(pipeline_to_ci_status("HALTED", None), "pending"); + } + + #[test] + fn pipeline_unknown_state_is_unknown() { + assert_eq!(pipeline_to_ci_status("WAT", None), "unknown"); + } + + #[test] + fn pipeline_state_is_case_insensitive() { + assert_eq!( + pipeline_to_ci_status("completed", Some("successful")), + "passing" + ); + } + + #[test] + fn pipeline_status_to_ci_string_none_is_no_checks() { + assert_eq!(pipeline_status_to_ci_string(None), "no checks"); + } + + #[test] + fn pipeline_status_to_ci_string_passes_through() { + let p = PipelineStatus { + name: "x".into(), + state: "COMPLETED".into(), + result: Some("SUCCESSFUL".into()), + url: None, + }; + assert_eq!(pipeline_status_to_ci_string(Some(&p)), "passing"); + } + + // -- participants_to_review_status ----------------------------------------- + + fn p(state: Option<&str>, approved: bool, role: Option<&str>) -> Participant { + Participant { + state: state.map(|s| s.to_string()), + approved, + role: role.map(|s| s.to_string()), + } + } + + #[test] + fn empty_participants_is_no_reviews() { + assert_eq!(participants_to_review_status(&[]), "no reviews"); + } + + #[test] + fn changes_requested_dominates() { + let parts = vec![ + p(Some("approved"), true, Some("REVIEWER")), + p(Some("changes_requested"), false, Some("REVIEWER")), + ]; + assert_eq!(participants_to_review_status(&parts), "changes_requested"); + } + + #[test] + fn approved_state() { + let parts = vec![p(Some("approved"), true, Some("REVIEWER"))]; + assert_eq!(participants_to_review_status(&parts), "approved"); + } + + #[test] + fn approved_via_boolean_only() { + let parts = vec![p(None, true, Some("REVIEWER"))]; + assert_eq!(participants_to_review_status(&parts), "approved"); + } + + #[test] + fn reviewer_no_action_is_pending() { + let parts = vec![p(None, false, Some("REVIEWER"))]; + 
assert_eq!(participants_to_review_status(&parts), "pending"); + } + + #[test] + fn only_non_reviewer_participants_is_no_reviews() { + // PR author / commenter shows up as PARTICIPANT with no review state. + let parts = vec![p(None, false, Some("PARTICIPANT"))]; + assert_eq!(participants_to_review_status(&parts), "no reviews"); + } + + // -- remote URL parsing (existing behavior, sanity-check) ------------------ + + #[test] + fn parse_https_remote() { + let r = parse_bitbucket_remote("https://bitbucket.org/myws/myrepo.git").unwrap(); + assert_eq!(r.workspace, "myws"); + assert_eq!(r.repo_slug, "myrepo"); + } + + #[test] + fn parse_ssh_remote() { + let r = parse_bitbucket_remote("git@bitbucket.org:myws/myrepo.git").unwrap(); + assert_eq!(r.workspace, "myws"); + assert_eq!(r.repo_slug, "myrepo"); + } + + #[test] + fn is_bitbucket_remote_detects_url() { + assert!(is_bitbucket_remote("git@bitbucket.org:foo/bar.git")); + assert!(!is_bitbucket_remote("git@github.com:foo/bar.git")); + } } diff --git a/src/cli/commands/ci.rs b/src/cli/commands/ci.rs index 459ed09..f20cb3c 100644 --- a/src/cli/commands/ci.rs +++ b/src/cli/commands/ci.rs @@ -2,6 +2,7 @@ use std::path::Path; use anyhow::{Context, Result}; +use crate::bitbucket; use crate::config::ParsecConfig; use crate::errors::ErrorCode; use crate::git; @@ -9,20 +10,38 @@ use crate::github; use crate::output::{self, Mode}; use crate::worktree::WorktreeManager; +/// Forge backend selected for `parsec ci` based on the origin remote URL. +enum Forge { + GitHub(github::GitHubClient), + Bitbucket(bitbucket::BitbucketClient), +} + pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mode) -> Result<()> { let config = ParsecConfig::load()?; let repo_root = git::get_main_repo_root(repo).or_else(|_| git::get_repo_root(repo))?; let remote_url = git::run_output(repo, &["remote", "get-url", "origin"])?; - let gh = github::GitHubClient::new(&remote_url, &config)? 
- .ok_or_else(|| anyhow::anyhow!("no GitHub token found. Set PARSEC_GITHUB_TOKEN."))?; + + // Dispatch on remote type — GitHub takes priority when both tokens exist. + let forge = if let Some(gh) = github::GitHubClient::new(&remote_url, &config)? { + Forge::GitHub(gh) + } else if let Some(bb) = bitbucket::BitbucketClient::new(&remote_url)? { + Forge::Bitbucket(bb) + } else { + bail_code!( + ErrorCode::E001, + "no forge token found. Set PARSEC_GITHUB_TOKEN or PARSEC_BITBUCKET_TOKEN." + ); + }; + let oplog = crate::oplog::OpLog::load(&repo_root)?; let manager = WorktreeManager::new(repo, &config)?; - // Collect (ticket_id, pr_number) pairs to check + // Collect (ticket_id, pr_number) pairs to check. Bitbucket "PR id" and + // GitHub "PR number" share the same numeric encoding in the oplog (last + // path segment of the URL), so the resolution logic is forge-agnostic. let mut targets: Vec<(String, u64)> = Vec::new(); if all { - // All shipped entries with PR numbers from oplog let entries: Vec<_> = oplog .get_entries(None) .into_iter() @@ -41,10 +60,8 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod } targets = entries; } else if !tickets.is_empty() { - // Multiple tickets specified for t in tickets { let ticket_id = t.to_string(); - // First check if there's a shipped PR in the oplog let shipped_pr = oplog .get_entries(Some(&ticket_id)) .into_iter() @@ -58,11 +75,14 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod if let Some(pr_number) = shipped_pr { targets.push((ticket_id, pr_number)); } else { - // Not shipped yet — try to find an open PR by branch name let ws = manager.get(&ticket_id).with_context(|| { format!("ticket {ticket_id} not found in active workspaces or oplog") })?; - match gh.find_pr_by_branch(&ws.branch).await? 
{ + let found = match &forge { + Forge::GitHub(gh) => gh.find_pr_by_branch(&ws.branch).await?, + Forge::Bitbucket(bb) => bb.find_pr_by_branch(&ws.branch).await?, + }; + match found { Some(pr_number) => targets.push((ticket_id, pr_number)), None => { bail_code!( @@ -85,7 +105,6 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod })?; let ticket_id = found.ticket; - // First check if there's a shipped PR in the oplog let shipped_pr = oplog .get_entries(Some(&ticket_id)) .into_iter() @@ -99,11 +118,14 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod if let Some(pr_number) = shipped_pr { targets.push((ticket_id, pr_number)); } else { - // Not shipped yet — try to find an open PR by branch name let ws = manager.get(&ticket_id).with_context(|| { format!("ticket {ticket_id} not found in active workspaces or oplog") })?; - match gh.find_pr_by_branch(&ws.branch).await? { + let pr_lookup = match &forge { + Forge::GitHub(gh) => gh.find_pr_by_branch(&ws.branch).await?, + Forge::Bitbucket(bb) => bb.find_pr_by_branch(&ws.branch).await?, + }; + match pr_lookup { Some(pr_number) => targets.push((ticket_id, pr_number)), None => { anyhow::bail!( @@ -118,11 +140,13 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod let mut statuses: Vec<(String, crate::github::CiStatus)> = Vec::new(); for (ticket_id, pr_number) in &targets { - let ci = gh.get_check_runs(*pr_number).await?; + let ci = match &forge { + Forge::GitHub(gh) => gh.get_check_runs(*pr_number).await?, + Forge::Bitbucket(bb) => fetch_bitbucket_ci(bb, *pr_number).await?, + }; statuses.push((ticket_id.clone(), ci)); } - // In watch + human mode, clear screen before redraw if watch && mode == Mode::Human { print!("\x1B[2J\x1B[H"); } @@ -130,8 +154,6 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod output::print_ci_status(&statuses, mode); if !watch || mode != Mode::Human { - // JSON/quiet mode 
prints once even with --watch - // Determine exit code based on overall status let has_failure = statuses.iter().any(|(_t, ci)| ci.overall == "failing"); if has_failure { bail_code!( @@ -146,7 +168,6 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod return Ok(()); } - // Check if all checks are completed let all_completed = statuses .iter() .all(|(_t, ci)| ci.checks.iter().all(|c| c.status == "completed")); @@ -169,3 +190,61 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod tokio::time::sleep(std::time::Duration::from_secs(5)).await; } } + +/// Fetch the latest pipeline for the PR's source branch and shape it into the +/// same `CiStatus` struct GitHub emits, so the renderer stays forge-agnostic. +async fn fetch_bitbucket_ci( + bb: &bitbucket::BitbucketClient, + pr_id: u64, +) -> Result { + let branch = bb.get_pr_source_branch(pr_id).await?.unwrap_or_default(); + + // No branch resolvable → return an empty CiStatus rather than erroring; + // matches the behaviour of GitHub's "no checks" path. + if branch.is_empty() { + return Ok(crate::github::CiStatus { + pr_number: pr_id, + head_sha: String::new(), + overall: "no checks".to_string(), + checks: Vec::new(), + }); + } + + let pipeline = bb.get_latest_pipeline_for_branch(&branch).await?; + let overall = bitbucket::pipeline_status_to_ci_string(pipeline.as_ref()); + + // Project a single CheckRun representing the pipeline so that --watch's + // "all completed" check works the same way it does for GitHub. Pipelines + // in pending/in_progress map to status "in_progress"; everything else to + // "completed". 
+ let checks: Vec = match pipeline { + Some(p) => { + let upper = p.state.to_ascii_uppercase(); + let status = match upper.as_str() { + "PENDING" | "IN_PROGRESS" | "HALTED" => "in_progress", + _ => "completed", + }; + let conclusion = match overall.as_str() { + "passing" => Some("success".to_string()), + "failing" => Some("failure".to_string()), + _ => None, + }; + vec![crate::github::CheckRun { + name: p.name, + status: status.to_string(), + conclusion, + started_at: None, + completed_at: None, + html_url: p.url, + }] + } + None => Vec::new(), + }; + + Ok(crate::github::CiStatus { + pr_number: pr_id, + head_sha: String::new(), + overall, + checks, + }) +} diff --git a/src/cli/commands/pr.rs b/src/cli/commands/pr.rs index 436fe03..6907aaa 100644 --- a/src/cli/commands/pr.rs +++ b/src/cli/commands/pr.rs @@ -168,7 +168,23 @@ pub async fn pr_status(repo: &Path, ticket: Option<&str>, mode: Mode) -> Result< } else if let Some(bb) = bitbucket::BitbucketClient::new(&remote_url)? { for (ticket_id, pr_id, _url) in &all_entries { let bb_status = bb.get_pr_status(*pr_id).await?; - // Map to github::PrStatus for output compatibility + + // Resolve CI from Bitbucket Pipelines for the PR's source branch. + // Any failure (no token scope, pipelines disabled, network) falls + // back to "unknown" rather than failing the whole pr-status call. 
+ let ci_status = match bb.get_pr_source_branch(*pr_id).await { + Ok(Some(branch)) => match bb.get_latest_pipeline_for_branch(&branch).await { + Ok(pipeline) => bitbucket::pipeline_status_to_ci_string(pipeline.as_ref()), + Err(_) => "unknown".to_string(), + }, + _ => "unknown".to_string(), + }; + + let review_status = match bb.get_pr_participants(*pr_id).await { + Ok(participants) => bitbucket::participants_to_review_status(&participants), + Err(_) => "unknown".to_string(), + }; + statuses.push(( ticket_id.clone(), github::PrStatus { @@ -176,8 +192,8 @@ pub async fn pr_status(repo: &Path, ticket: Option<&str>, mode: Mode) -> Result< title: bb_status.title, state: bb_status.state.to_lowercase(), mergeable: None, - ci_status: "unknown".to_string(), - review_status: "unknown".to_string(), + ci_status, + review_status, url: bb_status.url, }, )); diff --git a/src/env.rs b/src/env.rs index b5b8605..de824fa 100644 --- a/src/env.rs +++ b/src/env.rs @@ -77,6 +77,9 @@ pub fn gitlab_token() -> Option { pub const PARSEC_BITBUCKET_TOKEN: &str = "PARSEC_BITBUCKET_TOKEN"; pub const BITBUCKET_TOKEN: &str = "BITBUCKET_TOKEN"; +/// Override Bitbucket Cloud API base URL. Useful for tests (mock servers) and +/// future Bitbucket Server / Data Center support. +pub const PARSEC_BITBUCKET_API_BASE: &str = "PARSEC_BITBUCKET_API_BASE"; /// Resolve Bitbucket token. Priority: PARSEC_BITBUCKET_TOKEN > BITBUCKET_TOKEN pub fn bitbucket_token() -> Option { @@ -90,6 +93,14 @@ pub fn bitbucket_token() -> Option { None } +/// Bitbucket API base URL override (no trailing slash). Returns None when unset. 
+pub fn bitbucket_api_base() -> Option { + std::env::var(PARSEC_BITBUCKET_API_BASE) + .ok() + .filter(|v| !v.is_empty()) + .map(|v| v.trim_end_matches('/').to_string()) +} + // --------------------------------------------------------------------------- // Offline mode // --------------------------------------------------------------------------- diff --git a/tests/bitbucket_integration_tests.rs b/tests/bitbucket_integration_tests.rs new file mode 100644 index 0000000..833fb24 --- /dev/null +++ b/tests/bitbucket_integration_tests.rs @@ -0,0 +1,458 @@ +//! End-to-end tests that exercise the Bitbucket Cloud code path of `parsec ci` +//! and `parsec pr-status` against a mocked Bitbucket API server. +//! +//! These tests verify (a) the dispatch logic actually picks the Bitbucket path +//! instead of GitHub when the origin remote is Bitbucket, and (b) the response +//! mapping (ci_status, review_status) reflects the live API payload. + +use assert_cmd::Command; +use mockito::{Matcher, Server, ServerGuard}; +use std::process::Command as StdCommand; +use tempfile::TempDir; + +const WORKSPACE: &str = "fakews"; +const REPO_SLUG: &str = "fakerepo"; + +/// Initialize a git repo whose `origin` points at a Bitbucket Cloud URL. +/// No actual remote backs the URL — these tests only exercise API calls, +/// never `git fetch` / `git push`. 
+fn setup_bitbucket_repo() -> TempDir { + let dir = TempDir::new().unwrap(); + let p = dir.path(); + + StdCommand::new("git") + .args(["init"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args(["config", "user.name", "Test"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args(["config", "user.email", "test@test.com"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args(["checkout", "-b", "main"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args(["commit", "--allow-empty", "-m", "init"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args([ + "remote", + "add", + "origin", + &format!("git@bitbucket.org:{}/{}.git", WORKSPACE, REPO_SLUG), + ]) + .current_dir(p) + .output() + .unwrap(); + + dir +} + +/// Drop a fake oplog Ship entry so `parsec pr-status` / `parsec ci` resolve +/// the PR number from the log without needing a live workspace. +fn write_oplog_ship_entry(repo: &std::path::Path, ticket: &str, pr_number: u64) { + let parsec_dir = repo.join(".parsec"); + std::fs::create_dir_all(&parsec_dir).unwrap(); + let body = serde_json::json!({ + "entries": [{ + "id": 1, + "op": "ship", + "ticket": ticket, + "detail": format!( + "Shipped branch 'feature/{0}' -> https://bitbucket.org/{1}/{2}/pull-requests/{3}", + ticket, WORKSPACE, REPO_SLUG, pr_number + ), + "timestamp": "2024-01-01T00:00:00Z", + "undo_info": null + }] + }); + std::fs::write( + parsec_dir.join("oplog.json"), + serde_json::to_string_pretty(&body).unwrap(), + ) + .unwrap(); +} + +fn parsec(server: &ServerGuard) -> Command { + let mut cmd = Command::cargo_bin("parsec").unwrap(); + // Isolate from any user-level config (e.g. existing default_base) so the + // subprocess sees only the env we provide. 
+ cmd.env("PARSEC_CONFIG_DIR", "/tmp/parsec-test-nonexistent") + .env("PARSEC_BITBUCKET_TOKEN", "fake-token-for-test") + .env("PARSEC_BITBUCKET_API_BASE", server.url()) + // Defensive: don't let an inherited GitHub token cause the dispatcher + // to pick the GitHub forge for our bitbucket.org-style remote. + .env_remove("PARSEC_GITHUB_TOKEN") + .env_remove("GITHUB_TOKEN") + .env_remove("GH_TOKEN"); + cmd +} + +/// Build the API path prefix used in mock URLs. +fn pr_path(pr_id: u64) -> String { + format!( + "/repositories/{}/{}/pullrequests/{}", + WORKSPACE, REPO_SLUG, pr_id + ) +} + +fn pipelines_path() -> String { + format!("/repositories/{}/{}/pipelines/", WORKSPACE, REPO_SLUG) +} + +// --------------------------------------------------------------------------- +// pr-status +// --------------------------------------------------------------------------- + +#[test] +fn pr_status_bitbucket_maps_ci_and_review_from_api() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + // PR JSON is reused for get_pr_status, get_pr_source_branch, and + // get_pr_participants — they all hit the same endpoint. Two reviewers: + // one approved, one no-action → review_status == "approved". 
+ let pr_body = serde_json::json!({ + "id": 42, + "title": "Add Bitbucket pipelines support", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/42" } }, + "source": { "branch": { "name": "feature/BB-1" } }, + "participants": [ + { "state": "approved", "approved": true, "role": "REVIEWER" }, + { "state": null, "approved": false, "role": "REVIEWER" } + ] + }); + let pr_mock = server + .mock("GET", pr_path(42).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .expect_at_least(2) // status + source-branch + participants + .create(); + + // Pipeline for the source branch: COMPLETED + SUCCESSFUL → ci_status "passing". + let pipelines_body = serde_json::json!({ + "values": [{ + "uuid": "{abc-123}", + "state": { "name": "COMPLETED", "result": { "name": "SUCCESSFUL" } }, + "target": { "ref_name": "feature/BB-1" } + }] + }); + let pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::AllOf(vec![Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/BB-1".into(), + )])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pipelines_body.to_string()) + .create(); + + write_oplog_ship_entry(repo.path(), "BB-1", 42); + + let output = parsec(&server) + .args(["--json", "pr-status", "BB-1", "--repo", repo_path]) + .output() + .unwrap(); + + let stdout = String::from_utf8(output.stdout.clone()).unwrap(); + let stderr = String::from_utf8(output.stderr.clone()).unwrap(); + assert!( + output.status.success(), + "pr-status should succeed.\nstdout:\n{stdout}\nstderr:\n{stderr}", + ); + + let parsed: serde_json::Value = + serde_json::from_str(&stdout).expect("pr-status --json must produce valid JSON"); + let arr = parsed.as_array().expect("output should be a JSON array"); + assert_eq!(arr.len(), 1); + let entry = &arr[0]; + assert_eq!(entry["ticket"], "BB-1"); + assert_eq!(entry["pr_number"], 42); + 
assert_eq!(entry["state"], "open"); + assert_eq!( + entry["ci_status"], "passing", + "ci_status should come from the Bitbucket Pipelines mock" + ); + assert_eq!( + entry["review_status"], "approved", + "review_status should reflect the participants payload" + ); + + pr_mock.assert(); + pipeline_mock.assert(); +} + +#[test] +fn pr_status_bitbucket_no_pipeline_yet_is_no_checks() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + let pr_body = serde_json::json!({ + "id": 7, + "title": "Edge case PR", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/7" } }, + "source": { "branch": { "name": "feature/BB-7" } }, + "participants": [] + }); + let _pr_mock = server + .mock("GET", pr_path(7).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .expect_at_least(2) + .create(); + + // No pipeline runs yet for this branch. 
+ let _pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/BB-7".into(), + )) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(r#"{"values":[]}"#) + .create(); + + write_oplog_ship_entry(repo.path(), "BB-7", 7); + + let output = parsec(&server) + .args(["--json", "pr-status", "BB-7", "--repo", repo_path]) + .output() + .unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + let entry = &parsed.as_array().unwrap()[0]; + assert_eq!( + entry["ci_status"], "no checks", + "no pipeline runs → ci_status \"no checks\"" + ); + assert_eq!( + entry["review_status"], "no reviews", + "no participants → review_status \"no reviews\"" + ); +} + +#[test] +fn pr_status_bitbucket_changes_requested_review() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + let pr_body = serde_json::json!({ + "id": 9, + "title": "Needs work", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/9" } }, + "source": { "branch": { "name": "feature/BB-9" } }, + "participants": [ + { "state": "approved", "approved": true, "role": "REVIEWER" }, + { "state": "changes_requested", "approved": false, "role": "REVIEWER" } + ] + }); + let _pr_mock = server + .mock("GET", pr_path(9).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .expect_at_least(2) + .create(); + + let pipelines_body = serde_json::json!({ + "values": [{ + "uuid": "{xyz-9}", + "state": { "name": "COMPLETED", "result": { "name": "FAILED" } }, + "target": { "ref_name": "feature/BB-9" } + }] + }); + let _pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::UrlEncoded( + 
"target.ref_name".into(), + "feature/BB-9".into(), + )) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pipelines_body.to_string()) + .create(); + + write_oplog_ship_entry(repo.path(), "BB-9", 9); + + let output = parsec(&server) + .args(["--json", "pr-status", "BB-9", "--repo", repo_path]) + .output() + .unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + let entry = &parsed.as_array().unwrap()[0]; + assert_eq!(entry["ci_status"], "failing"); + assert_eq!(entry["review_status"], "changes_requested"); +} + +// --------------------------------------------------------------------------- +// ci +// --------------------------------------------------------------------------- + +#[test] +fn ci_bitbucket_uses_pipelines_endpoint() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + // PR endpoint must respond so `fetch_bitbucket_ci` can resolve the source branch. + let pr_body = serde_json::json!({ + "id": 100, + "title": "CI test", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/100" } }, + "source": { "branch": { "name": "feature/CI-1" } } + }); + let _pr_mock = server + .mock("GET", pr_path(100).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .create(); + + // Pipeline run that's still in progress. 
+ let pipelines_body = serde_json::json!({ + "values": [{ + "uuid": "{ci-1}", + "state": { "name": "IN_PROGRESS", "result": null }, + "target": { "ref_name": "feature/CI-1" } + }] + }); + let pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/CI-1".into(), + )) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pipelines_body.to_string()) + .expect_at_least(1) + .create(); + + // Crucial: assert that the GitHub commit-status / check-runs endpoints are + // never hit. Mockito returns 501 for unmatched paths by default; that + // would blow up the request. We use a catch-all for /repos/* to detect + // accidental GitHub dispatch and fail loudly. + let github_mock = server + .mock("GET", Matcher::Regex("^/repos/.*".into())) + .with_status(500) + .with_body("github endpoint should not be hit for a Bitbucket remote") + .expect(0) + .create(); + + write_oplog_ship_entry(repo.path(), "CI-1", 100); + + let output = parsec(&server) + .args(["--json", "ci", "CI-1", "--repo", repo_path]) + .output() + .unwrap(); + + let stdout = String::from_utf8(output.stdout.clone()).unwrap(); + let stderr = String::from_utf8(output.stderr.clone()).unwrap(); + assert!( + output.status.success(), + "ci should succeed for an in-progress pipeline.\nstdout:\n{stdout}\nstderr:\n{stderr}", + ); + + let parsed: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + let entry = &parsed.as_array().unwrap()[0]; + assert_eq!(entry["ticket"], "CI-1"); + assert_eq!(entry["pr_number"], 100); + assert_eq!( + entry["overall"], "pending", + "in-progress pipeline → overall \"pending\"" + ); + + pipeline_mock.assert(); + github_mock.assert(); +} + +#[test] +fn ci_bitbucket_failing_pipeline_exits_nonzero() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + let pr_body = serde_json::json!({ + "id": 200, + 
"title": "Broken build", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/200" } }, + "source": { "branch": { "name": "feature/CI-2" } } + }); + let _pr_mock = server + .mock("GET", pr_path(200).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .create(); + + let pipelines_body = serde_json::json!({ + "values": [{ + "uuid": "{ci-2}", + "state": { "name": "COMPLETED", "result": { "name": "FAILED" } }, + "target": { "ref_name": "feature/CI-2" } + }] + }); + let _pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/CI-2".into(), + )) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pipelines_body.to_string()) + .create(); + + write_oplog_ship_entry(repo.path(), "CI-2", 200); + + let output = parsec(&server) + .args(["--json", "ci", "CI-2", "--repo", repo_path]) + .output() + .unwrap(); + + // Failing CI is a hard error (E002) — exit code is non-zero, but the JSON + // status line is printed to stdout before the error JSON is appended. + assert!( + !output.status.success(), + "failing pipeline should exit non-zero" + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + // First line: the CI status array. Second line: the JSON error envelope. + let first_line = stdout.lines().next().expect("expected at least one line"); + let parsed: serde_json::Value = serde_json::from_str(first_line).unwrap(); + let entry = &parsed.as_array().unwrap()[0]; + assert_eq!(entry["overall"], "failing"); +} From 87a40ed214bdee36d289f71be801ec35c5a4fe98 Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 4 May 2026 17:39:31 +0900 Subject: [PATCH 17/18] chore: prepare v0.4.0 release (#283) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Version bump and full documentation pass for the v0.4 milestone. 
Changes: - Cargo.toml: 0.3.3 → 0.4.0 (release workflow reads this to tag and publish). - CHANGELOG.md: convert [Unreleased] → [0.4.0] - 2026-05-04 with full Added/Changed/Fixed/CI sections covering all 16 commits in the milestone: Bitbucket Cloud forge (#240), Bitbucket Pipelines CI (#279), parsec compress (#236), ship --template (#233), ship --reviewer/--label and stack --submit (#261), stack navigation comments (#234), draft default (#238), [worktree] shared cache (#207), offline mode (#237), observability lite (#166), config JSON Schema (#239), Windows CI (#257) + UNC fix (#263), 11 integration tests (#278), release/** CI trigger (#277), and the docs cross-link update (#265). - README.md: features list refreshed; new sections for parsec compress, parsec config schema, log --export JSONL, [worktree], [observability] config, Bitbucket env vars (PARSEC_BITBUCKET_TOKEN, BITBUCKET_TOKEN, PARSEC_BITBUCKET_API_BASE), PARSEC_OFFLINE; comparison table updated to 1st-class Bitbucket forge support. - docs/index.html: 5 new feature cards (Bitbucket Cloud + Pipelines, Compress, Offline, Observability, Config Schema). Global options banner gains --offline. - docs/reference/index.html: new compress command block, --offline in global options, log --export documented with JSONL example, config schema subcommand documented with schemastore reference. - docs/guide/index.html: tracker.type values include bitbucket; new "What's New in v0.4" section walks through every v0.4 feature with end-to-end examples. Sidebar updated. The release workflow will snapshot docs/{index,guide,reference}.html into docs/v/0.4.0/ on merge — these updates ensure the v0.4.0 docs snapshot is complete. Cargo.lock is gitignored per repo convention; cargo build verified on the bumped version locally. 
Co-authored-by: Claude Opus 4.7 --- CHANGELOG.md | 53 ++++++++++-- Cargo.toml | 2 +- README.md | 90 +++++++++++++++++-- docs/guide/index.html | 176 +++++++++++++++++++++++++++++++++++++- docs/index.html | 60 +++++++++++++ docs/reference/index.html | 68 +++++++++++++++ 6 files changed, 437 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7d7b6d..e99b10b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,12 +7,55 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.4.0] - 2026-05-04 + ### Added -- `[worktree]` config section with `shared_cache` and `cache_strategy` for - selective build cache sharing between worktrees. New worktrees can now reuse - `target/`, `node_modules/`, `.venv/`, etc. from the main repo via symlink - (default) or recursive copy, eliminating cold-build cost on `parsec start` - (#207). +- **Bitbucket Cloud forge** — full PR lifecycle support (create, list, view, + merge, comments). New tracker/forge entries `bitbucket` selectable via + `parsec config` and `[forge]` settings (#240). +- **Bitbucket Pipelines CI integration** — `parsec ci` and `pr-status` + commands now report Bitbucket Pipelines build state alongside GitHub + Actions and GitLab CI (#279). +- **`parsec compress` command** — squash a stack of related commits into a + single tidy commit before shipping, preserving co-author trailers (#236). +- **`parsec ship --template`** — auto-populate the PR description from a + repository's `.github/PULL_REQUEST_TEMPLATE.md` (or first match under + `.github/PULL_REQUEST_TEMPLATE/`) (#233). +- **`ship --reviewer` and `--label`** — attach reviewers and labels at PR + creation time (#261). +- **Stack `--submit`** — open all PRs in a stack in one command (#261). +- **Stack navigation comments** — auto-posted "← prev / next →" comments on + every PR in a stack so reviewers can walk the chain (#234). 
+- **`ship.draft` config + `--draft` flag** — open PRs as drafts by default
+  when working in throwaway / WIP branches (#238).
+- **`[worktree]` shared build cache** — `shared_cache` and `cache_strategy`
+  settings let new worktrees reuse `target/`, `node_modules/`, `.venv/`, etc.
+  from the main repo via symlink (default) or recursive copy, eliminating
+  cold-build cost on `parsec start` (#207).
+- **Offline mode toggle** — `[workspace].offline` config and per-command
+  `--no-pr` / `--no-tracker` flags so parsec can operate without forge or
+  tracker connectivity (#237).
+- **Observability lite** — every command run now has an execution ID and
+  step timing; opt in to JSONL export via `[observability]` settings for
+  tooling/agents to consume (#166).
+- **Config JSON Schema + `parsec config schema`** — schema published to
+  schemastore.org so editors auto-complete `parsec.toml`. The new
+  `parsec config schema` subcommand emits the schema on demand (#239).
+- **Windows CI coverage** — full test matrix on Windows runners (#257).
+- 11 new integration tests across forge adapters and worktree paths (#278).
+
+### Changed
+- README and reference docs updated to cover ship `--reviewer` / `--label`,
+  stack `--submit`, Bitbucket adapter, offline flags, build cache config,
+  and `parsec compress` (#265).
+
+### Fixed
+- Windows UNC path issue (`\\?\` prefix) breaking worktree operations on
+  Windows hosts — resolved via the `dunce` crate (#263).
+
+### CI
+- Trigger CI on `release/**` branches in addition to feature branches and
+  develop, so release-prep work is exercised before merge (#277).
 
 ## [0.3.3] - 2026-04-22
 
diff --git a/Cargo.toml b/Cargo.toml
index abb82b5..933c91f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "git-parsec"
-version = "0.3.3"
+version = "0.4.0"
 edition = "2021"
 authors = ["erishforG"]
 description = "Git worktree lifecycle manager — ticket to PR in one command. Parallel AI agent workflows with Jira & GitHub Issues integration."
diff --git a/README.md b/README.md index 2f3500d..5c14cd0 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,15 @@ Removed 1 worktree(s): - **Release workflow** -- Merge, tag, and create GitHub Releases with `parsec release` - **PR reviewers and labels** -- Assign reviewers and labels on ship with `--reviewer`/`--label` or config defaults - **Stack submit** -- Ship an entire stack in topological order with `parsec stack --submit` -- **Cross-platform** -- Tested on Linux, macOS, and Windows CI +- **Stack navigation comments** -- Auto-posted "← prev / next →" comments on each PR in a stack +- **PR template auto-fill** -- `parsec ship --template` populates the PR description from `.github/PULL_REQUEST_TEMPLATE.md` +- **Compress branch history** -- `parsec compress` squashes a branch's commits into a single tidy commit before shipping +- **Bitbucket Cloud** -- Full PR lifecycle (create, list, view, merge, comments) and Bitbucket Pipelines CI status +- **Offline mode** -- Global `--offline` flag (and `[workspace].offline` config) skips all network operations (tracker, PR, fetch) so parsec keeps working without connectivity +- **Observability** -- Every command run gets an execution ID with per-step timing; `parsec log --export` emits JSONL for tooling and AI agents to consume +- **Config JSON Schema** -- `parsec config schema` outputs a JSON Schema (also published to schemastore.org) so editors auto-complete `config.toml` +- **Draft-by-default** -- `[ship].draft = true` config or `--draft` flag opens PRs as drafts for WIP work +- **Cross-platform** -- Tested on Linux, macOS, and Windows CI; UNC path handling on Windows --- @@ -327,6 +335,7 @@ $ parsec log | [`parsec new-issue`](#parsec-new-issue) | Create a new issue (alias with extra options) | | [`parsec release`](#parsec-release-version) | Merge, tag, and create a GitHub Release | | [`parsec rename`](#parsec-rename-ticket---new-ticket-id) | Re-ticket a workspace to a different ticket ID | +| [`parsec 
compress`](#parsec-compress-ticket--m-message) | Squash all branch commits into one | --- @@ -628,6 +637,7 @@ parsec log [ticket] [-n, --last N] |--------|-------------| | `[ticket]` | Filter to a specific ticket | | `-n, --last N` | Show last N entries (default: 20) | +| `--export` | Emit the log as JSONL (one JSON object per line). Each entry includes execution ID and per-step timing for observability/debugging | ```bash $ parsec log @@ -645,6 +655,11 @@ $ parsec log PROJ-1234 # Last 3 entries only $ parsec log --last 3 + +# Export as JSONL (for tooling / AI agents) +$ parsec log --export +{"execution_id":"01HQ3D8R2K8...","op":"start","ticket":"PROJ-1234","steps":[{"name":"fetch_title","ms":214},{"name":"create_worktree","ms":98}],"started_at":"2026-04-15T09:14:01Z","duration_ms":312} +{"execution_id":"01HQ3D9V7Z2...","op":"ship","ticket":"PROJ-1234","steps":[{"name":"push","ms":820},{"name":"create_pr","ms":1305},{"name":"cleanup","ms":42}],"started_at":"2026-04-15T14:02:18Z","duration_ms":2167} ``` --- @@ -1037,6 +1052,16 @@ $ parsec config completions zsh # Install man page $ sudo parsec config man + +# Output the JSON Schema for config.toml (also published to schemastore.org) +$ parsec config schema > parsec-schema.json +``` + +The JSON Schema is published to **schemastore.org** so editors with schemastore integration (VS Code, IntelliJ, Helix) auto-complete and validate `~/.config/parsec/config.toml` and per-repo `.parsec.toml` automatically. To pin the schema locally instead, add to your config: + +```toml +# ~/.config/parsec/config.toml +#:schema https://json.schemastore.org/parsec.json ``` --- @@ -1186,6 +1211,33 @@ $ parsec rename PROJ-100 --new PROJ-200 --json --- +### `parsec compress [ticket] [-m ]` + +Squash all of a branch's commits into a single tidy commit before shipping. The branch is reset to the merge-base with the base branch and the cumulative changes are re-committed as one. Co-author trailers from squashed commits are preserved. 
+ +``` +parsec compress [ticket] [-m ] +``` + +| Option | Description | +|--------|-------------| +| `ticket` | Optional. Auto-detects the current worktree's ticket if omitted. | +| `-m, --message ` | Custom commit message. Default: combines all squashed commit messages. | + +```bash +# Compress the current worktree's branch +$ parsec compress +Compressed 7 commits into one on feature/PROJ-1234. + +# Compress a specific ticket with a custom message +$ parsec compress PROJ-1234 -m "feat: add user authentication" + +# Combine with ship +$ parsec compress && parsec ship +``` + +--- + ## Global Flags These flags work on every command: @@ -1193,6 +1245,7 @@ These flags work on every command: | Flag | Description | |------|-------------| | `--dry-run` | Preview what a command would do without making changes | +| `--offline` | Skip all network operations (tracker, PR, fetch). Also enabled by `[workspace].offline = true` in config | | `--json` | Machine-readable JSON output | | `-q, --quiet` | Suppress non-essential output | | `--repo ` | Target a different repository | @@ -1280,6 +1333,9 @@ Config file: `~/.config/parsec/config.toml` layout = "sibling" base_dir = ".parsec/workspaces" branch_prefix = "feature/" +# Skip all network operations (tracker, PR, fetch). Equivalent to passing +# --offline on every command. Useful for air-gapped / flight-mode dev. 
+offline = false [worktree] # Directories to share from the main repo into new worktrees so that @@ -1293,7 +1349,7 @@ shared_cache = ["target", "node_modules", ".venv"] cache_strategy = "symlink" [tracker] -# "jira" | "github" | "gitlab" | "none" +# "jira" | "github" | "gitlab" | "bitbucket" | "none" provider = "jira" [tracker.jira] @@ -1307,10 +1363,29 @@ base_url = "https://yourcompany.atlassian.net" base_url = "https://gitlab.com" # Auth: PARSEC_GITLAB_TOKEN env var +[tracker.bitbucket] +# workspace = "your-bitbucket-workspace" +# Auth: PARSEC_BITBUCKET_TOKEN (or BITBUCKET_TOKEN) env var +# api_base override: PARSEC_BITBUCKET_API_BASE env var + +[forge] +# Auto-detected from the remote URL when omitted. Override here if multiple +# forges are reachable (e.g., GitHub for PRs, Bitbucket Pipelines for CI). +# provider = "github" | "gitlab" | "bitbucket" + [ship] auto_pr = true # Create PR/MR on ship auto_cleanup = true # Remove worktree after ship -draft = false # Create PRs as drafts +draft = false # Open PRs as drafts by default +# template = ".github/PULL_REQUEST_TEMPLATE.md" # Auto-fill PR description +# default_reviewers = ["alice", "bob"] # Reviewers added on every ship +# default_labels = ["needs-review"] # Labels added on every ship + +[observability] +# Enable per-step timing in `parsec log --export` JSONL output. +# Each command run gets a unique execution ID; useful for tooling and AI +# agents to correlate parsec actions with downstream effects. 
+enabled = true [hooks] # Commands to run in new worktrees after creation @@ -1348,9 +1423,13 @@ pre_ship = ["cargo test", "cargo clippy"] | `GH_TOKEN` | Fallback GitHub token | | `PARSEC_GITLAB_TOKEN` | GitLab token for MR creation | | `GITLAB_TOKEN` | Fallback GitLab token | +| `PARSEC_BITBUCKET_TOKEN` | Bitbucket Cloud app password / access token | +| `BITBUCKET_TOKEN` | Fallback Bitbucket token | +| `PARSEC_BITBUCKET_API_BASE` | Override Bitbucket API base URL (test/mock servers) | | `PARSEC_JIRA_PROJECT` | Default Jira project key for `board` | | `PARSEC_JIRA_BOARD_ID` | Default Jira board ID for `board` | | `PARSEC_JIRA_ASSIGNEE` | Default assignee filter for `board` | +| `PARSEC_OFFLINE` | Set to `1` to force offline mode (same as `--offline`) | Token priority: `PARSEC_*_TOKEN` > platform-specific variables. @@ -1390,11 +1469,12 @@ $ echo $? | Feature | parsec | GitButler | worktrunk | git worktree | git-town | |---------|--------|-----------|-----------|--------------|----------| -| Ticket tracker integration | Jira + GitHub Issues | No | No | No | No | +| Ticket tracker integration | Jira + GitHub + GitLab + Bitbucket | No | No | No | No | | Physical isolation | Yes (worktrees) | No (virtual branches) | Yes (worktrees) | Yes | No | | Conflict detection | Cross-worktree | N/A | No | No | No | | One-step ship (push+PR+clean) | Yes | No | No | No | Yes | -| GitHub + GitLab | Both | Both | GitHub | No | GitHub, GitLab, Gitea, Bitbucket | +| Forges | GitHub + GitLab + Bitbucket | Both | GitHub | No | GitHub, GitLab, Gitea, Bitbucket | +| CI integrations | GitHub Actions + GitLab CI + Bitbucket Pipelines | No | No | No | No | | Operation history + undo | Yes | Yes | No | No | Yes (undo) | | JSON output | Yes | Yes | No | No | No | | CI monitoring | Yes (--watch) | No | No | No | No | diff --git a/docs/guide/index.html b/docs/guide/index.html index fb60ee0..6d18d77 100644 --- a/docs/guide/index.html +++ b/docs/guide/index.html @@ -1087,6 +1087,7 @@
  • AI Agent Workflows
  • Stacked PRs
  • New Features
  • +
  • What's New in v0.4
  • @@ -1428,7 +1429,7 @@

    Config file reference

    tracker.type Issue tracker backend. - "jira" / "github" / "gitlab" + "jira" / "github" / "gitlab" / "bitbucket" tracker.base_url @@ -1736,6 +1737,179 @@

    Pre-ship hooks

    + +
    +
    +

    What's New in v0.4

    + # +
    + +

    + v0.4 broadens forge support to Bitbucket Cloud, adds workflow utilities (compress, --template, stack navigation comments), and introduces operational primitives — offline mode, observability JSONL, and a published config schema — designed for tooling and AI agents. +

    + +

    Bitbucket Cloud — full PR lifecycle

    +

    + parsec now speaks Bitbucket Cloud's API: parsec ship opens PRs, parsec pr-status reports CI from Bitbucket Pipelines, parsec ci tails build status, and parsec merge merges from the terminal. Tracker integration uses the same tracker.bitbucket config block. +

    + +
    +
    +
    + Bitbucket setup +
    +
    +# Auth via env var +$ export PARSEC_BITBUCKET_TOKEN="<app-password>" +  +# Configure in ~/.config/parsec/config.toml +[tracker] +type = "bitbucket" +[tracker.bitbucket] +workspace = "my-team" +  +$ parsec ship CL-2208 + PR opened: bitbucket.org/my-team/repo/pull-requests/142 + Bitbucket Pipelines: BUILD #318 in_progress
    +
    + +

    Compress branch history with parsec compress

    +

    + Squash a branch's commits into one tidy commit before shipping. Co-author trailers from squashed commits are preserved automatically. +

    + +
    +
    +
    + parsec compress +
    +
    +# Squash all branch commits into one +$ parsec compress + Compressed 7 commits into one on feature/PROJ-123 +  +# With a custom message +$ parsec compress -m "feat: add user authentication" +  +# Compose: tidy history then ship +$ parsec compress && parsec ship +
    +
    + +

    Stack navigation comments

    +

    + When you ship a stacked PR, parsec auto-posts "← previous PR" / "next PR →" navigation comments on every PR in the stack. Reviewers can walk the chain without leaving the PR view. +

    + +

    PR template auto-fill — ship --template

    +

    + Use the repository's .github/PULL_REQUEST_TEMPLATE.md (or the first match under .github/PULL_REQUEST_TEMPLATE/) as the PR description automatically. Combine with ship.template in config.toml to make it the default. +

    + +
    +
    +
    + ship --template +
    +
    +$ parsec ship PROJ-123 --template +Loaded .github/PULL_REQUEST_TEMPLATE.md (348 chars) + PR opened with template body +
    +
    + +

    Offline mode — --offline / [workspace].offline

    +

    + Skip all network operations: tracker lookups, PR creation, fetches. Use a global --offline flag, the PARSEC_OFFLINE=1 env var, or set offline = true under [workspace] in config.toml. Per-command escapes (--no-pr, --no-tracker) remain available for finer control. +

    + +
    +
    +
    + offline mode +
    +
    +# Per-invocation +$ parsec start CL-2208 --offline --title "Add login retry" +  +# Persistent — flight mode +[workspace] +offline = true +
    +
    + +

    Observability — execution IDs + JSONL export

    +

    + Every command run gets a unique execution ID and per-step timing. parsec log --export emits one JSON object per line for tooling and AI agents to consume. Combined with --json on individual commands, parsec is fully introspectable. +

    + +
    +
    +
    + parsec log --export +
    +
    +$ parsec log --export | jq 'select(.duration_ms > 1000)' +{ + "execution_id": "01HQ3D9V7Z2...", + "op": "ship", + "ticket": "PROJ-123", + "steps": [ + {"name":"push","ms":820}, + {"name":"create_pr","ms":1305}, + {"name":"cleanup","ms":42} + ], + "duration_ms": 2167 +} +
    +
    + +

    Config JSON Schema — editor autocomplete

    +

    + The schema for config.toml is published to schemastore.org, so VS Code, IntelliJ, Helix, and any editor with schemastore integration auto-complete and validate every key. parsec config schema emits the schema for offline use. +

    + +
    +
    +
    + config schema +
    +
    +$ parsec config schema > parsec-schema.json +  +# Pin schema in your config for editor support +#:schema https://json.schemastore.org/parsec.json +
    +
    + +

    Worktree build cache sharing — [worktree].shared_cache

    +

    + New worktrees can reuse target/, node_modules/, .venv/, etc. from the main repo via symlink (default) or recursive copy. Eliminates cold-build cost on parsec start for any project with significant dependency caches. +

    + +
    +
    +
    + [worktree] config +
    +
    +[worktree] +shared_cache = ["target", "node_modules", ".venv"] +# "symlink" (default) — fast, zero-disk; parallel build of same artifact may race +# "copy" — independent caches per worktree, no race risk, more disk +cache_strategy = "symlink" +
    +
    + +

    Draft-by-default — ship.draft

    +

    + Set [ship].draft = true in config.toml to open every PR as a draft, or pass --draft per ship. Useful for iterative WIP review flows where you want CI feedback before requesting human review. +

    +
    + diff --git a/docs/index.html b/docs/index.html index 3773010..5beba37 100644 --- a/docs/index.html +++ b/docs/index.html @@ -1960,6 +1960,66 @@

    Worktree Diff

    $ parsec diff --stat
    + + + + + +
    +
    + +
    +

    Compress Branch History

    +

    + New in v0.4. parsec compress squashes a branch's commits into one tidy commit before shipping, preserving co-author trailers. Optional --message for a custom commit subject. +

    +
    $ parsec compress -m "feat: add user authentication"
    +
    + + +
    +
    + +
    +

    Offline Mode

    +

    + New in v0.4. Global --offline flag (and [workspace].offline config) skips all network operations — tracker lookups, PR creation, fetches — so parsec keeps working on a plane, in CI without secrets, or in air-gapped environments. +

    +
    $ parsec start CL-2208 --offline --title "Add login retry"
    +
    + + + + + +
    +
    + +
    +

    Config JSON Schema

    +

    + New in v0.4. The config.toml JSON Schema is published to schemastore.org so VS Code, IntelliJ, and Helix auto-complete and validate every key. parsec config schema emits the schema for offline use. +

    +
    $ parsec config schema > parsec-schema.json
    +
    diff --git a/docs/reference/index.html b/docs/reference/index.html index 6da1f2f..7f35b48 100644 --- a/docs/reference/index.html +++ b/docs/reference/index.html @@ -984,6 +984,7 @@
  • create
  • new-issue
  • rename
  • +
  • compress
  • release
  • @@ -1065,6 +1066,7 @@

    Global Options — available on every command

    -q / --quiet suppress non-essential output
    --repo <PATH> target repository path
    --dry-run preview changes without executing
    +
    --offline skip all network ops (tracker, PR, fetch)
    @@ -1914,6 +1916,7 @@

    History

    --last <N>Show only the last N operations. + --exportEmit the log as JSONL (one JSON object per line). Each entry includes execution_id and per-step timing for observability/debugging by tooling and AI agents. @@ -1930,6 +1933,11 @@

    History

    2024-01-15 09:05 start PROJ-123 worktree created 2024-01-14 17:44 start PROJ-125 worktree created 2024-01-14 16:30 clean 3 worktrees removed +  +# JSONL export — one JSON object per line, with execution_id and per-step timing +$ parsec log --export +{"execution_id":"01HQ3D8R2K8...","op":"start","ticket":"PROJ-123","steps":[{"name":"fetch_title","ms":214},{"name":"create_worktree","ms":98}],"duration_ms":312} +{"execution_id":"01HQ3D9V7Z2...","op":"ship","ticket":"PROJ-123","steps":[{"name":"push","ms":820},{"name":"create_pr","ms":1305},{"name":"cleanup","ms":42}],"duration_ms":2167} @@ -2095,6 +2103,7 @@

    Setup

    showDisplay current configuration (redacts sensitive tokens). manOpen the parsec manual in your pager. completions <SHELL>Generate shell completion script for zsh, bash, or fish. + schemaOutput the JSON Schema for config.toml. The schema is also published to schemastore.org so editors auto-complete configuration files. shellDeprecated. Use parsec init <SHELL> instead. @@ -2118,6 +2127,9 @@

    Setup

      # Show current config $ parsec config show +  +# Output the JSON Schema (also at https://json.schemastore.org/parsec.json) +$ parsec config schema > parsec-schema.json @@ -2327,6 +2339,62 @@

    Setup

    + +
    +
    + compress + Squash all branch commits into one + # +
    +

    + Resets the branch to the merge-base with the base branch and re-commits all changes as a single commit. Co-author trailers from squashed commits are preserved. Useful before parsec ship to keep PR history tidy. +

    +
    + Usage + parsec compress [TICKET] [OPTIONS] +
    + +
    + + + + + + + +
    ArgumentDescription
    [TICKET]Optional. Auto-detects the current worktree's ticket if omitted.
    +
    + +
    + + + + + + + +
    OptionDescription
    -m, --message <TEXT>Custom commit message. Default: combines all squashed commit messages.
    +
    + +
    +
    +
    + parsec compress +
    +
    +# Compress current worktree's branch +$ parsec compress + Compressed 7 commits into one on feature/PROJ-1234 +  +# Compress with custom message +$ parsec compress PROJ-1234 -m "feat: add user authentication" +  +# Combine with ship +$ parsec compress && parsec ship +
    +
    +
    +
    From 77834122949f235cce3b17dd3879841fa437b7fd Mon Sep 17 00:00:00 2001 From: erish Date: Mon, 4 May 2026 20:15:54 +0900 Subject: [PATCH 18/18] docs: refactor README + landing page (heavy slim) (#284) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: refactor README + landing page (heavy slim) Critical review found four big problems: - README.md was 1527 lines — same value-prop messaging repeated 4-5 times (tagline → What is → Why → Problem → Solution), Use Cases section duplicated, the entire 933-line Command Reference was a copy of docs/reference/index.html. - docs/index.html had 22 feature cards laid out flat — no priority, scroll fatigue, mega-cap features (Bitbucket, Stacked PRs) shown at the same visual weight as ship.draft defaults. - AI agent messaging took ~30% of README, risking miscategorization as an "AI tool" when most users are general developers. - The "What's New in v0.4" section in the guide was version-specific — same problem will recur for v0.5, v0.6. Changes ------- README.md (1527 → 204 lines) - New tagline: "From ticket to PR. One command." - Single demo block (no repeats), 60-second tour, Top 6 features, comparison table, error code matrix, doc links. - Removed: duplicated Use Cases section, full Command Reference (now linked to docs/reference/), AI Token Efficiency / Before-After expansion, redundant Why/Problem/Solution sections. docs/index.html (22 cards → 6 featured cards + collapsible "More features") - 6 consolidated cards: Ticket-driven worktrees · Stacked PRs · Multi- forge & multi-CI · One-step ship + lifecycle · Built for agents · Build cache + ergonomics. - Remaining 16 features compressed into a `
    ` block — visible on demand, no scroll fatigue, all features still discoverable. - AI agent messaging consolidated to one card ("Built for agents") covering --json, JSONL, error codes, offline, headless, schemastore. docs/guide/index.html — "What's New in v0.4" section renamed to "Recipes & Examples" with a permanent home; future versions can keep adding recipes here without further renames. All HTML validated (depth balanced). README links to docs/{guide,reference} verified. The release workflow's docs snapshot will pick this up on next release. Co-Authored-By: Claude Opus 4.7 * docs: SEO + AEO upgrades (llms.txt, structured data, social cards) Extends the docs refactor with discovery layer improvements aimed at both classic search engines and modern answer engines (Anthropic / OpenAI / Perplexity / Google AI). AEO (Answer Engine Optimization) -------------------------------- - New `docs/llms.txt` (3.5 KB) — emerging standard for LLM crawlers. Concise project summary with key facts (latest version, install, auth, config, error codes, comparisons), structured for clean LLM ingestion. - New `docs/llms-full.txt` (13 KB) — comprehensive plain-text dump covering all 27 commands, configuration, auth, error codes, stacked PRs, multi-forge support, observability, FAQ. One-stop file for any LLM/agent that needs the full picture. - `docs/robots.txt` explicitly welcomes GPTBot, ChatGPT-User, ClaudeBot, anthropic-ai, PerplexityBot, Google-Extended, CCBot. - Added `` on every page so crawlers can discover the LLM-friendly variants. Structured data (Schema.org JSON-LD) ------------------------------------ docs/index.html now emits 4 JSON-LD blocks: 1. SoftwareApplication — bumped to v0.4.0, datePublished 2026-05-04, softwareRequirements, releaseNotes URL, screenshot URL, applicationSubCategory, expanded keywords (Bitbucket, JSONL, etc.) 2. 
HowTo — 5-step "60-second tour" (start → switch → commit → ship → ci/merge) so Google can render a HowTo carousel and AI engines can answer "how do I use parsec". 3. BreadcrumbList — supports Google sitelinks. 4. FAQPage — expanded from 10 → 18 Q&As with v0.4-specific entries (Bitbucket, offline, observability JSONL, compress, config schema, forges/CI, build cache). docs/guide/index.html — TechArticle + BreadcrumbList (replaces the two stale duplicate JSON-LD blocks that existed lower in the file). docs/reference/index.html — TechArticle + BreadcrumbList (same cleanup of duplicate blocks). Social / SEO meta polish ------------------------ - og:image + twitter:image (demo.gif as fallback) — previously absent; social shares now show a preview. - og:image:width / height / alt, og:locale, og:site_name on all pages. - Page titles tightened with primary keywords: - index: "From ticket to PR. One command. | Git worktree lifecycle…" - guide: "Getting Started Guide — git-parsec | Install, configure, ship in 5 minutes" - reference: "Command Reference — git-parsec | All 27 commands…" - Descriptions rewritten to surface v0.4 differentiators (Bitbucket, stacked PRs, JSONL observability) while staying under 160 chars. - robots meta with max-snippet/max-image-preview/max-video-preview signals for richer SERP rendering. Sitemap ------- - All `lastmod` dates bumped from 2026-04-22 → 2026-05-04 to trigger re-crawl. - llms.txt and llms-full.txt added to sitemap with priority 0.7. Validation ---------- - All 3 HTML pages parse with balanced depth. - All 8 JSON-LD blocks (4 + 2 + 2) parse as valid JSON, types verified. - Local server confirms files served with correct MIME types. 
Co-Authored-By: Claude Opus 4.7 --------- Co-authored-by: Claude Opus 4.7 --- README.md | 1553 +++---------------------------------- docs/guide/index.html | 97 ++- docs/index.html | 431 +++++----- docs/llms-full.txt | 316 ++++++++ docs/llms.txt | 47 ++ docs/reference/index.html | 70 +- docs/robots.txt | 23 + docs/sitemap.xml | 18 +- 8 files changed, 826 insertions(+), 1729 deletions(-) create mode 100644 docs/llms-full.txt create mode 100644 docs/llms.txt diff --git a/README.md b/README.md index 5c14cd0..9576da1 100644 --- a/README.md +++ b/README.md @@ -4,1459 +4,193 @@ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) [![CI](https://github.com/erishforG/git-parsec/actions/workflows/ci.yml/badge.svg)](https://github.com/erishforG/git-parsec/actions) -> Git worktree lifecycle manager for parallel AI agent workflows +> **From ticket to PR. One command.** +> Git worktree lifecycle manager — isolated workspaces tied to your tickets, with stacked PRs, multi-forge support, and clean shipping. -**parsec** manages isolated git worktrees tied to tickets (Jira, GitHub Issues), enabling multiple AI agents or developers to work on the same repository in parallel without lock conflicts. - -![demo](demo.gif) - -## What is parsec? - -**parsec** is a command-line tool (CLI) that automates the full lifecycle of git worktrees: create an isolated workspace from a ticket ID, work in parallel without lock conflicts, then push + create PR + clean up in one command. It integrates with **Jira**, **GitHub Issues**, and **GitLab Issues** for automatic ticket title lookup, and supports **GitHub** and **GitLab** for PR/MR creation. - -Unlike plain `git worktree`, parsec tracks workspace state, detects file conflicts across worktrees, provides operation history with undo, supports stacked PRs, and offers CI status monitoring — all from a single CLI. - ---- - -## Why parsec? 
- -### What changes day-to-day - -| What you do today | With parsec | -|---|---| -| `git checkout -b feat/xyz`, `git worktree add`, configure manually | `parsec start TICKET` | -| `git push`, `gh pr create`, `git worktree remove`, delete branch | `parsec ship TICKET` | -| Open GitHub web UI to check CI results | `parsec ci --watch` | -| Merge PR on GitHub, delete branch, clean local worktree | `parsec merge TICKET` | - -### Key metrics - -- **PR lead time**: Less time between starting a ticket and opening a PR — no setup friction, no stash management -- **Context switches eliminated**: Jump between tickets without losing state; each ticket lives in its own directory -- **Conflict prevention**: `parsec conflicts` catches cross-ticket file collisions before they become merge problems -- **0 `index.lock` conflicts**: Every worktree has its own `.git` index — no serialized writes - -### Use cases - -**Solo developer** — Work on several tickets in parallel without stashing. Ship complete features (push + PR + cleanup) with one command. See all in-flight work at a glance with `parsec list`. - -**Team** — View the active sprint as a Kanban board with `parsec board`. Detect which tickets touch the same files before review. Monitor all open PRs and their CI status from the terminal. - -**AI agent orchestration** — Run multiple coding agents on the same repo simultaneously. Every agent gets its own isolated worktree with no `index.lock` contention. Use `--json` on every command for structured output agents can parse directly. - ---- - -## The Problem - -Git uses a single working directory with a single `index.lock`. 
When multiple AI agents (or developers) try to work on the same repo simultaneously: - -- `git add/commit` operations collide on `.git/index.lock` -- Context switching between tasks requires stashing or committing WIP -- Worktrees exist but have poor lifecycle management -- No connection between tickets and working directories - -## The Solution - -```bash -# Create isolated workspaces for two tickets -$ parsec start PROJ-1234 --title "Add user authentication" -Created workspace for PROJ-1234 at /home/user/myapp.PROJ-1234 - Add user authentication - -$ parsec start PROJ-5678 --title "Fix payment timeout" -Created workspace for PROJ-5678 at /home/user/myapp.PROJ-5678 - Fix payment timeout - -# See all active workspaces -$ parsec list -╭───────────┬──────────────────────┬────────┬──────────────────┬──────────────────────────────╮ -│ Ticket │ Branch │ Status │ Created │ Path │ -├───────────┼──────────────────────┼────────┼──────────────────┼──────────────────────────────┤ -│ PROJ-1234 │ feature/PROJ-1234 │ active │ 2026-04-15 09:00 │ /home/user/myapp.PROJ-1234 │ -│ PROJ-5678 │ feature/PROJ-5678 │ active │ 2026-04-15 09:01 │ /home/user/myapp.PROJ-5678 │ -╰───────────┴──────────────────────┴────────┴──────────────────┴──────────────────────────────╯ - -# Check if any workspaces touch the same files -$ parsec conflicts -No conflicts detected. - -# Complete: push, create PR, and clean up in one step -$ parsec ship PROJ-1234 -Shipped PROJ-1234! - PR: https://github.com/org/repo/pull/42 - Workspace cleaned up. 
- -# Remove all remaining workspaces -$ parsec clean --all -Removed 1 worktree(s): - - PROJ-5678 -``` - -## Features - -- **Ticket-driven workspaces** -- Create worktrees named after Jira/GitHub Issues tickets -- **Zero-conflict parallelism** -- Each workspace has its own index, no lock contention -- **Conflict detection** -- Warns when multiple workspaces modify the same files -- **One-step shipping** -- `parsec ship` pushes, creates a GitHub PR or GitLab MR, and cleans up -- **Adopt existing branches** -- Import branches already in progress with `parsec adopt` -- **Attach to existing branches** -- Start a workspace from an existing local or remote branch with `--branch` -- **Operation history and undo** -- `parsec log` shows what happened, `parsec undo` reverts it -- **Keep branches fresh** -- `parsec sync` rebases or merges the latest base branch into any worktree -- **Agent-friendly output** -- `--json` flag on every command for machine consumption -- **Status dashboard** -- See all parallel work at a glance -- **Auto-cleanup** -- Remove worktrees for merged branches automatically -- **GitHub and GitLab** -- PR and MR creation for both platforms -- **Stacked PRs** -- Create dependent PR chains with `--on` and sync the entire stack -- **Sprint board view** -- See the active sprint as a Kanban board with `parsec board` -- **Environment diagnostics** -- `parsec doctor` validates your setup and shows what needs fixing -- **Pre-ship hooks** -- Run custom commands before shipping with configurable `[hooks]` pre_ship -- **Issue creation** -- Create GitHub/Jira issues and start worktrees in one step with `parsec create` -- **Release workflow** -- Merge, tag, and create GitHub Releases with `parsec release` -- **PR reviewers and labels** -- Assign reviewers and labels on ship with `--reviewer`/`--label` or config defaults -- **Stack submit** -- Ship an entire stack in topological order with `parsec stack --submit` -- **Stack navigation comments** -- Auto-posted "← prev 
/ next →" comments on each PR in a stack -- **PR template auto-fill** -- `parsec ship --template` populates the PR description from `.github/PULL_REQUEST_TEMPLATE.md` -- **Compress branch history** -- `parsec compress` squashes a branch's commits into a single tidy commit before shipping -- **Bitbucket Cloud** -- Full PR lifecycle (create, list, view, merge, comments) and Bitbucket Pipelines CI status -- **Offline mode** -- Global `--offline` flag (and `[workspace].offline` config) skips all network operations (tracker, PR, fetch) so parsec keeps working without connectivity -- **Observability** -- Every command run gets an execution ID with per-step timing; `parsec log --export` emits JSONL for tooling and AI agents to consume -- **Config JSON Schema** -- `parsec config schema` outputs a JSON Schema (also published to schemastore.org) so editors auto-complete `config.toml` -- **Draft-by-default** -- `[ship].draft = true` config or `--draft` flag opens PRs as drafts for WIP work -- **Cross-platform** -- Tested on Linux, macOS, and Windows CI; UNC path handling on Windows - ---- - -## Impact - -### Concrete time savings - -The typical "start ticket, work, ship" flow goes from 5+ commands to 2: - -```bash -# Before parsec -git fetch origin -git checkout -b feature/PROJ-1234 origin/main -# ... open browser, look up Jira ticket title ... 
-git push -u origin feature/PROJ-1234 -gh pr create --title "Add user authentication" --base main -git checkout main -git worktree remove ../myapp.PROJ-1234 - -# With parsec: 2 commands, no browser -parsec start PROJ-1234 # fetches title from Jira automatically -parsec ship PROJ-1234 # push + PR + cleanup in one step -``` - -### Concrete risk reduction - -- **0 `index.lock` conflicts** — worktree isolation is physical; each workspace has its own `.git/index` -- **Conflict detection before it hurts** — `parsec conflicts` shows cross-worktree file overlap before any push -- **Undo for mistakes** — `parsec undo` reverses the last operation (start, ship, clean) - -### Token efficiency for AI agents - -Traditional AI agents waste tokens calling raw APIs. Each Jira or GitHub API call costs dozens of tokens for auth setup, pagination, and response parsing. **parsec packages git + tracker operations into single commands with structured output.** - -#### Before: Raw API Calls - -```bash -# Agent needs: sprint tickets + status + worktree info + PR status -# Step 1: Authenticate with Jira API -# Step 2: Find active sprint (GET /rest/agile/1.0/board/{id}/sprint?state=active) -# Step 3: Fetch sprint issues (GET /rest/agile/1.0/sprint/{id}/issue) -# Step 4: For each ticket, check local worktrees (git worktree list, parse output) -# Step 5: For each ticket, check PR status (GitHub API) -# → 5+ API calls, 100+ tokens, custom parsing logic -``` - -#### After: One parsec Command - -```bash -parsec board --json -# → Sprint + status-grouped tickets + worktree/PR flags in one structured JSON -``` - -#### Key benefits for AI agents - -| Capability | What it means | -|------------|---------------| -| `--json` on every command | Structured output AI can parse instantly | -| `parsec start` | git worktree + Jira fetch + state management in one call | -| `parsec board --json` | Sprint + tickets + worktree/PR status in one call | -| `parsec ship` | Push + PR creation + cleanup in one call | 
-| Env var defaults | Zero-arg commands after one-time setup | -| Conflict detection | AI agents can check before parallel edits | - ---- - -## Use Cases - -### Solo developer - -Work on multiple tickets in parallel without stashing or losing context. Each ticket lives in its own sibling directory, so switching is just `cd`. - -```bash -parsec start PROJ-1234 # new worktree from Jira ticket -parsec start PROJ-5678 # second worktree, works in parallel -cd $(parsec switch PROJ-1234) -# ... make changes, commit normally ... -parsec ship PROJ-1234 # push + PR + cleanup -``` - -### Team - -Keep the whole team's sprint visible from the terminal. Catch file conflicts before they become merge problems. Track all open PRs without leaving the shell. - -```bash -parsec board # sprint board: In Progress / In Review / Done -parsec conflicts # which tickets touch the same files? -parsec pr-status # CI and review state for all open PRs -parsec ci PROJ-1234 --watch # wait for CI to go green -``` - -### AI agent orchestration - -Run multiple coding agents on the same repo simultaneously. Each agent calls `parsec start` to get an isolated worktree, uses `--json` for structured output, and calls `parsec ship` when done. No `index.lock` contention, no custom shell parsing. 
- -```bash -# Agent 1 -parsec start PROJ-100 --json # isolated workspace, structured response - -# Agent 2 (same repo, same time) -parsec start PROJ-101 --json # separate worktree, no collision - -# Coordinator -parsec conflicts --json # detect overlap before agents commit -parsec board --json # full sprint + PR status in one call -``` - ---- - -## Installation - -### Pre-built binaries (recommended) - -Download the latest release for your platform from [GitHub Releases](https://github.com/erishforG/git-parsec/releases): - -```bash -# macOS (Apple Silicon) -curl -LO https://github.com/erishforG/git-parsec/releases/latest/download/parsec-{version}-aarch64-apple-darwin.tar.gz -tar xzf parsec-*-aarch64-apple-darwin.tar.gz -sudo mv parsec /usr/local/bin/ - -# macOS (Intel) -curl -LO https://github.com/erishforG/git-parsec/releases/latest/download/parsec-{version}-x86_64-apple-darwin.tar.gz - -# Linux (x86_64) -curl -LO https://github.com/erishforG/git-parsec/releases/latest/download/parsec-{version}-x86_64-unknown-linux-gnu.tar.gz - -# Windows — download .zip from the Releases page -``` - -### Via Cargo - -```bash -cargo install git-parsec -``` - -### Build from source - -```bash -git clone https://github.com/erishforG/git-parsec.git -cd git-parsec -cargo build --release -# Binary at ./target/release/parsec -``` - -## Quick Start - -```bash -# 1. (Optional) Run interactive setup -$ parsec config init - -# 2. Start work on a ticket -$ parsec start PROJ-1234 --title "Add rate limiting" -Created workspace for PROJ-1234 at /home/user/myapp.PROJ-1234 - Add rate limiting - - Tip: cd $(parsec switch PROJ-1234) - -# 3. Switch into the workspace -$ cd $(parsec switch PROJ-1234) - -# 4. Work, commit as normal... -$ git add . && git commit -m "Implement rate limiter" - -# 5. Start a second ticket in parallel -$ parsec start PROJ-5678 --title "Fix auth bug" - -# 6. Check for file conflicts across workspaces -$ parsec conflicts - -# 7. 
Ship when done -$ parsec ship PROJ-1234 -Shipped PROJ-1234! - PR: https://github.com/org/repo/pull/42 - Workspace cleaned up. - -# 8. See what happened -$ parsec log -``` - ---- - -## Command Reference - -| Command | What it does | -|---------|-------------| -| [`parsec start`](#parsec-start-ticket) | Create an isolated worktree for a ticket | -| [`parsec adopt`](#parsec-adopt-ticket) | Import an existing branch into parsec management | -| [`parsec list`](#parsec-list) | List all active parsec-managed worktrees | -| [`parsec status`](#parsec-status-ticket) | Show detailed status of a workspace | -| [`parsec ticket`](#parsec-ticket-ticket) | View ticket details from the configured tracker | -| [`parsec ship`](#parsec-ship-ticket) | Push, create PR/MR, and clean up in one step | -| [`parsec clean`](#parsec-clean) | Remove worktrees for merged branches | -| [`parsec conflicts`](#parsec-conflicts) | Detect files modified in more than one worktree | -| [`parsec switch`](#parsec-switch-ticket) | Print (or cd to) a ticket's worktree path | -| [`parsec log`](#parsec-log-ticket) | Show operation history | -| [`parsec undo`](#parsec-undo) | Undo the last parsec operation | -| [`parsec sync`](#parsec-sync-ticket) | Rebase/merge latest base branch into a worktree | -| [`parsec open`](#parsec-open-ticket) | Open PR or ticket page in browser | -| [`parsec pr-status`](#parsec-pr-status-ticket) | Check CI and review status of shipped PRs | -| [`parsec ci`](#parsec-ci-ticket---watch---all) | Check CI pipeline status for a PR | -| [`parsec merge`](#parsec-merge-ticket---rebase---no-wait---no-delete-branch) | Merge a PR from the terminal | -| [`parsec diff`](#parsec-diff-ticket---stat---name-only) | View changes vs base branch | -| [`parsec stack`](#parsec-stack---sync---submit) | View and manage stacked PR dependencies | -| [`parsec board`](#parsec-board) | Show sprint as a Kanban board | -| [`parsec init`](#parsec-init) | Install shell integration | -| [`parsec 
config`](#parsec-config) | Configure parsec | -| [`parsec doctor`](#parsec-doctor) | Validate environment and configuration | -| [`parsec create`](#parsec-create) | Create a new issue and optionally start a worktree | -| [`parsec new-issue`](#parsec-new-issue) | Create a new issue (alias with extra options) | -| [`parsec release`](#parsec-release-version) | Merge, tag, and create a GitHub Release | -| [`parsec rename`](#parsec-rename-ticket---new-ticket-id) | Re-ticket a workspace to a different ticket ID | -| [`parsec compress`](#parsec-compress-ticket--m-message) | Squash all branch commits into one | - ---- - -### `parsec start ` - -Create an isolated worktree for a ticket. Fetches the ticket title from your configured tracker (Jira, GitHub Issues) or accepts a manual title. - -``` -parsec start [--base ] [--title "text"] [--on ] [--branch ] [--hook "cmd"] -``` - -| Option | Description | -|--------|-------------| -| `-b, --base ` | Base branch to create from (default: main/master) | -| `--title "text"` | Set ticket title manually, skip tracker lookup | -| `--on ` | Stack on another ticket's branch (for dependent PRs) | -| `--branch ` | Use an existing branch instead of creating a new one | -| `--hook "cmd"` | Run a command after worktree creation (one-off hook) | - -```bash -# With Jira integration (title auto-fetched) -$ parsec start CL-2283 -Created workspace for CL-2283 at /home/user/myapp.CL-2283 - Implement rate limiting for API endpoints - - Tip: cd $(parsec switch CL-2283) - -# With manual title -$ parsec start 42 --title "Fix login redirect" -Created workspace for 42 at /home/user/myapp.42 - Fix login redirect - - Tip: cd $(parsec switch 42) - -# From a specific base branch -$ parsec start PROJ-99 --base release/2.0 - -# Attach to an existing branch (local or remote) -$ parsec start CL-2208 --branch feature/CL-2208 - -# Attach to a remote-only branch (auto-fetches and tracks) -$ parsec start CL-2208 --branch origin/feature/CL-2208 - -# Run a setup 
command after creation -$ parsec start PROJ-42 --hook "npm install" -``` - ---- - -### `parsec adopt ` - -Import an existing branch into parsec management. Useful when you started work before using parsec, or when taking over someone else's branch. - -``` -parsec adopt [--branch ] [--title "text"] -``` - -| Option | Description | -|--------|-------------| -| `-b, --branch ` | Branch to adopt (default: ``) | -| `--title "text"` | Set ticket title manually | - -```bash -# Adopt a branch matching the default prefix -$ parsec adopt PROJ-1234 -Adopted branch 'feature/PROJ-1234' as PROJ-1234 at /home/user/myapp.PROJ-1234 - -# Adopt a branch with a different name -$ parsec adopt PROJ-99 --branch fix/payment-timeout -Adopted branch 'fix/payment-timeout' as PROJ-99 at /home/user/myapp.PROJ-99 -``` - ---- - -### `parsec list` - -List all active parsec-managed worktrees. - -``` -parsec list [--full] [--no-pr] -``` - -```bash -$ parsec list -╭────────┬────────────────┬────────┬──────────────────┬────────────────────────────╮ -│ Ticket │ Branch │ Status │ Created │ Path │ -├────────┼────────────────┼────────┼──────────────────┼────────────────────────────┤ -│ TEST-1 │ feature/TEST-1 │ active │ 2026-04-15 09:00 │ /home/user/myapp.TEST-1 │ -│ TEST-2 │ feature/TEST-2 │ active │ 2026-04-15 09:05 │ /home/user/myapp.TEST-2 │ -╰────────┴────────────────┴────────┴──────────────────┴────────────────────────────╯ - -# Show extended metadata per worktree -$ parsec list --full -╭────────┬────────────────┬────────┬──────────────┬──────────┬─────────────────────┬───────────┬────────────────────────────╮ -│ Ticket │ Branch │ Status │ Ahead/Behind │ Unpushed │ Last Commit │ Age │ Path │ -├────────┼────────────────┼────────┼──────────────┼──────────┼─────────────────────┼───────────┼────────────────────────────┤ -│ TEST-1 │ feature/TEST-1 │ active │ +3 / -0 │ 1 │ Add rate limiting │ 2h ago │ /home/user/myapp.TEST-1 │ -│ TEST-2 │ feature/TEST-2 │ active │ +1 / -2 │ 0 │ Fix auth redirect │ 30m 
ago │ /home/user/myapp.TEST-2 │ -╰────────┴────────────────┴────────┴──────────────┴──────────┴─────────────────────┴───────────┴────────────────────────────╯ - -$ parsec list --json -[{"ticket":"TEST-1","path":"/home/user/myapp.TEST-1","branch":"feature/TEST-1","base_branch":"main","created_at":"2026-04-15T09:00:00Z","ticket_title":"Add auth","status":"active"}] -``` - -| Option | Description | -|--------|-------------| -| `--full` | Show extended metadata (commits, divergence, last commit) | -| `--no-pr` | Skip PR status lookup (faster, works offline) | - ---- - -### `parsec status [ticket]` - -Show detailed status of a workspace. Shows all workspaces if no ticket is specified. - -``` -parsec status [ticket] -``` - -```bash -$ parsec status PROJ-1234 -────────────────────────────────────────────────── - Ticket: PROJ-1234 - Title: Add user authentication - Branch: feature/PROJ-1234 - Base: main - Status: active - Created: 2026-04-15 09:00 UTC - Path: /home/user/myapp.PROJ-1234 -────────────────────────────────────────────────── -``` - ---- - -### `parsec ticket [ticket]` - -View ticket details from the configured tracker. Auto-detects the ticket from the current worktree if no argument is given. - -``` -parsec ticket [ticket] -``` - -```bash -# Auto-detect from current worktree -$ parsec ticket -CL-2283: Implement rate limiting for API endpoints - Status: In Progress - Assignee: eric.signal - URL: https://jira.example.com/browse/CL-2283 - -# Explicit ticket -$ parsec ticket CL-2283 - -# JSON output -$ parsec ticket CL-2283 --json -{"id":"CL-2283","title":"Implement rate limiting","status":"In Progress","assignee":"eric.signal","url":"https://jira.example.com/browse/CL-2283"} -``` - ---- - -### `parsec ship ` - -Push the branch, create a PR (GitHub) or MR (GitLab), and clean up the worktree. The forge is auto-detected from the remote URL. - -``` -parsec ship [--draft] [--no-pr] [--base ] [--skip-hooks] [--reviewer ]... [--label ]... 
-``` - -| Option | Description | -|--------|-------------| -| `--draft` | Create the PR/MR as a draft | -| `--no-pr` | Push only, skip PR/MR creation | -| `--base ` | Target base branch for PR (overrides config `default_base` and worktree base) | -| `--skip-hooks` | Skip pre-ship hooks defined in config | -| `-r, --reviewer ` | Request review from a GitHub user (repeatable) | -| `-l, --label ` | Add a label to the PR (repeatable) | - -```bash -# Push + PR + cleanup -$ parsec ship PROJ-1234 -Shipped PROJ-1234! - PR: https://github.com/org/repo/pull/42 - Workspace cleaned up. - -# Draft PR -$ parsec ship PROJ-5678 --draft - -# Push only, no PR -$ parsec ship PROJ-9000 --no-pr - -# Ship with reviewers and labels -$ parsec ship PROJ-1234 --reviewer alice --reviewer bob --label "needs-review" -``` - -Reviewers and labels can also be set as defaults in config: - -```toml -[ship] -default_reviewers = ["alice", "bob"] -default_labels = ["team-backend"] -``` - -Token required: set `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) for GitHub, or `PARSEC_GITLAB_TOKEN` (or `GITLAB_TOKEN`) for GitLab. - ---- - -### `parsec clean` - -Remove worktrees whose branches have been merged. Use `--all` to remove everything. - -``` -parsec clean [--all] [--dry-run] -``` - -| Option | Description | -|--------|-------------| -| `--all` | Remove all worktrees, including unmerged | -| `--dry-run` | Preview what would be removed | - -```bash -# Preview first -$ parsec clean --dry-run -Would remove 1 worktree(s): - - PROJ-1234 - -# Remove merged worktrees -$ parsec clean -Removed 1 worktree(s): - - PROJ-1234 - -# Remove everything -$ parsec clean --all -Removed 3 worktree(s): - - PROJ-1234 - - PROJ-5678 - - PROJ-9000 -``` - ---- - -### `parsec conflicts` - -Detect files modified in more than one active worktree. Workspaces with no changes are skipped. - -``` -parsec conflicts -``` - -```bash -# No conflicts -$ parsec conflicts -No conflicts detected. 
- -# Conflicts found -$ parsec conflicts -╭──────────────────┬──────────────────────╮ -│ File │ Worktrees │ -├──────────────────┼──────────────────────┤ -│ src/api/router.rs│ PROJ-1234, PROJ-5678 │ -╰──────────────────┴──────────────────────╯ -``` - ---- - -### `parsec switch [ticket]` - -Print the absolute path to a ticket's worktree. When called without a ticket, shows an interactive picker. Designed for `cd $(parsec switch ...)`. - -``` -parsec switch [ticket] -``` - -```bash -# Direct switch -$ parsec switch PROJ-1234 -/home/user/myapp.PROJ-1234 - -# Interactive picker (no argument) -$ parsec switch -? Switch to workspace › -❯ PROJ-1234 — Add user authentication - PROJ-5678 — Fix payment timeout - -# Use with cd -$ cd $(parsec switch PROJ-1234) -``` - ---- - -### `parsec log [ticket]` - -Show the history of parsec operations. Each mutating command (start, adopt, ship, clean, undo) is recorded with a timestamp. - -``` -parsec log [ticket] [-n, --last N] -``` - -| Option | Description | -|--------|-------------| -| `[ticket]` | Filter to a specific ticket | -| `-n, --last N` | Show last N entries (default: 20) | -| `--export` | Emit the log as JSONL (one JSON object per line). 
Each entry includes execution ID and per-step timing for observability/debugging | - -```bash -$ parsec log -╭───┬───────┬───────────┬───────────────────────────────────────────────┬──────────────────╮ -│ # │ Op │ Ticket │ Detail │ Time │ -├───┼───────┼───────────┼───────────────────────────────────────────────┼──────────────────┤ -│ 4 │ clean │ PROJ-5678 │ Cleaned workspace for branch 'feature/5678' │ 2026-04-15 14:30 │ -│ 3 │ ship │ PROJ-1234 │ Shipped branch 'feature/PROJ-1234' │ 2026-04-15 14:02 │ -│ 2 │ start │ PROJ-5678 │ Created workspace at /home/user/myapp.5678 │ 2026-04-15 13:55 │ -│ 1 │ start │ PROJ-1234 │ Created workspace at /home/user/myapp.1234 │ 2026-04-15 09:14 │ -╰───┴───────┴───────────┴───────────────────────────────────────────────┴──────────────────╯ - -# Filter by ticket -$ parsec log PROJ-1234 - -# Last 3 entries only -$ parsec log --last 3 - -# Export as JSONL (for tooling / AI agents) -$ parsec log --export -{"execution_id":"01HQ3D8R2K8...","op":"start","ticket":"PROJ-1234","steps":[{"name":"fetch_title","ms":214},{"name":"create_worktree","ms":98}],"started_at":"2026-04-15T09:14:01Z","duration_ms":312} -{"execution_id":"01HQ3D9V7Z2...","op":"ship","ticket":"PROJ-1234","steps":[{"name":"push","ms":820},{"name":"create_pr","ms":1305},{"name":"cleanup","ms":42}],"started_at":"2026-04-15T14:02:18Z","duration_ms":2167} -``` - ---- - -### `parsec undo` - -Undo the last parsec operation. - -- Undo `start` or `adopt`: removes the worktree and deletes the branch -- Undo `ship` or `clean`: re-creates the worktree from the branch (if still available locally or on remote) - -``` -parsec undo [--dry-run] -``` - -| Option | Description | -|--------|-------------| -| `--dry-run` | Preview what would be undone | - -```bash -# Preview -$ parsec undo --dry-run -Would undo: start PROJ-5678 - Would remove worktree at /home/user/myapp.PROJ-5678 - Would delete branch 'feature/PROJ-5678' - -# Execute -$ parsec undo -Undid start for PROJ-5678 - Worktree removed. 
- -# Nothing to undo -$ parsec undo -Error: nothing to undo. Run `parsec log` to see operation history. -``` - ---- - -### `parsec sync [ticket]` - -Fetch the latest base branch and rebase (or merge) the worktree on top. Detects the current worktree automatically when no ticket is given. - -``` -parsec sync [ticket] [--all] [--strategy rebase|merge] -``` - -| Option | Description | -|--------|-------------| -| `--all` | Sync all active worktrees | -| `--strategy` | `rebase` (default) or `merge` | - -```bash -# Sync current worktree -$ parsec sync -✓ rebase 1 worktree(s): - - PROJ-1234 - -# Sync a specific worktree -$ parsec sync PROJ-5678 - -# Sync all worktrees at once -$ parsec sync --all - -# Use merge instead of rebase -$ parsec sync --strategy merge -``` - ---- - -### `parsec open ` - -Open the associated PR/MR or ticket tracker page in your default browser. If the ticket has been shipped, opens the PR by default; otherwise opens the tracker page. - -``` -parsec open [--pr] [--ticket-page] -``` - -| Option | Description | -|--------|-------------| -| `--pr` | Force open the PR/MR page | -| `--ticket-page` | Force open the ticket tracker page | - -```bash -# Open PR if shipped, otherwise ticket page -$ parsec open PROJ-1234 -Opening https://github.com/org/repo/pull/42 - -# Force open the Jira ticket -$ parsec open PROJ-1234 --ticket-page -Opening https://yourcompany.atlassian.net/browse/PROJ-1234 - -# Force open the PR -$ parsec open PROJ-1234 --pr -Opening https://github.com/org/repo/pull/42 -``` - ---- - -### `parsec pr-status [ticket]` - -Check the CI and review status of shipped PRs. Shows CI check results, review approvals, and merge state in a color-coded table. 
- -``` -parsec pr-status [ticket] -``` +![demo](demo.gif) ```bash -# Check a specific ticket's PR -$ parsec pr-status PROJ-1234 -┌───────────┬─────┬────────┬──────────┬──────────────┐ -│ Ticket │ PR │ State │ CI │ Reviews │ -├───────────┼─────┼────────┼──────────┼──────────────┤ -│ PROJ-1234 │ #42 │ open │ ✓ passed │ ✓ approved │ -└───────────┴─────┴────────┴──────────┴──────────────┘ - -# Check all shipped PRs -$ parsec pr-status - -# JSON output -$ parsec pr-status PROJ-1234 --json +$ parsec start PROJ-1234 # creates worktree, fetches Jira title, sets up branch +$ parsec ship PROJ-1234 # pushes, opens PR, cleans worktree ``` -Requires: `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) +That's the whole loop. Plain `git worktree` doesn't track state, doesn't talk to your tracker, doesn't open PRs, doesn't clean up. **parsec** does. --- -### `parsec ci [ticket] [--watch] [--all]` - -Check CI/CD pipeline status for a ticket's PR. Shows individual check runs with status, duration, and an overall summary. 
- -``` -parsec ci [ticket] [--watch] [--all] -``` - -| Option | Description | -|--------|-------------| -| `ticket` | Ticket identifier (auto-detects current worktree if omitted) | -| `--watch` | Poll CI every 5s until all checks complete | -| `--all` | Show CI for all shipped PRs | - -```bash -# Auto-detect from current worktree -$ parsec ci -CI for PROJ-1234 (PR #42, a1b2c3d) -┌────────────┬───────────┬──────────┐ -│ Check │ Status │ Duration │ -├────────────┼───────────┼──────────┤ -│ Tests │ ✓ passed │ 2m 15s │ -│ Build │ ✓ passed │ 1m 42s │ -│ Lint │ ● running │ running… │ -└────────────┴───────────┴──────────┘ -✓ CI: 2/3 — 2 passed, 1 running - -# Check a specific ticket -$ parsec ci PROJ-1234 +## Why use it -# Watch mode — refreshes every 5s until done -$ parsec ci PROJ-1234 --watch - -# All shipped PRs -$ parsec ci --all - -# JSON output -$ parsec ci PROJ-1234 --json -``` - -Requires: `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) +- **No `index.lock` collisions** — every workspace has its own `.git/index`, so multiple developers (or AI agents) can run `git add` on the same repo at the same time. +- **One command per phase** — `start` (worktree + tracker fetch), `ship` (push + PR + cleanup), `merge` (merge + branch cleanup), `clean` (sweep merged branches). No web UI round-trips. +- **Stacked PRs that don't melt your brain** — `parsec start FOO --on BAR` chains workspaces; `parsec stack --submit` opens the whole stack in topological order with auto-posted "← prev / next →" navigation comments. +- **Cross-worktree conflict detection** — `parsec conflicts` flags files modified in two workspaces *before* anyone pushes. +- **Multi-forge** — GitHub, GitLab, Bitbucket Cloud. Multi-tracker — Jira, GitHub Issues, GitLab Issues, Bitbucket. +- **Agent-friendly** — `--json` on every command, structured error codes, JSONL execution log, headless/offline modes. 
--- -### `parsec merge [ticket] [--rebase] [--no-wait] [--no-delete-branch]` - -Merge a ticket's PR directly from the terminal. Waits for CI to pass before merging, then cleans up the local worktree. - -``` -parsec merge [ticket] [--rebase] [--no-wait] [--no-delete-branch] -``` - -| Option | Description | -|--------|-------------| -| `ticket` | Ticket identifier (auto-detects current worktree if omitted) | -| `--rebase` | Use rebase merge instead of squash (default: squash) | -| `--no-wait` | Skip CI check before merging | -| `--no-delete-branch` | Keep remote branch after merge | +## Install ```bash -# Squash merge (default) -$ parsec merge PROJ-1234 -Waiting for CI to pass... ✓ -Merged PR #42 for PROJ-1234! - Method: squash - SHA: a1b2c3d - -# Rebase merge -$ parsec merge PROJ-1234 --rebase - -# Skip CI wait -$ parsec merge PROJ-1234 --no-wait - -# JSON output -$ parsec merge PROJ-1234 --json -``` - -Requires: `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) +# Homebrew / pre-built binary (recommended) +curl -LO https://github.com/erishforG/git-parsec/releases/latest/download/parsec-x86_64-unknown-linux-gnu.tar.gz +tar xzf parsec-*.tar.gz && sudo mv parsec /usr/local/bin/ ---- - -### `parsec diff [ticket] [--stat] [--name-only]` - -View changes in a worktree compared to its base branch. Uses merge-base for accurate comparison. 
- -``` -parsec diff [ticket] [--stat] [--name-only] +# Cargo (Rust toolchain required) +cargo install git-parsec ``` -| Option | Description | -|--------|-------------| -| `ticket` | Ticket identifier (auto-detects current worktree if omitted) | -| `--stat` | Show file-level summary only | -| `--name-only` | List changed file names only | - -```bash -# Full diff for current worktree -$ parsec diff - -# File summary -$ parsec diff PROJ-1234 --stat - -# Just file names -$ parsec diff --name-only - -# JSON output (changed files list) -$ parsec diff PROJ-1234 --json -``` +Other targets (macOS arm64/x86_64, Windows x86_64) ship on every release — see [Releases](https://github.com/erishforG/git-parsec/releases). After install, run `parsec config init` for the interactive first-time setup, then `parsec doctor` to validate. --- -### `parsec stack [--sync] [--submit]` - -View and manage stacked PR dependencies. Worktrees created with `--on` form a dependency chain. PRs include a **stack navigation table** showing parent/child relationships. 
- -``` -parsec stack [--sync] [--submit] -``` - -| Option | Description | -|--------|-------------| -| `--sync` | Rebase the entire stack chain | -| `--submit` | Ship the entire stack in topological order (root first) | +## 60-second tour ```bash -# Create a stack -$ parsec start PROJ-1 --title "Add models" -$ parsec start PROJ-2 --on PROJ-1 --title "Add API endpoints" -$ parsec start PROJ-3 --on PROJ-2 --title "Add frontend" +# Pull a ticket from Jira / GitHub / GitLab / Bitbucket and start work +$ parsec start PROJ-1234 +✓ Worktree: ../myapp.PROJ-1234 + Title: Add rate limiting (fetched from Jira) + Branch: feature/PROJ-1234 -# View the dependency graph -$ parsec stack -Stack dependency graph: -└── PROJ-1 Add models - └── PROJ-2 Add API endpoints - └── PROJ-3 Add frontend +# See everything in flight +$ parsec list +╭───────────┬───────────────────┬────────┬─────────────────────────╮ +│ Ticket │ Branch │ Status │ Path │ +├───────────┼───────────────────┼────────┼─────────────────────────┤ +│ PROJ-1234 │ feature/PROJ-1234 │ active │ ../myapp.PROJ-1234 │ +│ PROJ-5678 │ feature/PROJ-5678 │ active │ ../myapp.PROJ-5678 │ +╰───────────┴───────────────────┴────────┴─────────────────────────╯ + +# Hop in (shell integration auto-cd's) +$ parsec switch PROJ-1234 -# Sync the entire stack -$ parsec stack --sync +# Make commits the normal way, then ship — push + PR + cleanup in one shot +$ parsec ship PROJ-1234 +✓ Pushed feature/PROJ-1234 +✓ PR opened: github.com/org/repo/pull/42 +✓ Worktree cleaned up -# Ship creates PRs with correct base branches -$ parsec ship PROJ-1 # PR to main -$ parsec ship PROJ-2 # PR to feature/PROJ-1 -$ parsec ship PROJ-3 # PR to feature/PROJ-2 +# Watch CI without leaving the terminal +$ parsec ci PROJ-1234 --watch -# Or ship the entire stack at once -$ parsec stack --submit -Submitting stack (3 worktrees): - 1. PROJ-1 - 2. PROJ-2 - 3. 
PROJ-3 -Stack submit complete: 3/3 shipped +# Merge from terminal once CI is green +$ parsec merge PROJ-1234 ``` -Each PR body includes a stack navigation table: - -| | Ticket | Branch | -|---|--------|--------| -| ⬆ Parent | PROJ-1 | `feature/PROJ-1` | -| **➡ Current** | **PROJ-2** | **`feature/PROJ-2`** | -| ⬇ Child | PROJ-3 | `feature/PROJ-3` | - --- -### `parsec board` - -Show the active sprint as a vertical board view. Fetches tickets from Jira grouped by status column, with worktree and PR indicators. - -``` -parsec board [--project ] [--board-id ] [--assignee ] [--all] -``` - -| Option | Description | -|--------|-------------| -| `-p, --project ` | Jira project key (default from env/config) | -| `--board-id ` | Jira board ID (auto-detected from project) | -| `--assignee ` | Filter by assignee (default from env/config) | -| `--all` | Show all tickets (ignore assignee filter) | +## Top features +### 🌿 Stacked PRs that don't melt your brain ```bash -# Show your tickets (with PARSEC_JIRA_ASSIGNEE configured) -$ parsec board - -26.04.06 ~ 26.04.20 - -In Progress (3) - CL-2283 [wt] 로그 분석 서비스 개발 - CL-2284 [wt] FDE 대시보드 관련 - CL-2291 반품 요청 API 개발 - -In Review (2) - CL-2281 [pr] ai 커피챗 준비 - CL-2280 이관 요청할 API 정리 - -# Show all team tickets -$ parsec board --all - -# JSON output for AI agents -$ parsec board --json -{"sprint":{"id":123,"name":"...","start":"...","end":"..."},"total_count":48,"columns":{"In Progress":[...],...}} +$ parsec start PROJ-2 --on PROJ-1 # new worktree on top of PROJ-1's branch +$ parsec stack --submit # open all PRs in the stack, root first ``` +parsec auto-posts `← previous PR` / `next PR →` navigation comments so reviewers can walk the chain. -Defaults can be set via environment variables or config file (see below). 
- ---- - -### `parsec init` +### 🔄 Multi-forge, multi-tracker +- **Forges**: GitHub · GitLab · Bitbucket Cloud (full PR lifecycle on each) +- **Trackers**: Jira · GitHub Issues · GitLab Issues · Bitbucket +- **CI status**: GitHub Actions · GitLab CI · Bitbucket Pipelines -Output or install shell integration for auto-cd on `parsec switch` and CWD recovery after `parsec merge`. +`parsec ci` and `pr-status` work the same shape across all of them. -``` -parsec init [shell] [--install] [--yes] -``` +### 🤖 Agent-friendly by design +Every command has `--json`. Errors emit structured codes (E001…E013). `parsec log --export` outputs JSONL with execution IDs and per-step timing for tooling/agents to consume. `--offline` and `[behavior].offline` config skip all network ops for air-gapped or CI environments. -| Option | Description | -|--------|-------------| -| `shell` | Shell type: `zsh` (default) or `bash` | -| `--install` | Auto-append integration to shell config file | -| `-y, --yes` | Skip confirmation prompt (for scripting) | +### 🧹 Lifecycle hygiene +`parsec clean` sweeps worktrees for already-merged branches. `parsec conflicts` flags cross-worktree file overlap before you push. `parsec undo` reverses the last operation (start, ship, clean). `parsec doctor` validates every part of your setup with actionable fix instructions. -```bash -# Print the shell function (pipe to eval) -$ parsec init zsh +### 📂 Worktree build cache sharing +`[worktree].shared_cache = ["target", "node_modules", ".venv"]` lets new worktrees reuse the main repo's caches via symlink (default) or copy. Eliminates cold-build cost on `parsec start` for any project with significant dependency caches. -# Auto-install into ~/.zshrc -$ parsec init --install -Add shell integration to /home/user/.zshrc? [Y/n] y -Shell integration added. Run `source ~/.zshrc` or restart your shell. +### 📋 Sprint board + issue creation +`parsec board` turns your active sprint into a Kanban board in the terminal. 
`parsec create` and `parsec new-issue` open issues in your tracker without leaving the shell. -# Non-interactive install -$ parsec init --install --yes -``` +> 27 commands total — see the [full command reference](https://erishforg.github.io/git-parsec/reference/) for every flag and example. --- -### `parsec config` - -```bash -# Interactive setup wizard -$ parsec config init - -# Show current configuration -$ parsec config show -[workspace] - layout = sibling - base_dir = .parsec/workspaces - branch_prefix = feature/ - -[tracker] - provider = jira - jira.base_url = https://yourcompany.atlassian.net - -[ship] - auto_pr = true - auto_cleanup = true - draft = false - # default_base = "develop" # Target branch for PRs (default: worktree base) - -# Output shell integration script -$ parsec config shell zsh - -# Generate shell completions -$ parsec config completions zsh - -# Install man page -$ sudo parsec config man - -# Output the JSON Schema for config.toml (also published to schemastore.org) -$ parsec config schema > parsec-schema.json -``` - -The JSON Schema is published to **schemastore.org** so editors with schemastore integration (VS Code, IntelliJ, Helix) auto-complete and validate `~/.config/parsec/config.toml` and per-repo `.parsec.toml` automatically. To pin the schema locally instead, add to your config: +## Configuration (minimal example) ```toml # ~/.config/parsec/config.toml #:schema https://json.schemastore.org/parsec.json -``` - ---- - -### `parsec doctor` - -Validate your environment and configuration. Prints ✓/✗ for each check with actionable fix instructions. 
- -```bash -$ parsec doctor -parsec doctor - ✓ git version 2.43.0 (worktree support ok) - ✓ config file found at ~/.config/parsec/config.toml - ✓ GitHub token configured (github.com) via gh auth token - ✗ shell integration not found in shell config - Add to ~/.zshrc: eval "$(parsec init zsh)" - ✗ tab completions not configured - Add to ~/.zshrc: eval "$(parsec config completions zsh)" - ✓ remote origin accessible - -2 check(s) failed. - -$ parsec doctor --json -{"checks":[...],"all_ok":false} -``` - -**AI agent mode** — output parsec workflow rules as a Markdown document for AI agents to consume: - -```bash -$ parsec doctor --ai -# Outputs structured Markdown with workflow rules, command patterns, -# and best practices for AI agents using parsec -``` - ---- - -### `parsec create` - -Create a new issue on the configured tracker (GitHub Issues or Jira) and optionally start a worktree for it immediately. - -``` -parsec create --title "text" [--body "text"] [--label "a,b"] [--project KEY] [--start] -``` - -| Option | Description | -|--------|-------------| -| `--title "text"` | Issue title (required) | -| `--body "text"` | Issue body/description | -| `--label "a,b"` | Comma-separated labels | -| `-p, --project KEY` | Jira project key (auto-detected from config) | -| `--start` | Start a worktree after creation | - -```bash -# Create a GitHub issue -$ parsec create --title "Fix login redirect" --label "bug" -Created #145: Fix login redirect - https://github.com/org/repo/issues/145 - -# Create and immediately start working -$ parsec create --title "Add caching layer" --start -Created #146: Add caching layer -Created workspace for #146 at /home/user/myapp.146 -``` - ---- - -### `parsec new-issue` - -Create a new issue on the tracker (alias for `create` with additional options). Supports GitHub Issues and Jira with configurable issue type. 
- -``` -parsec new-issue --title "text" [--body "text"] [--label "a"] [--project KEY] [--issue-type TYPE] [--start] -``` - -| Option | Description | -|--------|-------------| -| `--title "text"` | Issue title (required) | -| `--body "text"` | Issue body/description | -| `--label "a"` | Labels (can be specified multiple times) | -| `-p, --project KEY` | Jira project key (auto-detected from config) | -| `--issue-type TYPE` | Jira issue type (default: Task) | -| `--start` | Auto-start a worktree for the new issue | - -```bash -# Create with issue type for Jira -$ parsec new-issue --title "Implement API caching" --issue-type Story --project CL - -# Multiple labels -$ parsec new-issue --title "Fix auth bug" --label bug --label priority -``` - ---- - -### `parsec release ` - -Create a release: merge develop to main, create a git tag, and optionally create a GitHub Release with auto-generated changelog. - -``` -parsec release [--from ] [--no-github-release] [--dry-run] -``` - -| Option | Description | -|--------|-------------| -| `` | Version string (e.g., "0.3.0") | -| `--from ` | Source branch to release from (default: develop) | -| `--no-github-release` | Skip creating GitHub Release | -| `--dry-run` | Show what would happen without making changes | - -```bash -# Full release -$ parsec release 0.3.0 -✓ Merged develop → main -✓ Tagged v0.3.0 -✓ GitHub Release created: https://github.com/org/repo/releases/tag/v0.3.0 - -# Dry run first -$ parsec release 0.4.0 --dry-run - -# Skip GitHub Release -$ parsec release 0.3.1 --no-github-release -``` - ---- - -### `parsec rename --new ` - -Re-ticket an existing workspace to a different ticket ID. Renames the branch and updates internal state. Useful when a ticket is split or re-assigned. 
- -``` -parsec rename --new -``` - -| Option | Description | -|--------|-------------| -| `--new ` | New ticket ID to assign (required) | - -```bash -# Re-ticket a workspace -$ parsec rename PROJ-100 --new PROJ-200 -Renamed PROJ-100 → PROJ-200 - Branch: feature/PROJ-100 → feature/PROJ-200 - Path: /home/user/myapp.PROJ-200 - -# JSON output -$ parsec rename PROJ-100 --new PROJ-200 --json -``` - ---- - -### `parsec compress [ticket] [-m ]` - -Squash all of a branch's commits into a single tidy commit before shipping. The branch is reset to the merge-base with the base branch and the cumulative changes are re-committed as one. Co-author trailers from squashed commits are preserved. - -``` -parsec compress [ticket] [-m ] -``` - -| Option | Description | -|--------|-------------| -| `ticket` | Optional. Auto-detects the current worktree's ticket if omitted. | -| `-m, --message ` | Custom commit message. Default: combines all squashed commit messages. | - -```bash -# Compress the current worktree's branch -$ parsec compress -Compressed 7 commits into one on feature/PROJ-1234. - -# Compress a specific ticket with a custom message -$ parsec compress PROJ-1234 -m "feat: add user authentication" - -# Combine with ship -$ parsec compress && parsec ship -``` - ---- - -## Global Flags - -These flags work on every command: - -| Flag | Description | -|------|-------------| -| `--dry-run` | Preview what a command would do without making changes | -| `--offline` | Skip all network operations (tracker, PR, fetch). Also enabled by `[workspace].offline = true` in config | -| `--json` | Machine-readable JSON output | -| `-q, --quiet` | Suppress non-essential output | -| `--repo ` | Target a different repository | - -```bash -$ parsec list --json -$ parsec ship PROJ-1234 --quiet -$ parsec status --repo /path/to/other-repo -``` - ---- - -## Shell Integration - -`parsec switch` prints a path but cannot `cd` for you. 
The shell integration wraps `parsec switch` so it changes your directory automatically: - -```bash -# Preferred: auto-install (appends to your shell config with confirmation) -$ parsec init --install -Add shell integration to /home/user/.zshrc? [Y/n] y -Shell integration added to /home/user/.zshrc. Run `source ~/.zshrc` or restart your shell. - -# Or with --yes for scripted setup -$ parsec init --install --yes - -# Manual: add to ~/.zshrc yourself -eval "$(parsec init zsh)" - -# Or for bash -eval "$(parsec init bash)" -``` - -After sourcing, `parsec switch ` will `cd` into the worktree directly: - -```bash -$ parsec switch PROJ-1234 -# Now you're in /home/user/myapp.PROJ-1234 -``` - ---- - -## Shell Completions - -Generate tab-completion scripts for your shell: - -```bash -# Zsh — add to ~/.zshrc -eval "$(parsec config completions zsh)" - -# Bash — add to ~/.bashrc -eval "$(parsec config completions bash)" - -# Fish — add to ~/.config/fish/config.fish -parsec config completions fish | source - -# Other shells -parsec config completions elvish -parsec config completions powershell -``` - ---- - -## Man Page - -Install the man page so `man parsec` works: - -```bash -sudo parsec config man -# Man page installed to /usr/local/share/man/man1/parsec.1 - -# Custom directory -parsec config man --dir ~/.local/share/man -``` - ---- - -## Configuration - -Config file: `~/.config/parsec/config.toml` -```toml [workspace] -# "sibling" (default) creates worktrees next to repo: ../repo.ticket/ -# "internal" creates inside repo: .parsec/workspaces/ticket/ -layout = "sibling" -base_dir = ".parsec/workspaces" -branch_prefix = "feature/" -# Skip all network operations (tracker, PR, fetch). Equivalent to passing -# --offline on every command. Useful for air-gapped / flight-mode dev. -offline = false - -[worktree] -# Directories to share from the main repo into new worktrees so that -# `parsec start` doesn't trigger a cold rebuild. Default is empty (no sharing). 
-shared_cache = ["target", "node_modules", ".venv"] -# "symlink" (default): fast, zero-disk overhead. All worktrees and the main -# repo share one cache — running parallel builds of the -# same artifact may race. -# "copy": full copy at start time. Each worktree gets an independent cache, -# no race risk, but uses more disk and the initial copy takes time. -cache_strategy = "symlink" +layout = "sibling" # ../repo.ticket/ (alt: "internal") +branch_prefix = "feature/" +offline = false # true = skip all network ops [tracker] -# "jira" | "github" | "gitlab" | "bitbucket" | "none" -provider = "jira" +provider = "jira" # jira | github | gitlab | bitbucket | none [tracker.jira] base_url = "https://yourcompany.atlassian.net" -# Auth: PARSEC_JIRA_TOKEN or JIRA_PAT env var -# project = "CL" # Default project for board -# board_id = 123 # Default board ID -# assignee = "eric.signal" # Default assignee filter - -[tracker.gitlab] -base_url = "https://gitlab.com" -# Auth: PARSEC_GITLAB_TOKEN env var +# Auth: PARSEC_JIRA_TOKEN env var -[tracker.bitbucket] -# workspace = "your-bitbucket-workspace" -# Auth: PARSEC_BITBUCKET_TOKEN (or BITBUCKET_TOKEN) env var -# api_base override: PARSEC_BITBUCKET_API_BASE env var +[ship] +auto_pr = true +auto_cleanup = true +draft = false # true = open PRs as drafts +# template = ".github/PULL_REQUEST_TEMPLATE.md" -[forge] -# Auto-detected from the remote URL when omitted. Override here if multiple -# forges are reachable (e.g., GitHub for PRs, Bitbucket Pipelines for CI). 
-# provider = "github" | "gitlab" | "bitbucket" +[worktree] +shared_cache = ["target", "node_modules", ".venv"] +cache_strategy = "symlink" # alt: "copy" +``` -[ship] -auto_pr = true # Create PR/MR on ship -auto_cleanup = true # Remove worktree after ship -draft = false # Open PRs as drafts by default -# template = ".github/PULL_REQUEST_TEMPLATE.md" # Auto-fill PR description -# default_reviewers = ["alice", "bob"] # Reviewers added on every ship -# default_labels = ["needs-review"] # Labels added on every ship +**Auth tokens** (set via env vars, all optional): -[observability] -# Enable per-step timing in `parsec log --export` JSONL output. -# Each command run gets a unique execution ID; useful for tooling and AI -# agents to correlate parsec actions with downstream effects. -enabled = true +``` +PARSEC_JIRA_TOKEN PARSEC_GITHUB_TOKEN PARSEC_GITLAB_TOKEN +PARSEC_BITBUCKET_TOKEN GITHUB_TOKEN (fallback) GITLAB_TOKEN (fallback) +PARSEC_OFFLINE=1 — force offline mode globally +``` -[hooks] -# Commands to run in new worktrees after creation -post_create = ["npm install"] -# Commands to run before shipping (pre-push hooks) -pre_ship = ["cargo test", "cargo clippy"] +Full schema and every option: `parsec config schema` (also published to [schemastore.org](https://schemastore.org)). 
-[release] -# branch = "main" # Target release branch (default: main) -# tag_prefix = "v" # Tag prefix (default: "v") -# changelog = true # Generate changelog in release notes +--- -[policy] -# protected_branches = ["main", "develop", "release/*"] # Branches that cannot be shipped to -# allowed_ship_targets = ["develop"] # Restrict PR target branches -# require_ci = false # Require CI pass before merge +## Comparison -[tracker.auto_transition] -# on_start = "In Progress" # Transition when `parsec start` runs -# on_ship = "In Review" # Transition when `parsec ship` runs -# on_merge = "Done" # Transition when `parsec merge` runs -``` +| | parsec | GitButler | worktrunk | git worktree | git-town | +|---|---|---|---|---|---| +| Tracker integration | Jira + GitHub + GitLab + Bitbucket | — | — | — | — | +| Physical worktree isolation | ✅ | ❌ (virtual) | ✅ | ✅ | ❌ | +| Cross-worktree conflict detection | ✅ | n/a | ❌ | ❌ | ❌ | +| One-step ship (push + PR + cleanup) | ✅ | ❌ | ❌ | ❌ | ✅ | +| Forges | GitHub + GitLab + Bitbucket | Both | GitHub | — | GitHub, GitLab, Gitea, Bitbucket | +| CI integrations | Actions + GitLab CI + Bitbucket Pipelines | — | — | — | — | +| Operation log + undo | ✅ | ✅ | ❌ | ❌ | partial | +| JSON output | ✅ | ✅ | ❌ | ❌ | ❌ | +| Stacked PRs | ✅ | ✅ | ❌ | ❌ | ✅ | +| GUI | CLI only | Desktop + TUI | CLI | CLI | CLI | --- -## Environment Variables - -| Variable | Description | -|----------|-------------| -| `PARSEC_JIRA_TOKEN` | Jira API token (or personal access token) | -| `JIRA_PAT` | Alternative Jira token variable | -| `JIRA_BASE_URL` | Jira URL (overrides config) | -| `PARSEC_GITHUB_TOKEN` | GitHub token for PR creation | -| `GITHUB_TOKEN` | Fallback GitHub token | -| `GH_TOKEN` | Fallback GitHub token | -| `PARSEC_GITLAB_TOKEN` | GitLab token for MR creation | -| `GITLAB_TOKEN` | Fallback GitLab token | -| `PARSEC_BITBUCKET_TOKEN` | Bitbucket Cloud app password / access token | -| `BITBUCKET_TOKEN` | Fallback Bitbucket token | -| 
`PARSEC_BITBUCKET_API_BASE` | Override Bitbucket API base URL (test/mock servers) | -| `PARSEC_JIRA_PROJECT` | Default Jira project key for `board` | -| `PARSEC_JIRA_BOARD_ID` | Default Jira board ID for `board` | -| `PARSEC_JIRA_ASSIGNEE` | Default assignee filter for `board` | -| `PARSEC_OFFLINE` | Set to `1` to force offline mode (same as `--offline`) | +## Documentation -Token priority: `PARSEC_*_TOKEN` > platform-specific variables. +- 📘 **[Getting Started Guide](https://erishforg.github.io/git-parsec/guide/)** — install, first ship, tracker config, recipes +- 📗 **[Command Reference](https://erishforg.github.io/git-parsec/reference/)** — every command, every flag, with examples +- 🌐 **[Project home](https://erishforg.github.io/git-parsec/)** — features tour and live demo --- -## Error Codes +## Error codes -When using `--json`, errors include a structured error code for programmatic handling: +Every command exits with a structured code. JSON output (`--json`) includes the same code: -| Code | Meaning | Exit Code | -|------|---------|-----------| -| E001 | No authentication token configured | 2 | -| E002 | CI checks failing | 4 | -| E003 | Merge conflicts detected | 3 | -| E004 | PR not mergeable | 5 | -| E005 | Workspace not found | 5 | -| E006 | Workspace already exists | 5 | -| E007 | No active workspaces | 5 | -| E008 | Pre-ship hook failed | 1 | -| E009 | Policy violation | 6 | -| E010 | PR not found | 5 | -| E011 | Tracker not configured | 2 | -| E012 | Ship partially completed | 1 | -| E013 | Cannot undo operation | 1 | +| | | | +|---|---|---| +| `E001` no auth token | `E005` workspace not found | `E009` policy violation | +| `E002` CI failing | `E006` workspace already exists | `E010` PR not found | +| `E003` merge conflict | `E007` no active workspaces | `E011` tracker not configured | +| `E004` PR not mergeable | `E008` pre-ship hook failed | `E012` ship partial | +| | | `E013` cannot undo | ```bash -# JSON error output example $ parsec ship PROJ-1234 
--json 2>&1 {"error":{"code":"E001","message":"No GitHub token configured","hint":"Set PARSEC_GITHUB_TOKEN or run gh auth login"}} $ echo $? @@ -1465,63 +199,6 @@ $ echo $? --- -## Comparison with Alternatives - -| Feature | parsec | GitButler | worktrunk | git worktree | git-town | -|---------|--------|-----------|-----------|--------------|----------| -| Ticket tracker integration | Jira + GitHub + GitLab + Bitbucket | No | No | No | No | -| Physical isolation | Yes (worktrees) | No (virtual branches) | Yes (worktrees) | Yes | No | -| Conflict detection | Cross-worktree | N/A | No | No | No | -| One-step ship (push+PR+clean) | Yes | No | No | No | Yes | -| Forges | GitHub + GitLab + Bitbucket | Both | GitHub | No | GitHub, GitLab, Gitea, Bitbucket | -| CI integrations | GitHub Actions + GitLab CI + Bitbucket Pipelines | No | No | No | No | -| Operation history + undo | Yes | Yes | No | No | Yes (undo) | -| JSON output | Yes | Yes | No | No | No | -| CI monitoring | Yes (--watch) | No | No | No | No | -| Stacked PRs | Yes | Yes | No | No | Yes | -| Auto-cleanup merged | Yes | No | No | Manual | No | -| Post-create hooks | Yes | No | Yes | No | No | -| Issue creation from CLI | Yes | No | No | No | No | -| AI token efficiency | Single-command ops | N/A | N/A | N/A | N/A | -| GUI | CLI only | Desktop + TUI | CLI | CLI | CLI | -| Zero config start | Yes | No | Yes | No | No | - ---- - -## FAQ - -**How do I set up parsec with Jira?** -Set `PARSEC_JIRA_TOKEN` (or `JIRA_PAT`) and configure `[tracker.jira]` in `~/.config/parsec/config.toml` with your `base_url` and `project`. Run `parsec config init` for interactive setup, or `parsec doctor` to validate. - -**How do I set up parsec with GitHub Issues?** -Set `provider = "github"` under `[tracker]` in your config. Authentication uses `PARSEC_GITHUB_TOKEN`, `GITHUB_TOKEN`, `GH_TOKEN`, or `gh auth token` automatically. - -**Does parsec support GitLab?** -Yes. parsec supports GitLab for both issue tracking and MR creation. 
Set `provider = "gitlab"` and configure `[tracker.gitlab]` with your `base_url`. Set `PARSEC_GITLAB_TOKEN` for authentication. - -**Can I use parsec without a ticket tracker?** -Yes. Set `provider = "none"` or use `--title` with `parsec start` to skip tracker lookup entirely. - -**How do stacked PRs work?** -Use `parsec start CHILD --on PARENT` to create dependent worktrees. `parsec ship` automatically sets the correct base branch. `parsec stack --sync` rebases the entire chain. - -**What happens if two worktrees modify the same file?** -`parsec conflicts` detects cross-worktree file overlap before you push. It compares changed files across all active worktrees and warns about collisions. - -**Can I undo a ship or clean?** -Yes. `parsec undo` reverses the last operation. For `ship`, it re-creates the worktree from the branch. For `clean`, it restores from the remote branch if still available. - -**How do I protect branches from accidental shipping?** -Add a `[policy]` section to your config with `protected_branches` and `allowed_ship_targets`. parsec will reject operations that violate these rules. - -**Does parsec work with GitHub Enterprise?** -Yes. parsec auto-detects GitHub Enterprise from the remote URL and routes API calls to the correct host. Token resolution is host-aware. - -**How do AI agents use parsec?** -Every command supports `--json` for structured output. Run `parsec doctor --ai` to get a Markdown document with workflow rules and command patterns optimized for AI agent consumption. - ---- - ## License -MIT +MIT — see [LICENSE](LICENSE). diff --git a/docs/guide/index.html b/docs/guide/index.html index 6d18d77..f73627d 100644 --- a/docs/guide/index.html +++ b/docs/guide/index.html @@ -3,16 +3,64 @@ - Guide — git-parsec - - + Getting Started Guide — git-parsec | Install, configure, ship in 5 minutes + + - + + - - + + + + + + + + + + + + + + + @@ -1019,33 +1067,6 @@ } .version-banner a:hover { color: var(--accent-cyan); } - - - - - @@ -1087,7 +1108,7 @@
  • AI Agent Workflows
  • Stacked PRs
  • New Features
  • -
  • What's New in v0.4
  • +
  • Recipes & Examples
  • @@ -1740,14 +1761,14 @@

    Pre-ship hooks

    -
    +
    -

    What's New in v0.4

    - # +

    Recipes & Examples

    + #

    - v0.4 broadens forge support to Bitbucket Cloud, adds workflow utilities (compress, --template, stack navigation comments), and introduces operational primitives — offline mode, observability JSONL, and a published config schema — designed for tooling and AI agents. + End-to-end examples for the workflow patterns parsec is built around — Bitbucket Cloud setup, history compression, stacked PR navigation, PR templates, offline / headless mode, observability via JSONL, editor autocomplete via the JSON Schema, and worktree build cache sharing. Each recipe is self-contained — copy the snippets and adapt to your repo.

    Bitbucket Cloud — full PR lifecycle

    diff --git a/docs/index.html b/docs/index.html index 5beba37..a12bcf4 100644 --- a/docs/index.html +++ b/docs/index.html @@ -3,36 +3,52 @@ - git-parsec — Full-lifecycle worktree management - - + git-parsec — From ticket to PR. One command. | Git worktree lifecycle manager + + + - - + + + + + + + - - + + + + + + + + + + @@ -926,19 +973,6 @@ } .version-banner a:hover { color: var(--accent-cyan); } - - - - diff --git a/docs/robots.txt b/docs/robots.txt index 4e53dda..891fa51 100644 --- a/docs/robots.txt +++ b/docs/robots.txt @@ -1,4 +1,27 @@ User-agent: * Allow: / +# Explicitly welcome major AI/LLM crawlers — git-parsec is open-source +# and benefits from being surfaced in AI-generated answers. +User-agent: GPTBot +Allow: / + +User-agent: ChatGPT-User +Allow: / + +User-agent: ClaudeBot +Allow: / + +User-agent: anthropic-ai +Allow: / + +User-agent: PerplexityBot +Allow: / + +User-agent: Google-Extended +Allow: / + +User-agent: CCBot +Allow: / + Sitemap: https://erishforg.github.io/git-parsec/sitemap.xml diff --git a/docs/sitemap.xml b/docs/sitemap.xml index 380a220..47968a2 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -2,22 +2,34 @@ https://erishforg.github.io/git-parsec/ - 2026-04-22 + 2026-05-04 weekly 1.0 https://erishforg.github.io/git-parsec/guide/ - 2026-04-22 + 2026-05-04 weekly 0.8 https://erishforg.github.io/git-parsec/reference/ - 2026-04-22 + 2026-05-04 weekly 0.8 + + https://erishforg.github.io/git-parsec/llms.txt + 2026-05-04 + weekly + 0.7 + + + https://erishforg.github.io/git-parsec/llms-full.txt + 2026-05-04 + weekly + 0.7 + https://erishforg.github.io/git-parsec/v/0.3.3/ 2026-04-23